def test_rotateIntervalCluster(self):
    """Verify audit-log rotation honours the configured interval on every cluster node.

    Records each node's pre-rotation log timestamp, applies ``intervalSec``,
    sleeps past the interval, then asserts that both the timestamped archive
    file and a fresh audit.log exist on each of the first ``nodes_init``
    nodes. The original rotation interval is always restored.

    Fix: the setAuditEnable line was indented with a TAB, which is a
    TabError under Python 3; indentation is now uniform spaces.
    """
    intervalSec = self.input.param("intervalSec", None)
    nodes_init = self.input.param("nodes_init", 2)
    auditIns = audit(host=self.master)
    auditIns.setAuditEnable('true')
    originalInt = auditIns.getAuditRotateInterval()
    auditIns.setAuditRotateInterval(intervalSec)
    firstEventTime = []

    try:
        # The pre-rotation timestamp becomes part of the archive file name.
        for i in range(len(self.servers[:nodes_init])):
            auditTemp = audit(host=self.servers[i])
            firstEventTime.append(self.getTimeStampForFile(auditTemp))

        self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')

        for i in range(len(self.servers[:nodes_init])):
            shell = RemoteMachineShellConnection(self.servers[i])
            rest = RestConnection(self.servers[i])
            # Generate an audit event so the new log file has content.
            status, content = rest.validateLogin(self.master.rest_username,
                                                 self.master.rest_password,
                                                 True, getContent=True)
            self.sleep(120, "sleeping for log file creation")
            try:
                hostname = shell.execute_command("hostname")
                self.log.info("print firstEventTime {0}".format(firstEventTime[i]))
                # Archive name: <hostname>-<timestamp>-audit.log
                archiveFile = hostname[0][0] + '-' + firstEventTime[i] + "-audit.log"
                self.log.info("Archive File Name is {0}".format(archiveFile))
                result = shell.file_exists(auditIns.pathLogFile, archiveFile)
                self.assertTrue(result, "Archive Audit.log is not created on time interval")
                self.log.info("Validation of archive File created is True, Audit archive File is created {0}".format(archiveFile))
                result = shell.file_exists(auditIns.pathLogFile, auditIns.AUDITLOGFILENAME)
                self.assertTrue(result, "Audit.log is not created as per the roll over time specified")
            finally:
                shell.disconnect()
    finally:
        auditIns.setAuditRotateInterval(originalInt)
 def test_rotateInterval(self):
     """Verify a new audit rotate interval triggers a roll-over on the master node.

     Records the current log timestamp, applies ``intervalSec``, waits past
     the interval, then asserts that the timestamped archive file and a
     fresh audit.log both exist. The original interval is restored in all
     cases.
     """
     intervalSec = self.input.param("intervalSec", None)
     auditIns = audit(host=self.master)
     rest = RestConnection(self.master)
     originalInt = auditIns.getAuditRotateInterval()
     try:
         firstEventTime = self.getTimeStampForFile(auditIns)
         # Fixed typo in the log message ("evetn" -> "event").
         self.log.info("first time event is {0}".format(firstEventTime))
         auditIns.setAuditRotateInterval(intervalSec)
         self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
         # Generate at least one audit event so the rotated file has content.
         status, content = rest.validateLogin(self.master.rest_username, self.master.rest_password, True, getContent=True)
         self.sleep(120, "sleeping for log file creation")
         shell = RemoteMachineShellConnection(self.master)
         try:
             hostname = shell.execute_command("hostname")
             archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
             self.log.info("Archive File Name is {0}".format(archiveFile))
             result = shell.file_exists(auditIns.pathLogFile, archiveFile)
             self.assertTrue(result, "Archive Audit.log is not created on time interval")
             self.log.info("Validation of archive File created is True, Audit archive File is created {0}".format(archiveFile))
             result = shell.file_exists(auditIns.pathLogFile, auditIns.AUDITLOGFILENAME)
             # Fixed copy-pasted assertion message: this test rotates on a time
             # interval; no memcached process is killed here.
             self.assertTrue(result, "Audit.log is not created as per the roll over time specified")
         finally:
             shell.disconnect()
     finally:
         auditIns.setAuditRotateInterval(originalInt)
 def test_fileRotate20MB(self):
     """Verify audit.log rotates once it reaches the ~20 MB size threshold.

     Floods the audit log with login events while polling the file size and
     the presence of the timestamped archive file; gives up after a 10-hour
     safety timeout.
     """
     # Rotation size threshold (~20 MB) and safety timeout (10 hours).
     maxFileSize = 21089520
     timeoutSec = 36000
     auditIns = audit(host=self.master)
     firstEventTime = self.getTimeStampForFile(auditIns)
     tempEventCounter = 0
     rest = RestConnection(self.master)
     shell = RemoteMachineShellConnection(self.master)
     filePath = auditIns.pathLogFile + auditIns.AUDITLOGFILENAME
     number = int(shell.get_data_file_size(filePath))
     hostname = shell.execute_command("hostname")
     # Archive name: <hostname>-<timestamp>-audit.log
     archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
     result = shell.file_exists(auditIns.pathLogFile, archiveFile)
     tempTime = 0
     starttime = time.time()
     while number < maxFileSize and tempTime < timeoutSec and not result:
         # Fire a batch of login audit events between size checks.
         for _ in range(1, 10):
             status, content = rest.validateLogin("Administrator",
                                                  "password",
                                                  True,
                                                  getContent=True)
             tempEventCounter += 1
             number = int(shell.get_data_file_size(filePath))
             tempTime = int(time.time() - starttime)
             result = shell.file_exists(auditIns.pathLogFile, archiveFile)
     self.sleep(30)
     result = shell.file_exists(auditIns.pathLogFile, archiveFile)
     shell.disconnect()
     self.log.info(
         "--------Total Event Created ---- {0}".format(tempEventCounter))
     self.assertTrue(
         result,
         "Archive Audit.log is not created on reaching 20MB threshhold")
 def test_fileRotate20MB(self):
     """Drive audit-event traffic until audit.log rolls over at its ~20 MB size cap."""
     audit_inst = audit(host=self.master)
     stamp = self.getTimeStampForFile(audit_inst)
     events_fired = 0
     rest_conn = RestConnection(self.master)
     conn = RemoteMachineShellConnection(self.master)
     log_path = audit_inst.pathLogFile + audit_inst.AUDITLOGFILENAME
     size_now = int(conn.get_data_file_size(log_path))
     host_out = conn.execute_command("hostname")
     rolled_name = host_out[0][0] + '-' + stamp + "-audit.log"
     rolled = conn.file_exists(audit_inst.pathLogFile, rolled_name)
     elapsed = 0
     began = time.time()
     # Keep generating login audit events until the file rotates, the size
     # cap is reached, or the 10-hour safety timeout expires.
     while size_now < 21089520 and elapsed < 36000 and rolled == False:
         for i in range(1, 10):
             status, content = rest_conn.validateLogin("Administrator", "password", True, getContent=True)
             events_fired += 1
             size_now = int(conn.get_data_file_size(log_path))
             elapsed = int(time.time() - began)
             rolled = conn.file_exists(audit_inst.pathLogFile, rolled_name)
     self.sleep(30)
     rolled = conn.file_exists(audit_inst.pathLogFile, rolled_name)
     conn.disconnect()
     self.log.info ("--------Total Event Created ---- {0}".format(events_fired))
     self.assertTrue(rolled, "Archive Audit.log is not created on reaching 20MB threshhold")
 def verify_log_files_exist(self,
                            remotepath=None,
                            redactFileName=None,
                            nonredactFileName=None):
     '''
     Verifies if log files exist after collection
     :param remotepath: absolute path to log files
     :param redactFileName: redacted zip log file name
     :param nonredactFileName: non-redacted zip log file name
     :return:
     '''
     if not remotepath:
         self.fail("Remote path needed to verify if log files exist")
     shell = RemoteMachineShellConnection(self.master)
     # Fix: self.fail raises, which previously leaked the shell connection;
     # the try/finally now guarantees disconnect.
     try:
         if shell.file_exists(remotepath=remotepath,
                              filename=nonredactFileName):
             self.log.info("Regular non-redacted log file exists as expected")
         else:
             self.fail("Regular non-redacted log file does not exist")
         if redactFileName and self.log_redaction_level == "partial":
             if shell.file_exists(remotepath=remotepath,
                                  filename=redactFileName):
                 self.log.info("Redacted file exists as expected")
             else:
                 # NOTE(review): a missing redacted file is only logged, not
                 # failed -- presumably intentional; confirm with test owner.
                 self.log.info("Redacted file does not exist")
     finally:
         shell.disconnect()
Beispiel #6
0
 def serviceInstallBasic(self):
     """Install the sync_gateway service with ``extra_param`` options on every
     server, then verify the service and process are running and the expected
     log, data and config artifacts exist in the (possibly defaulted) dirs."""
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         self.assertTrue(self.service_clean(shell))
         self.assertTrue(self.install_gateway(shell))
         # Wipe previous logs/data/config and pre-create the scratch dirs
         # referenced by the install options.
         shell.execute_command_raw(
             'rm -rf {0}/* {1}/* {2}/sync_gateway.json {3}/tmp/test*; mkdir {3}/tmp/test {3}/tmp/test2'
             .format(self.logsdir, self.datadir, self.configdir,
                     self.folder_prefix))
         output, error = self.run_sync_gateway_service_install(
             shell, self.extra_param)
         self.check_normal_error_output(shell, output, error)
         self.assertTrue(self.is_sync_gateway_service_running(shell))
         self.assertTrue(self.is_sync_gateway_process_running(shell))
         # When the matching CLI option was not supplied, the installer uses
         # its built-in default locations, so point the checks there.
         if not "--runbase" in self.extra_param:
             # hardcoded for services LOGS_TEMPLATE_VAR=${RUNBASE_TEMPLATE_VAR}/logs
             self.datadir = '/home/sync_gateway'
         if not "--logsdir" in self.extra_param:
             self.logsdir = '/home/sync_gateway/logs'
         if not "--cfgpath" in self.extra_param:
             self.configdir = '/home/sync_gateway'
         self.assertTrue(
             shell.file_exists(self.logsdir, 'sync_gateway_error.log'))
         self.assertTrue(shell.file_exists(self.datadir, 'data'))
         self.assertTrue(shell.file_exists(self.configdir, self.configfile))
Beispiel #7
0
 def serviceInstallLogsDirNotExist(self):
     """Install the sync_gateway service on every server and verify the
     service/process come up and the expected artifacts are created."""
     for node in self.servers:
         conn = RemoteMachineShellConnection(node)
         self.assertTrue(self.service_clean(conn))
         self.assertTrue(self.install_gateway(conn))
         install_out, install_err = self.run_sync_gateway_service_install(conn, self.extra_param)
         self.check_normal_error_output(conn, install_out, install_err)
         self.assertTrue(self.is_sync_gateway_service_running(conn))
         self.assertTrue(self.is_sync_gateway_process_running(conn))
         # The service must have laid down its log, data and config files.
         for directory, artifact in ((self.logsdir, 'sync_gateway_error.log'),
                                     (self.datadir, 'data'),
                                     (self.configdir, self.configfile)):
             self.assertTrue(conn.file_exists(directory, artifact))
Beispiel #8
0
 def serviceInstallSGPath(self):
     """Install the service with an explicit --sgpath pointing at a relocated
     sync_gateway binary and verify it runs and creates its files."""
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         self.assertTrue(self.service_clean(shell))
         self.assertTrue(self.install_gateway(shell))
         # Move the binary away from its default location so the service can
         # only start if --sgpath is honoured.
         shell.execute_command_raw('mv /opt/couchbase-sync-gateway/bin /opt/couchbase-sync-gateway/bin2 ')
         output, error = self.run_sync_gateway_service_install(shell, '--sgpath=/opt/couchbase-sync-gateway/bin2/sync_gateway')
         self.check_normal_error_output(shell, output, error)
         self.assertTrue(self.is_sync_gateway_service_running(shell))
         self.assertTrue(self.is_sync_gateway_process_running(shell))
         self.assertTrue(shell.file_exists(self.logsdir, 'sync_gateway_error.log'))
         self.assertTrue(shell.file_exists(self.datadir, 'data'))
         self.assertTrue(shell.file_exists(self.configdir, self.configfile))
Beispiel #9
0
 def serviceInstallNegative(self):
     """Run a service install that is expected to fail and verify nothing is
     left running and no artifacts were created."""
     for node in self.servers:
         conn = RemoteMachineShellConnection(node)
         self.assertTrue(self.service_clean(conn))
         self.assertTrue(self.install_gateway(conn))
         out, err = self.run_sync_gateway_service_install(conn, self.extra_param)
         # The installer must emit exactly the expected error and no output.
         self.assertEqual(err, [self.expected_error])
         self.assertEqual(out, [])
         self.assertFalse(self.is_sync_gateway_service_running(conn))
         self.assertFalse(self.is_sync_gateway_process_running(conn))
         # None of the usual artifacts should exist after a failed install.
         for directory, artifact in ((self.logsdir, 'sync_gateway_error.log'),
                                     (self.datadir, 'data'),
                                     (self.configdir, self.configfile)):
             self.assertFalse(conn.file_exists(directory, artifact))
    def test_rotateIntervalCluster(self):
        """Verify audit-log rotation at the configured interval on every
        cluster node, restoring the original interval afterwards."""
        intervalSec = self.input.param("intervalSec", None)
        nodes_init = self.input.param("nodes_init", 2)
        auditIns = audit(host=self.master)
        auditIns.setAuditEnable('true')
        originalInt = auditIns.getAuditRotateInterval()
        auditIns.setAuditRotateInterval(intervalSec)
        firstEventTime = []

        try:
            # Record each node's pre-rotation log timestamp; it becomes part
            # of the archive file name after roll-over.
            for i in range(len(self.servers[:nodes_init])):
                auditTemp = audit(host=self.servers[i])
                firstEventTime.append(self.getTimeStampForFile(auditTemp))

            self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')

            for i in range(len(self.servers[:nodes_init])):
                shell = RemoteMachineShellConnection(self.servers[i])
                rest = RestConnection(self.servers[i])
                # Generate an audit event so the new log file has content.
                status, content = rest.validateLogin(self.master.rest_username,
                                                     self.master.rest_password,
                                                     True,
                                                     getContent=True)
                self.sleep(120, "sleeping for log file creation")
                try:
                    hostname = shell.execute_command("hostname")
                    self.log.info("print firstEventTime {0}".format(
                        firstEventTime[i]))
                    # Archive name: <hostname>-<timestamp>-audit.log
                    archiveFile = hostname[0][0] + '-' + firstEventTime[
                        i] + "-audit.log"
                    self.log.info(
                        "Archive File Name is {0}".format(archiveFile))
                    result = shell.file_exists(auditIns.pathLogFile,
                                               archiveFile)
                    self.assertTrue(
                        result,
                        "Archive Audit.log is not created on time interval")
                    self.log.info(
                        "Validation of archive File created is True, Audit archive File is created {0}"
                        .format(archiveFile))
                    result = shell.file_exists(auditIns.pathLogFile,
                                               auditIns.AUDITLOGFILENAME)
                    self.assertTrue(
                        result,
                        "Audit.log is not created as per the roll over time specified"
                    )
                finally:
                    shell.disconnect()
        finally:
            auditIns.setAuditRotateInterval(originalInt)
Beispiel #11
0
 def serviceInstallLogsDirNotExist(self):
     """Install the sync_gateway service and verify it starts and creates its
     log, data and config files."""
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         self.assertTrue(self.service_clean(shell))
         self.assertTrue(self.install_gateway(shell))
         output, error = self.run_sync_gateway_service_install(
             shell, self.extra_param)
         self.check_normal_error_output(shell, output, error)
         self.assertTrue(self.is_sync_gateway_service_running(shell))
         self.assertTrue(self.is_sync_gateway_process_running(shell))
         # Service must have created its log, data and config artifacts.
         self.assertTrue(
             shell.file_exists(self.logsdir, 'sync_gateway_error.log'))
         self.assertTrue(shell.file_exists(self.datadir, 'data'))
         self.assertTrue(shell.file_exists(self.configdir, self.configfile))
Beispiel #12
0
 def serviceInstallBasic(self):
     """Clean, reinstall and start the sync_gateway service on every server,
     then verify the expected artifacts exist."""
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         self.assertTrue(self.service_clean(shell))
         self.assertTrue(self.install_gateway(shell))
         # Wipe old logs/data/config and create scratch dirs. NOTE: the
         # output/error captured here are immediately overwritten by the
         # install call below (the cleanup result is not checked).
         output, error = shell.execute_command_raw(
             'rm -rf {0}/* {1}/* {2}/sync_gateway.json /tmp/test*; mkdir /tmp/test /tmp/test2'.
             format(self.logsdir, self.datadir, self.configdir))
         output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
         self.check_normal_error_output(shell, output, error)
         self.assertTrue(self.is_sync_gateway_service_running(shell))
         self.assertTrue(self.is_sync_gateway_process_running(shell))
         self.assertTrue(shell.file_exists(self.logsdir, 'sync_gateway_error.log'))
         self.assertTrue(shell.file_exists(self.datadir, 'data'))
         self.assertTrue(shell.file_exists(self.configdir, self.configfile))
Beispiel #13
0
 def serviceInstallNegative(self):
     """Run a service install expected to fail and verify no service, process
     or files are left behind."""
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         self.assertTrue(self.service_clean(shell))
         self.assertTrue(self.install_gateway(shell))
         output, error = self.run_sync_gateway_service_install(
             shell, self.extra_param)
         # The installer must print exactly the expected error and no output.
         self.assertEqual(error, [self.expected_error])
         self.assertEqual(output, [])
         self.assertFalse(self.is_sync_gateway_service_running(shell))
         self.assertFalse(self.is_sync_gateway_process_running(shell))
         self.assertFalse(shell.file_exists(self.logsdir, 'sync_gateway_error.log'))
         self.assertFalse(shell.file_exists(self.datadir, 'data'))
         self.assertFalse(shell.file_exists(self.configdir,
                                            self.configfile))
Beispiel #14
0
 def setUp(self):
     """Read webhook test params, install sync_gateway on the first test case
     only, and start the simple HTTP helper server on every node."""
     super(GatewayWebhookBaseTest, self).setUp()
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.version = self.input.param("version", "0.0.0-358")
     self.extra_param = self.input.param("extra_param", "")
     self.configfile = self.input.param("config", "config_webhook_basic.json")
     self.doc_id = self.input.param("doc_id", "doc1")
     self.doc_content = self.input.param("doc_content", "{'a':1}")
     self.expected_error = self.input.param("expected_error", "")
     self.servers = self.input.servers
     self.master = self.servers[0]
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         if self.case_number == 1:
             shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
             # will install sg only the first time
             self.install(shell)
             pid = self.is_sync_gateway_process_running(shell)
             self.assertNotEqual(pid, 0)
             exist = shell.file_exists("{0}/tmp/".format(self.folder_prefix), "gateway.log")
             self.assertTrue(exist)
             shell.copy_files_local_to_remote("pytests/sg/resources", "/tmp")
         self.start_simpleServe(shell)
         shell.disconnect()
    def test_AuditEvent(self):
        """Toggle audit via the ``ops`` param and validate the result: on
        disable, check the audit log file; otherwise validate the reloaded
        audit configuration event against the expected values."""
        auditIns = audit(host=self.master)
        ops = self.input.param("ops", None)
        # NOTE(review): `source` and `user` are unused locals, apparently kept
        # from an older version of this test.
        source = 'internal'
        user = '******'
        rest = RestConnection(self.master)
        #status = rest.setAuditSettings(enabled='true')
        auditIns.setAuditEnable('true')
        if (ops in ['enable', 'disable']):
            if ops == 'disable':
                #status = rest.setAuditSettings(enabled='false')
                auditIns.setAuditEnable('false')
            else:
                #status = rest.setAuditSettings(enabled='true')
                auditIns.setAuditEnable('true')

        if ops == 'disable':
            shell = RemoteMachineShellConnection(self.master)
            try:
                result = shell.file_exists(auditIns.getAuditLogPath(), auditIns.AUDITLOGFILENAME)
            finally:
                shell.disconnect()
            # NOTE(review): this asserts the file DOES exist after disable; a
            # sibling variant of this test asserts the opposite -- confirm
            # which post-disable behaviour is intended.
            self.assertTrue(result, 'Issue with file getting create in new directory')
        else:
            auditIns = audit(host=self.master)
            # Expected fields of the audit-config-reload event (version 2,
            # fixed uuid and 24h rotate interval).
            expectedResults = {"auditd_enabled":auditIns.getAuditStatus(),
                               "descriptors_path":self.changePathWindows(auditIns.getAuditConfigElement('descriptors_path')),
                               "log_path":self.changePathWindows((auditIns.getAuditLogPath())[:-1]), "source":"internal",
                               "user":"******", "rotate_interval":86400, "version":2, 'hostname':self.getHostName(self.master),
                               "uuid":"111731321"}
            self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
Beispiel #16
0
 def setUp(self):
     """Read webhook test params, install sync_gateway on the first test case
     only, and start the simple HTTP helper server on every node."""
     super(GatewayWebhookBaseTest, self).setUp()
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.extra_param = self.input.param("extra_param", "")
     self.configfile = self.input.param("config",
                                        "config_webhook_basic.json")
     self.doc_id = self.input.param("doc_id", "doc1")
     self.doc_content = self.input.param("doc_content", "{'a':1}")
     self.expected_error = self.input.param("expected_error", "")
     self.servers = self.input.servers
     self.master = self.servers[0]
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         if self.case_number == 1:
             shell.execute_command("rm -rf {0}/tmp/*".format(
                 self.folder_prefix))
             # will install sg only the first time
             self.install(shell)
             pid = self.is_sync_gateway_process_running(shell)
             self.assertNotEqual(pid, 0)
             exist = shell.file_exists(
                 '{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
             self.assertTrue(exist)
             shell.copy_files_local_to_remote('pytests/sg/resources',
                                              '/tmp')
         self.start_simpleServe(shell)
         shell.disconnect()
 def verify_for_recovery_type(self, chosen=(), serverMap=None, buckets=(),
                              recoveryTypeMap=None, fileMap=None,
                              deltaRecoveryBuckets=()):
     """Verify each rebalanced-in node recovered with the expected type.

     A "check.txt" marker in a bucket's data path survives a delta recovery
     but not a full recovery; its presence/absence is compared against
     recoveryTypeMap per node and per bucket. Skipped on Windows nodes.

     Fixes: shared mutable default arguments replaced with immutable/None
     equivalents (same behaviour for omitted arguments), `!= None` replaced
     with `is not None`, and the Windows early return no longer leaks the
     shell connection.
     NOTE(review): the `chosen` parameter is unused -- the loop iterates
     self.chosen instead; confirm that is intentional.
     """
     serverMap = {} if serverMap is None else serverMap
     recoveryTypeMap = {} if recoveryTypeMap is None else recoveryTypeMap
     fileMap = {} if fileMap is None else fileMap
     summary = ""
     logic = True
     for server in self.chosen:
         shell = RemoteMachineShellConnection(serverMap[server.ip])
         try:
             os_type = shell.extract_remote_info()
             if os_type.type.lower() == 'windows':
                 # Marker-file verification is not done on Windows; the
                 # finally clause closes the shell before returning.
                 return
             for bucket in buckets:
                 path = fileMap[server.ip][bucket.name]
                 exists = shell.file_exists(path, "check.txt")
                 if deltaRecoveryBuckets is not None:
                     if recoveryTypeMap[server.ip] == "delta" and (bucket.name in deltaRecoveryBuckets) and not exists:
                         logic = False
                         summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip, bucket.name)
                     elif recoveryTypeMap[server.ip] == "delta" and (bucket.name not in deltaRecoveryBuckets) and exists:
                         summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Full, Actual Delta".format(server.ip, bucket.name)
                         logic = False
                 else:
                     if recoveryTypeMap[server.ip] == "delta" and not exists:
                         logic = False
                         summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip, bucket.name)
                     elif recoveryTypeMap[server.ip] == "full" and exists:
                         logic = False
                         summary += "\n Failed Condition :: node {0}, bucket {1}  :: Expected Full, Actual Delta".format(server.ip, bucket.name)
         finally:
             shell.disconnect()
     self.assertTrue(logic, summary)
Beispiel #18
0
 def _verify_log_file(self, rest):
     """Poll every node until its cluster-collected log file appears (up to 5
     retries, 5s apart); fail listing the nodes that never produced one.

     :param rest: RestConnection used to query collection status and nodes.
     :return: True when every node's log file was found; otherwise fails.
     """
     progress, status, perNode = rest.get_cluster_logs_collection_status()
     node_failed_to_collect = []
     nodes = rest.get_nodes()
     for node in perNode:
         # Map the node name back to a server connection.
         # NOTE(review): if no server matches, `shell` keeps the previous
         # iteration's connection (or is unbound on the first pass) --
         # confirm the node/server lists always align.
         for server in self.servers[:len(nodes)]:
             if server.ip in node or (self.nodes_init == 1 \
                                      and "127.0.0.1" in node):
                 shell = RemoteMachineShellConnection(server)
         file_name = perNode[node]["path"].replace(self.log_path, "")
         collected = False
         retry = 0
         while not collected and retry < 5:
             existed = shell.file_exists(self.log_path, file_name)
             if existed:
                 self.log.info("file {0} exists on node {1}"
                               .format(perNode[node]["path"], node.replace("ns_1@", "")))
                 collected = True
             else:
                 self.log.info("retry {0} ".format(retry))
                 retry += 1
                 self.sleep(5)
             # After the final retry, record this node as a failure.
             if retry == 5:
                 self.log.error("failed to collect log after {0} try at node {1}"
                                .format(retry, node.replace("ns_1@", "")))
                 node_failed_to_collect.append(node)
     if not node_failed_to_collect:
         return True
     else:
         self.fail("Cluster-wide collectinfo failed to collect log at {0}" \
                        .format(node_failed_to_collect))
Beispiel #19
0
 def _cli_verify_log_file(self, shell):
     """CLI variant: poll every node until its collected log file appears (up
     to 5 retries, 5s apart); fail listing nodes that never produced one.

     :param shell: shell used to query collection status (may be rebound to
                   a per-node connection inside the loop).
     :return: True when every node's log file was found; otherwise fails.
     """
     stt, perNode = self._cli_get_cluster_logs_collection_status(shell)
     node_failed_to_collect = []
     for node in perNode:
         # Map the node name back to a server connection; if no server
         # matches, the incoming `shell` argument is used as-is.
         for server in self.servers[:self.nodes_init]:
             if server.ip in node or (self.nodes_init == 1 \
                                      and "127.0.0.1" in node):
                 shell = RemoteMachineShellConnection(server)
         file_name = perNode[node]["path"].replace(self.log_path, "")
         collected = False
         retry = 0
         while not collected and retry < 5:
             existed = shell.file_exists(self.log_path, file_name)
             if existed:
                 self.log.info("file {0} exists on node {1}"
                               .format(perNode[node]["path"], node))
                 collected = True
             else:
                 self.log.info("retry {0} ".format(retry))
                 retry += 1
                 self.sleep(5)
             # After the final retry, record this node as a failure.
             if retry == 5:
                 self.log.error("failed to collect log by CLI after {0} try at node {1}"
                                .format(retry, node.replace("ns_1@", "")))
                 node_failed_to_collect.append(node)
     if not node_failed_to_collect:
         return True
     else:
         self.fail("CLI Cluster-wide collectinfo failed to collect log at {0}" \
                        .format(node_failed_to_collect))
Beispiel #20
0
 def verify_for_recovery_type(self, chosen = [], serverMap = {}, buckets = [], recoveryTypeMap = {}, fileMap = {}, deltaRecoveryBuckets = []):
     """ Verify recovery type is delta or full """
     # A "check.txt" marker in a bucket's data path survives delta recovery
     # but not full recovery; its presence/absence is compared against
     # recoveryTypeMap per node and bucket.
     # NOTE(review): mutable default arguments; the `chosen` parameter is
     # unused (the loop iterates self.chosen); and the Windows early return
     # skips shell.disconnect() -- flagged for follow-up.
     summary = ""
     logic = True
     for server in self.chosen:
         shell = RemoteMachineShellConnection(serverMap[server.ip])
         os_type = shell.extract_remote_info()
         if os_type.type.lower() == 'windows':
             # Marker-file verification is skipped entirely on Windows.
             return
         for bucket in buckets:
             path = fileMap[server.ip][bucket.name]
             exists = shell.file_exists(path,"check.txt")
             if deltaRecoveryBuckets != None:
                 if recoveryTypeMap[server.ip] == "delta" and (bucket.name in deltaRecoveryBuckets) and not exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip,bucket.name)
                 elif recoveryTypeMap[server.ip] == "delta" and (bucket.name not in deltaRecoveryBuckets) and exists:
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Full, Actual Delta".format(server.ip,bucket.name)
                     logic = False
             else:
                 if recoveryTypeMap[server.ip] == "delta"  and not exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip,bucket.name)
                 elif recoveryTypeMap[server.ip] == "full" and exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1}  :: Expected Full, Actual Delta".format(server.ip,bucket.name)
         shell.disconnect()
     self.assertTrue(logic, summary)
Beispiel #21
0
    def run(self):
        """Collect cbcollect_info logs from the server and download the zip locally.

        Raises an Exception when the zip is missing on the server or the
        download fails.

        Fixes: Python-2-only print statements replaced with the print()
        function form (valid on both 2 and 3 for a single argument), and the
        shell is now disconnected even when an exception is raised.
        """
        remote_client = RemoteMachineShellConnection(self.server)
        try:
            now = datetime.now()
            day = now.day
            month = now.month
            year = now.year
            # File name embeds the collection date: <ip>-<m><d><Y>-diag.zip
            file_name = "%s-%s%s%s-diag.zip" % (self.server.ip, month, day, year)
            print("Collecting logs from %s\n" % (self.server.ip))
            output, error = remote_client.execute_cbcollect_info(file_name)
            print("\n".join(output))
            print("\n".join(error))

            # cbcollect drops the zip in the ssh user's home directory.
            user_path = "/home/"
            if self.server.ssh_username == "root":
                user_path = "/"
            if not remote_client.file_exists(
                    "%s%s" % (user_path, self.server.ssh_username), file_name):
                raise Exception("%s doesn't exists on server" % (file_name))
            if remote_client.get_file(
                    "%s%s" % (user_path, self.server.ssh_username), file_name,
                    "%s/%s" % (self.path, file_name)):
                print("Downloading zipped logs from %s" % (self.server.ip))
            else:
                raise Exception("Fail to download zipped logs from %s" %
                                (self.server.ip))
        finally:
            remote_client.disconnect()
Beispiel #22
0
 def setUp(self):
     """First-case setup: render the walrus gateway config with each node's
     IP, install sync_gateway, then (re)create the default/sasl/standard
     buckets used by the config tests."""
     super(SGConfigTests, self).setUp()
     for server in self.servers:
         if self.case_number == 1:
             # Substitute the node IP into the walrus config template.
             with open('pytests/sg/resources/gateway_config_walrus_template.json', 'r') as file:
                 filedata = file.read()
                 filedata = filedata.replace('LOCAL_IP', server.ip)
             with open('pytests/sg/resources/gateway_config_walrus.json', 'w') as file:
                 file.write(filedata)
             shell = RemoteMachineShellConnection(server)
             shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
             shell.copy_files_local_to_remote('pytests/sg/resources', '{0}/tmp'.format(self.folder_prefix))
             # will install sg only the first time
             self.install(shell)
             pid = self.is_sync_gateway_process_running(shell)
             self.assertNotEqual(pid, 0)
             exist = shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
             self.assertTrue(exist)
             shell.disconnect()
     if self.case_number == 1:
         # Restore the pristine gateway config, then rebuild the buckets.
         shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         self.cluster = Cluster()
         self.cluster.create_default_bucket(self.master, 150)
         task = self.cluster.async_create_sasl_bucket(self.master, 'test_%E-.5', 'password', 150, 1)
         task.result()
         task = self.cluster.async_create_standard_bucket(self.master, 'db', 11219, 150, 1)
         task.result()
Beispiel #23
0
    def run(self):
        """Collect cbcollect_info on the remote server, download the zip to
        self.path, then delete the remote copy (no-op when self.local)."""
        file_name = "%s-%s-diag.zip" % (self.server.ip, time_stamp())
        if not self.local:
            remote_client = RemoteMachineShellConnection(self.server)
            print("Collecting logs from %s\n" % self.server.ip)
            output, error = remote_client.execute_cbcollect_info(
                file_name)
            print("\n".join(error))

            # cbcollect drops the zip in the ssh user's home directory
            # (platform-dependent base path).
            user_path = "/home/"
            if remote_client.info.distribution_type.lower() == 'mac':
                user_path = "/Users/"
            else:
                if self.server.ssh_username == "root":
                    user_path = "/"

            remote_path = "%s%s" % (user_path, self.server.ssh_username)
            status = remote_client.file_exists(remote_path, file_name)
            if not status:
                raise Exception(
                    "%s doesn't exists on server" % file_name)
            status = remote_client.get_file(remote_path, file_name,
                                            "%s/%s" % (
                                                self.path, file_name))
            if status:
                print("Downloading zipped logs from %s" % self.server.ip)
            else:
                raise Exception("Fail to download zipped logs from %s"
                                % self.server.ip)
            # Clean up the remote zip after a successful download.
            remote_client.execute_command(
                "rm -f %s" % os.path.join(remote_path, file_name))
            remote_client.disconnect()
Beispiel #24
0
    def test_AuditEvent(self):
        """Toggle audit via the ``ops`` param: on disable, verify no audit log
        file is created; otherwise validate the reloaded audit configuration
        event against the expected values."""
        auditIns = audit(host=self.master)
        ops = self.input.param("ops", None)
        # NOTE(review): `source` and `user` are unused locals, apparently kept
        # from an older version of this test.
        source = 'internal'
        user = '******'
        rest = RestConnection(self.master)
        #status = rest.setAuditSettings(enabled='true')
        auditIns.setAuditEnable('true')
        if (ops in ['enable', 'disable']):
            if ops == 'disable':
                #status = rest.setAuditSettings(enabled='false')
                auditIns.setAuditEnable('false')
            else:
                #status = rest.setAuditSettings(enabled='true')
                auditIns.setAuditEnable('true')

        if ops == 'disable':
            shell = RemoteMachineShellConnection(self.master)
            try:
                result = shell.file_exists(auditIns.getAuditLogPath(), auditIns.AUDITLOGFILENAME)
            finally:
                shell.disconnect()
            # After disabling, no audit log file should exist in the new path.
            self.assertFalse(result, 'Issue with file getting create in new directory')
        else:
            auditIns = audit(host=self.master)
            # Expected fields of the audit-config-reload event (version 1,
            # 24h rotate interval).
            expectedResults = {"auditd_enabled":auditIns.getAuditStatus(),
                               "descriptors_path":self.changePathWindows(auditIns.getAuditConfigElement('descriptors_path')),
                               "log_path":self.changePathWindows((auditIns.getAuditLogPath())[:-1]), "source":"internal",
                               "user":"******", "rotate_interval":86400, "version":1, 'hostname':self.getHostName(self.master)}
            self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
Beispiel #25
0
 def installBasic(self):
     """Install sync_gateway on every server, then confirm the process
     is running and its log file exists under /root."""
     for node in self.servers:
         conn = RemoteMachineShellConnection(node)
         self.install(conn)
         running_pid = self.is_sync_gateway_process_running(conn)
         self.assertNotEqual(running_pid, 0)
         self.assertTrue(conn.file_exists('/root/', 'gateway.log'))
         conn.disconnect()
Beispiel #26
0
 def installBasic(self):
     """Install sync_gateway on every server, then confirm the process
     is running and gateway.log exists under the prefix tmp dir."""
     for node in self.servers:
         conn = RemoteMachineShellConnection(node)
         self.install(conn)
         running_pid = self.is_sync_gateway_process_running(conn)
         self.assertNotEqual(running_pid, 0)
         self.assertTrue(
             conn.file_exists('{0}/tmp/'.format(self.folder_prefix),
                              'gateway.log'))
         conn.disconnect()
    def test_changeLogPath(self):
        """Point the audit log at a new directory, trigger an auditable
        event on each node, and verify audit.log appears in the new
        location.  The original path is restored regardless of outcome.
        """
        nodes_init = self.input.param("nodes_init", 0)
        auditMaster = audit(host=self.servers[0])
        auditSecNode = audit(host=self.servers[1])
        # Capture original audit log path so the finally block can restore it.
        originalPath = auditMaster.getAuditLogPath()

        try:
            newPath = auditMaster.getAuditLogPath() + "folder"

            # Create the target folder on every node and hand ownership to
            # the couchbase user so ns_server can write there.
            for server in self.servers[:nodes_init]:
                shell = RemoteMachineShellConnection(server)
                try:
                    shell.create_directory(newPath)
                    command = 'chown couchbase:couchbase ' + newPath
                    shell.execute_command(command)
                finally:
                    shell.disconnect()

            source = 'ns_server'
            user = self.master.rest_username
            auditMaster.setAuditLogPath(newPath)

            # Generate an audit event (autofailover settings update) and
            # check the log file materialises under the new path.
            for server in self.servers[:nodes_init]:
                rest = RestConnection(server)
                expectedResults = {
                    'max_nodes': 1,
                    "timeout": 120,
                    'source': source,
                    "user": user,
                    'ip': self.ipAddress,
                    'port': 12345
                }
                rest.update_autofailover_settings(True,
                                                  expectedResults['timeout'])

                self.sleep(120, 'Waiting for new audit file to get created')
                #check for audit.log remotely
                shell = RemoteMachineShellConnection(server)
                try:
                    result = shell.file_exists(newPath,
                                               auditMaster.AUDITLOGFILENAME)
                finally:
                    shell.disconnect()

                # Fix: assert unconditionally -- the previous
                # "if result is False: assertTrue(result, ...)" guard was
                # redundant and obscured the check.
                self.assertTrue(
                    result,
                    'Issue with file getting create in new directory')

        finally:
            auditMaster.setAuditLogPath(originalPath)
Beispiel #28
0
 def serviceInstallSGPath(self):
     """Install the gateway service with an explicit --sgpath pointing at
     a relocated binary, then verify it comes up with its default files."""
     for node in self.servers:
         conn = RemoteMachineShellConnection(node)
         self.assertTrue(self.service_clean(conn))
         self.assertTrue(self.install_gateway(conn))
         # Move the binaries aside so --sgpath has to be honoured.
         conn.execute_command_raw(
             'mv /opt/couchbase-sync-gateway/bin /opt/couchbase-sync-gateway/bin2 '
         )
         output, error = self.run_sync_gateway_service_install(
             conn,
             '--sgpath=/opt/couchbase-sync-gateway/bin2/sync_gateway')
         self.check_normal_error_output(conn, output, error)
         self.assertTrue(self.is_sync_gateway_service_running(conn))
         self.assertTrue(self.is_sync_gateway_process_running(conn))
         self.assertTrue(
             conn.file_exists('/home/sync_gateway/logs',
                              'sync_gateway_error.log'))
         self.assertTrue(conn.file_exists('/home/sync_gateway', 'data'))
         self.assertTrue(
             conn.file_exists('/home/sync_gateway', self.configfile))
 def verify_log_files_exist(self, remotepath=None, redactFileName=None, nonredactFileName=None):
     '''
     Verifies if log files exist after collection
     :param remotepath: absolute path to log files
     :param redactFileName: redacted zip log file name
     :param nonredactFileName: non-redacted zip log file name
     :return:
     '''
     if not remotepath:
         self.fail("Remote path needed to verify if log files exist")
     shell = RemoteMachineShellConnection(self.master)
     try:
         # Fix: the shell previously leaked whenever self.fail() raised.
         if shell.file_exists(remotepath=remotepath, filename=nonredactFileName):
             self.log.info("Regular non-redacted log file exists as expected")
         else:
             self.fail("Regular non-redacted log file does not exist")
         if redactFileName and self.log_redaction_level == "partial":
             if shell.file_exists(remotepath=remotepath, filename=redactFileName):
                 self.log.info("Redacted file exists as expected")
             else:
                 # NOTE(review): a missing redacted file only logs, it does
                 # not fail the test -- confirm this is intentional.
                 self.log.info("Redacted file does not exist")
     finally:
         shell.disconnect()
Beispiel #30
0
 def serviceInstallNegativeCfgPath(self):
     """A service install with a bad cfg path must fail and leave no
     config file, service, or process behind."""
     for node in self.servers:
         conn = RemoteMachineShellConnection(node)
         self.assertTrue(self.service_clean(conn))
         self.assertTrue(self.install_gateway(conn))
         output, error = self.run_sync_gateway_service_install(conn, self.extra_param)
         self.assertTrue(error[0].startswith(self.expected_error))
         self.assertFalse(conn.file_exists("/home/sync_gateway", 'sync_gateway.json'))
         self.assertFalse(self.is_sync_gateway_service_running(conn))
         self.assertFalse(self.is_sync_gateway_process_running(conn))
 def serviceInstallBasic(self):
     """Clean install of the sync_gateway service with ``extra_param``,
     verifying the service and process start and expected files appear."""
     for node in self.servers:
         conn = RemoteMachineShellConnection(node)
         self.assertTrue(self.service_clean(conn))
         self.assertTrue(self.install_gateway(conn))
         # Scrub logs/data/config remnants and pre-create the test dirs.
         conn.execute_command_raw(
             'rm -rf {0}/* {1}/* {2}/sync_gateway.json {3}/tmp/test*; mkdir {3}/tmp/test {3}/tmp/test2'.
                 format(self.logsdir, self.datadir, self.configdir, self.folder_prefix))
         output, error = self.run_sync_gateway_service_install(conn, self.extra_param)
         self.check_normal_error_output(conn, output, error)
         self.assertTrue(self.is_sync_gateway_service_running(conn))
         self.assertTrue(self.is_sync_gateway_process_running(conn))
         # Fall back to the service defaults for any dir not overridden.
         # NOTE(review): the --runbase flag resets *datadir* here -- looks
         # intentional per the service template note, but worth confirming.
         if "--runbase" not in self.extra_param:
             # hardcoded for services LOGS_TEMPLATE_VAR=${RUNBASE_TEMPLATE_VAR}/logs
             self.datadir = '/home/sync_gateway'
         if "--logsdir" not in self.extra_param:
             self.logsdir = '/home/sync_gateway/logs'
         if "--cfgpath" not in self.extra_param:
             self.configdir = '/home/sync_gateway'
         self.assertTrue(conn.file_exists(self.logsdir, 'sync_gateway_error.log'))
         self.assertTrue(conn.file_exists(self.datadir, 'data'))
         self.assertTrue(conn.file_exists(self.configdir, self.configfile))
Beispiel #32
0
 def test_eventing_lifecycle_with_couchbase_cli(self):
     """Eventing lifecycle driven through couchbase-cli:
     import -> deploy -> list -> export -> undeploy -> delete, verifying
     processed results after deploy and that the exported file exists."""
     # This function name is hardcoded inside the exported JSON definition.
     func = "Function_396275055_test_export_function"
     # load some data in the source bucket
     self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
               batch_size=self.batch_size)
     script_dir = os.path.dirname(__file__)
     abs_file_path = os.path.join(script_dir, EXPORTED_FUNCTION.NEW_BUCKET_OP)
     # Fix: the file handle previously leaked; use a context manager.
     with open(abs_file_path, "r") as fh:
         lines = fh.read()
     shell = RemoteMachineShellConnection(self.servers[0])
     try:
         info = shell.extract_remote_info().type.lower()
     finally:
         # Fix: this shell was never disconnected.
         shell.disconnect()
     if info == 'linux':
         self.cli_command_location = testconstants.LINUX_COUCHBASE_BIN_PATH
     elif info == 'windows':
         self.cmd_ext = ".exe"
         self.cli_command_location = testconstants.WIN_COUCHBASE_BIN_PATH_RAW
     elif info == 'mac':
         self.cli_command_location = testconstants.MAC_COUCHBASE_BIN_PATH
     else:
         raise Exception("OS not supported.")
     # create the json file needed on the eventing node
     eventing_node = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=False)
     remote_client = RemoteMachineShellConnection(eventing_node)
     try:
         remote_client.write_remote_file_single_quote("/root", func + ".json", lines)
         # import the function
         self._couchbase_cli_eventing(eventing_node, func, "import",
                                      "SUCCESS: Events imported",
                                      file_name=func + ".json")
         # deploy the function
         self._couchbase_cli_eventing(eventing_node, func, "deploy",
                                      "SUCCESS: Function deployed")
         self.verify_eventing_results(func, self.docs_per_day * 2016,
                                      skip_stats_validation=True)
         # list the function
         self._couchbase_cli_eventing(eventing_node, func, "list",
                                      " Status: Deployed")
         # export the function
         self._couchbase_cli_eventing(eventing_node, func, "export",
                                      "SUCCESS: Function exported to: " + func + "2.json",
                                      file_name=func + "2.json")
         # check if the exported function actually exists
         if not remote_client.file_exists("/root", func + "2.json"):
             self.fail("file does not exist after export")
         # undeploy the function
         self._couchbase_cli_eventing(eventing_node, func, "undeploy",
                                      "SUCCESS: Function undeployed")
         # delete the function
         self._couchbase_cli_eventing(eventing_node, func, "delete",
                                      "SUCCESS: Function deleted")
     finally:
         # Fix: this shell was never disconnected either.
         remote_client.disconnect()
 def test_rotateInterval(self):
     """Set the audit rotate interval, wait past it, and verify both the
     rolled-over archive file and a fresh audit.log exist.

     The original rotate interval is restored regardless of outcome.
     """
     intervalSec = self.input.param("intervalSec", None)
     auditIns = audit(host=self.master)
     rest = RestConnection(self.master)
     originalInt = auditIns.getAuditRotateInterval()
     try:
         # Archives are named <hostname>-<first-event-timestamp>-audit.log.
         firstEventTime = self.getTimeStampForFile(auditIns)
         # Fix: "evetn" typo in the log message.
         self.log.info("first time event is {0}".format(firstEventTime))
         auditIns.setAuditRotateInterval(intervalSec)
         self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
         # An audited action (a login) forces the new log file to appear.
         status, content = rest.validateLogin(self.master.rest_username,
                                              self.master.rest_password,
                                              True,
                                              getContent=True)
         self.sleep(120)
         shell = RemoteMachineShellConnection(self.master)
         try:
             hostname = shell.execute_command("hostname")
             archiveFile = hostname[0][
                 0] + '-' + firstEventTime + "-audit.log"
             self.log.info("Archive File Name is {0}".format(archiveFile))
             result = shell.file_exists(auditIns.pathLogFile, archiveFile)
             self.assertTrue(
                 result,
                 "Archive Audit.log is not created on time interval")
             self.log.info(
                 "Validation of archive File created is True, Audit archive File is created {0}"
                 .format(archiveFile))
             result = shell.file_exists(auditIns.pathLogFile,
                                        auditIns.AUDITLOGFILENAME)
             # Fix: the message previously referred to killing memcached
             # (copied from test_cbServerOps); this test checks roll-over.
             self.assertTrue(
                 result,
                 "Audit.log is not created as per the roll over time specified")
         finally:
             shell.disconnect()
     finally:
         auditIns.setAuditRotateInterval(originalInt)
Beispiel #34
0
 def serviceInstallNegativeCfgPath(self):
     """Negative cfg-path install under /tmp: the installer must error
     out and leave no config file, service, or process behind."""
     for node in self.servers:
         conn = RemoteMachineShellConnection(node)
         self.assertTrue(self.service_clean(conn))
         self.assertTrue(self.install_gateway(conn))
         output, error = self.run_sync_gateway_service_install(
             conn, self.extra_param)
         self.assertTrue(error[0].startswith(self.expected_error))
         # Give the failed install a moment to settle before probing.
         time.sleep(3)
         self.assertFalse(
             conn.file_exists("/tmp/sync_gateway", 'sync_gateway.json'))
         self.assertFalse(self.is_sync_gateway_service_running(conn))
         self.assertFalse(self.is_sync_gateway_process_running(conn))
Beispiel #35
0
        def get_tar(remotepath, filepath, filename, servers, todir="."):
            """Tar up ``filepath`` on each server and fetch the archive
            into ``todir``; failures are logged rather than raised."""
            if type(servers) is not list:
                servers = [servers]
            for server in servers:
                shell = RemoteMachineShellConnection(server)
                try:
                    _ = shell.execute_command("tar -zcvf %s.tar.gz %s" %
                                              (filepath, filepath))
                    file_check = shell.file_exists(remotepath, filename)
                    if not file_check:
                        self.log.error(
                            "Tar File {} doesn't exist".format(filename))
                    tar_file_copied = shell.get_file(remotepath, filename, todir)
                    if not tar_file_copied:
                        self.log.error("Failed to copy Tar file")

                    _ = shell.execute_command("rm -rf %s.tar.gz" % filepath)
                finally:
                    # Fix: the shell connection previously leaked.
                    shell.disconnect()
 def verify_for_recovery_type(self, chosen=None, serverMap=None, buckets=None, recoveryTypeMap=None, fileMap=None):
     """ Verify recovery type is delta or full

     For each failed-over node in ``self.chosen`` and each bucket, the
     presence of check.txt in the bucket's data dir indicates a delta
     recovery kept the files; absence indicates a full recovery wiped them.

     Fix: mutable default arguments ([] / {}) replaced with None.
     NOTE(review): the ``chosen`` parameter is ignored -- the loop reads
     ``self.chosen`` -- kept as-is to preserve existing caller behaviour.
     """
     serverMap = serverMap or {}
     buckets = buckets or []
     recoveryTypeMap = recoveryTypeMap or {}
     fileMap = fileMap or {}
     logic = True
     summary = ""
     for server in self.chosen:
         shell = RemoteMachineShellConnection(serverMap[server.ip])
         try:
             for bucket in buckets:
                 path = fileMap[server.ip][bucket.name]
                 exists = shell.file_exists(path, "check.txt")
                 if recoveryTypeMap[server.ip] == "delta" and not exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip, bucket.name)
                 elif recoveryTypeMap[server.ip] == "full" and exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1}  :: Expected Full, Actual Delta".format(server.ip, bucket.name)
         finally:
             # Fix: disconnect even if a lookup above raises.
             shell.disconnect()
     self.assertTrue(logic, summary)
Beispiel #37
0
    def setUp(self):
        """Per-test setup: on the first test case only, render and push
        the walrus gateway config to every server, install sync_gateway,
        and (re)create the Couchbase buckets used by the suite."""
        super(SGConfigTests, self).setUp()
        for server in self.servers:
            if self.case_number == 1:
                # Render the walrus config template with this server's IP.
                with open(
                        'pytests/sg/resources/gateway_config_walrus_template.json',
                        'r') as file:
                    filedata = file.read()
                    filedata = filedata.replace('LOCAL_IP', server.ip)
                with open('pytests/sg/resources/gateway_config_walrus.json',
                          'w') as file:
                    file.write(filedata)
                # Push the rendered resources to the node and install.
                shell = RemoteMachineShellConnection(server)
                shell.execute_command("rm -rf {0}/tmp/*".format(
                    self.folder_prefix))
                shell.copy_files_local_to_remote(
                    'pytests/sg/resources',
                    '{0}/tmp'.format(self.folder_prefix))
                # will install sg only the first time
                self.install(shell)
                pid = self.is_sync_gateway_process_running(shell)
                self.assertNotEqual(pid, 0)
                exist = shell.file_exists(
                    '{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
                self.assertTrue(exist)
                shell.disconnect()
        if self.case_number == 1:
            # Restore the pristine gateway config, then rebuild buckets
            # (default, SASL, and standard) from shared parameters.
            shutil.copy2('pytests/sg/resources/gateway_config_backup.json',
                         'pytests/sg/resources/gateway_config.json')
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self)
            self.cluster = Cluster()
            shared_params = self._create_bucket_params(server=self.master,
                                                       size=150)
            self.cluster.create_default_bucket(shared_params)
            task = self.cluster.async_create_sasl_bucket(
                name='test_%E-.5',
                password='******',
                bucket_params=shared_params)
            task.result()
            task = self.cluster.async_create_standard_bucket(
                name='db', port=11219, bucket_params=shared_params)

            task.result()
Beispiel #38
0
    def test_changeLogPath(self):
        """Point the audit log at a new directory, trigger an auditable
        event on each node, and verify audit.log appears in the new
        location.  The original path is restored regardless of outcome.
        """
        nodes_init = self.input.param("nodes_init", 0)
        auditMaster = audit(host=self.servers[0])
        auditSecNode = audit(host=self.servers[1])
        # Capture original audit log path so the finally block can restore it.
        originalPath = auditMaster.getAuditLogPath()

        # Create folders on CB server machines and change permission
        try:
            newPath = auditMaster.getAuditLogPath() + "folder"

            for server in self.servers[:nodes_init]:
                shell = RemoteMachineShellConnection(server)
                try:
                    shell.create_directory(newPath)
                    # Hand ownership to the couchbase user so ns_server
                    # can write into the new directory.
                    command = 'chown couchbase:couchbase ' + newPath
                    shell.execute_command(command)
                finally:
                    shell.disconnect()

            source = 'ns_server'
            user = self.master.rest_username
            auditMaster.setAuditLogPath(newPath)

            # Generate an audit event (autofailover settings update) and
            # check the log file materialises under the new path.
            for server in self.servers[:nodes_init]:
                rest = RestConnection(server)
                expectedResults = {'max_nodes':1, "timeout":120, 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
                rest.update_autofailover_settings(True, expectedResults['timeout'])

                self.sleep(120, 'Waiting for new audit file to get created')
                #check for audit.log remotely
                shell = RemoteMachineShellConnection(server)
                try:
                    result = shell.file_exists(newPath, auditMaster.AUDITLOGFILENAME)
                finally:
                    shell.disconnect()

                # Fix: assert unconditionally -- the previous
                # "if result is False: assertTrue(result, ...)" guard was
                # redundant and obscured the check.
                self.assertTrue(result, 'Issue with file getting create in new directory')

        finally:
            auditMaster.setAuditLogPath(originalPath)
    def run(self):
        remote_client = RemoteMachineShellConnection(self.server)
        now = datetime.now()
        day = now.day
        month = now.month
        year = now.year
        file_name = "%s-%s%s%s-diag.zip" % (self.server.ip, month, day, year)
        print "Collecting logs from %s\n" % (self.server.ip)
        output, error = remote_client.execute_cbcollect_info(file_name)
        print "\n".join(output)
        print "\n".join(error)

        user_path = "/home/"
        if self.server.ssh_username == "root":
            user_path = "/"
        if not remote_client.file_exists("%s%s" % (user_path, self.server.ssh_username), file_name):
            raise Exception("%s doesn't exists on server" % (file_name))
        if remote_client.get_file("%s%s" % (user_path, self.server.ssh_username), file_name, "%s/%s" % (self.path, file_name)):
            print "Downloading zipped logs from %s" % (self.server.ip)
        else:
            raise Exception("Fail to download zipped logs from %s" % (self.server.ip))
        remote_client.disconnect()
Beispiel #40
0
    def _load_snapshot(self, server, bucket, file_base=None, overwrite=True):
        """Load data files from a snapshot"""
        # NOTE: legacy Python 2 syntax (print statements) -- this block
        # predates the py3 port of the suite.

        # Node's data dir; snapshots live next to it in "<data>-snapshots".
        dest_data_path = os.path.dirname(server.data_path or
                                         testconstants.COUCHBASE_DATA_PATH)
        src_data_path = "{0}-snapshots".format(dest_data_path)

        print "[perf: _load_snapshot] server = {0} , src_data_path = {1}, dest_data_path = {2}"\
            .format(server.ip, src_data_path, dest_data_path)

        shell = RemoteMachineShellConnection(server)

        # Snapshot tarballs are versioned by the installed build.
        build_name, short_version, full_version = \
            shell.find_build_version("/opt/couchbase/", "VERSION.txt", "cb")

        src_file = self._build_tar_name(bucket, full_version, file_base)

        if not shell.file_exists(src_data_path, src_file):
            print "[perf: _load_snapshot] file '{0}/{1}' does not exist"\
                .format(src_data_path, src_file)
            shell.disconnect()
            return False

        # Preserve the current on-disk state before overwriting it.
        if not overwrite:
            self._save_snapshot(server, bucket,
                                "{0}.tar.gz".format(
                                    time.strftime(PerfDefaults.strftime)))  # TODO: filename

        # Wipe the bucket's current files, then unpack the snapshot.
        rm_cmd = "rm -rf {0}/{1} {0}/{1}-data {0}/_*".format(dest_data_path,
                                                             bucket)
        self._exec_and_log(shell, rm_cmd)

        unzip_cmd = "cd {0}; tar -xvzf {1}/{2}".format(dest_data_path,
                                                       src_data_path, src_file)
        self._exec_and_log(shell, unzip_cmd)

        shell.disconnect()
        return True
Beispiel #41
0
    def _load_snapshot(self, server, bucket, file_base=None, overwrite=True):
        """Load data files from a snapshot"""
        # NOTE: legacy Python 2 syntax (print statements) -- this block
        # predates the py3 port of the suite.

        # Node's data dir; snapshots live next to it in "<data>-snapshots".
        dest_data_path = os.path.dirname(server.data_path
                                         or testconstants.COUCHBASE_DATA_PATH)
        src_data_path = "{0}-snapshots".format(dest_data_path)

        print "[perf: _load_snapshot] server = {0} , src_data_path = {1}, dest_data_path = {2}"\
            .format(server.ip, src_data_path, dest_data_path)

        shell = RemoteMachineShellConnection(server)

        # Snapshot tarballs are versioned by the installed build.
        build_name, short_version, full_version = \
            shell.find_build_version("/opt/couchbase/", "VERSION.txt", "cb")

        src_file = self._build_tar_name(bucket, full_version, file_base)

        if not shell.file_exists(src_data_path, src_file):
            print "[perf: _load_snapshot] file '{0}/{1}' does not exist"\
                .format(src_data_path, src_file)
            shell.disconnect()
            return False

        # Preserve the current on-disk state before overwriting it.
        if not overwrite:
            self._save_snapshot(server, bucket, "{0}.tar.gz".format(
                time.strftime(PerfDefaults.strftime)))  # TODO: filename

        # Wipe the bucket's current files, then unpack the snapshot.
        rm_cmd = "rm -rf {0}/{1} {0}/{1}-data {0}/_*".format(
            dest_data_path, bucket)
        self._exec_and_log(shell, rm_cmd)

        unzip_cmd = "cd {0}; tar -xvzf {1}/{2}".format(dest_data_path,
                                                       src_data_path, src_file)
        self._exec_and_log(shell, unzip_cmd)

        shell.disconnect()
        return True
Beispiel #42
0
    def test_win_uninstall_standalone(self):
        """Uninstall Couchbase Server on Windows via a scheduled task.

        For each server: read the installed version, ensure the matching
        installer exe is present in C:\\tmp (downloading it if needed),
        patch the uninstall batch file, then fire the 'removeme' scheduled
        task and wait for VERSION.txt to disappear.
        """
        query = BuildQuery()
        builds, changes = query.get_all_builds()
        os_version = self.input.test_params['win']

        task = 'uninstall'
        ex_type = 'exe'
        bat_file = 'uninstall.bat'
        version_file = 'VERSION.txt'
        if self.input.test_params["ostype"] == '64':
            Arch = 'x86_64'
            os_type = '64'
        elif self.input.test_params["ostype"] == '32':
            Arch = 'x86'
            os_type = '32'
        else:
            # NOTE(review): on an unknown ostype this only logs and sets an
            # unused flag; Arch/os_type stay unbound and the code below will
            # raise NameError -- confirm whether this should abort instead.
            ok = False
            self.log.error("Unknown os version.")

        # Map the short product alias to the full package name.
        product = self.input.test_params["product"]
        if product == 'cse':
            name = 'couchbase-server-enterprise'
        elif product == 'csse':
            name = 'couchbase-single-server-enterprise'
        elif product == 'csc':
            name = 'couchbase-server-community'
        elif product == 'cssc':
            name = 'couchbase-single-server-community'
        else:
            # NOTE(review): same pattern -- 'name' stays unbound here.
            self.log.error("Unknon product type.")

        # no need later
        cb_server_alias = ['cse','csc']
        cb_single_alias = ['csse','cssc']
        if product in cb_server_alias:
            server_path = "/cygdrive/c/Program Files/Couchbase/Server/"
        elif product in cb_single_alias:
            server_path = "/cygdrive/c/Program Files (x86)/Couchbase/Server/"

        for serverInfo in self.servers:
                # NOTE(review): two shell connections per server are opened
                # and never disconnected.
                remote_client = RemoteMachineShellConnection(serverInfo)
                info = RemoteMachineShellConnection(serverInfo).extract_remote_info()

                # Only attempt uninstall when the server is actually installed.
                exist = remote_client.file_exists(server_path, version_file)
                if exist:
                    build_name, version = remote_client.find_build_version(server_path, version_file)
                    self.log.info('build needed to do auto uninstall {0}'.format(build_name))
                    # find installed build in tmp directory
                    build_name = build_name.rstrip() + ".exe"
                    self.log.info('Check if {0} is in tmp directory'.format(build_name))
                    exist = remote_client.file_exists("/cygdrive/c/tmp/", build_name)
                    if not exist:
                        # The uninstaller needs the original installer exe;
                        # download it when it is missing.
                        build = query.find_build(builds, name, ex_type, Arch, version)
                        downloaded = remote_client.download_binary_in_win(build.url,product,version)
                        if downloaded:
                            self.log.info('Successful download {0}_{1}.exe'.format(product, version))
                        else:
                            self.log.error('Download {0}_{1}.exe failed'.format(product, version))
                    # modify uninstall bat file to change build name.
                    remote_client.modify_bat_file('/cygdrive/c/automation', bat_file,
                                                      product, os_type, os_version, version, task)
                    self.log.info('sleep for 5 seconds before running task schedule uninstall')
                    time.sleep(5)
                    # run task schedule to uninstall Couchbase Server
                    self.log.info('Start to uninstall couchbase {0}_{1}'.format(product, version))
                    output, error = remote_client.execute_command("cmd /c schtasks /run /tn removeme")
                    remote_client.log_command_output(output, error)
                    remote_client.wait_till_file_deleted(server_path, version_file, timeout_in_seconds=600)
                    self.log.info('sleep 15 seconds before running the next job ...')
                    time.sleep(15)
                else:
                    self.log.info('Couchbase server may not install on this server')
    def test_cbServerOps(self):
        """Kill memcached or restart Couchbase Server and verify the audit
        log rolls over: an archive named <hostname>-<timestamp>-audit.log
        is created and a fresh audit.log appears.  On shutdown, also
        validate the audit-daemon shutdown event (id 4097) in the archive,
        and finally validate the reloaded audit configuration event.
        """
        ops = self.input.param("ops", None)
        auditIns = audit(host=self.master)

        #Capture timestamp from first event for filename
        firstEventTime = self.getTimeStampForFile(auditIns)

        # NOTE(review): this shell is never disconnected.
        shell = RemoteMachineShellConnection(self.master)

        #Kill memcached to check for file roll over and new audit.log
        if (ops == "kill"):
            result = shell.kill_memcached()
            self.sleep(10)

        #Stop CB Server to check for file roll over and new audit.log
        if (ops == 'shutdown'):
            try:
                result = shell.stop_couchbase()
                self.sleep(120, 'Waiting for server to shutdown')
            finally:
                # Always restart so the rest of the test (and suite) can run.
                result = shell.start_couchbase()

        #Check for audit.log and for roll over file
        self.sleep(120, 'Waiting for server to start after shutdown')
        rest = RestConnection(self.master)
        #Create an Event for Bucket Creation
        #expectedResults = self.createBucketAudit(self.master, "TestBucketKillShutdown")
        # A login generates an audit event, forcing the new log to be written.
        status, content = rest.validateLogin("Administrator", "password", True, getContent=True)
        self.sleep(30)
        result = shell.file_exists(auditIns.pathLogFile, audit.AUDITLOGFILENAME)
        self.assertTrue(result, "Audit.log is not created when memcached server is killed or stopped")
        hostname = shell.execute_command("hostname")

        archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
        self.log.info ("Archive File expected is {0}".format(auditIns.pathLogFile + archiveFile))
        result = shell.file_exists(auditIns.pathLogFile, archiveFile)
        self.assertTrue(result, "Archive Audit.log is not created when memcached server is killed or stopped")

        #archiveFile = auditIns.currentLogFile + "/" + archiveFile

        if (ops == 'shutdown'):
            # Compare the shutdown event in the archive field-by-field
            # against the expected payload; timestamps are validated for
            # format rather than exact value.
            expectedResult = {"source":"internal", "user":"******", "id":4097, "name":"shutting down audit daemon", "description":"The audit daemon is being shutdown"}
            data = auditIns.returnEvent(4097, archiveFile)
            flag = True
            for items in data:
                if (items == 'timestamp'):
                    tempFlag = auditIns.validateTimeStamp(data['timestamp'])
                    if (tempFlag is False):
                        flag = False
                else:
                    if (isinstance(data[items], dict)):
                        # Nested sections are compared one level deep.
                        for seclevel in data[items]:
                            tempValue = expectedResult[seclevel]
                            if data[items][seclevel] == tempValue:
                                self.log.info ('Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(tempValue, data[items][seclevel], seclevel))
                            else:
                                self.log.info ('Mis-Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(tempValue, data[items][seclevel], seclevel))
                                flag = False
                    else:
                        # NOTE(review): items.encode('utf-8') is a Python 2
                        # idiom; under Python 3 it yields bytes keys and
                        # would raise KeyError -- confirm target runtime.
                        if (data[items] == expectedResult[items]):
                            self.log.info ('Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(expectedResult[items.encode('utf-8')], data[items.encode('utf-8')], items))
                        else:
                            self.log.info ('Mis - Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(expectedResult[items.encode('utf-8')], data[items.encode('utf-8')], items))
                            flag = False
            self.assertTrue(flag, "Shutdown event is not printed")

        # Finally, validate the audit-config-reload event content.
        expectedResults = {"auditd_enabled":auditIns.getAuditConfigElement('auditd_enabled'),
                           "descriptors_path":self.changePathWindows(auditIns.getAuditConfigElement('descriptors_path')),
                           "log_path":self.changePathWindows(auditIns.getAuditLogPath().strip()[:-2]),
                           'source':'internal', 'user':'******',
                           "rotate_interval":auditIns.getAuditConfigElement('rotate_interval'),
                           "version":1, 'hostname':self.getHostName(self.master)}
        self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
Beispiel #44
0
class CommunityTests(CommunityBaseTest):
    """Negative tests verifying that Enterprise-Edition-only features are
    rejected (or absent) on a Community Edition (CE) build.

    Each check drives an EE-only feature (zones, audit, LDAP, X509, RBAC,
    cbbackupmgr, max-TTL, compression, ...) and fails the test if CE
    accepts it.
    """

    def setUp(self):
        super(CommunityTests, self).setUp()
        # Test parameters supplied through the test runner's -p options.
        self.command = self.input.param("command", "")
        self.zone = self.input.param("zone", 1)
        self.replica = self.input.param("replica", 1)
        self.command_options = self.input.param("command_options", '')
        self.set_get_ratio = self.input.param("set_get_ratio", 0.9)
        self.item_size = self.input.param("item_size", 128)
        self.shutdown_zone = self.input.param("shutdown_zone", 1)
        self.do_verify = self.input.param("do-verify", True)
        self.num_node = self.input.param("num_node", 4)
        self.services = self.input.param("services", None)
        self.start_node_services = self.input.param("start_node_services",
                                                    "kv")
        self.add_node_services = self.input.param("add_node_services", "kv")
        self.timeout = 6000
        self.user_add = self.input.param("user_add", None)
        self.user_role = self.input.param("user_role", None)

    def tearDown(self):
        super(CommunityTests, self).tearDown()

    def test_disabled_zone(self):
        """Server groups (zones) are EE-only; creating one must fail on CE."""
        disabled_zone = False
        zone_name = "group1"
        serverInfo = self.servers[0]
        self.rest = RestConnection(serverInfo)
        try:
            self.log.info("create zone name 'group1'!")
            result = self.rest.add_zone(zone_name)
            print("result  ", result)
        except Exception as e:
            if e:
                print(e)
                # CE is expected to reject the call with an error.
                disabled_zone = True
        if not disabled_zone:
            self.fail("CE version should not have zone feature")

    def check_audit_available(self):
        """Audit settings are EE-only; reading them must fail on CE."""
        audit_available = False
        try:
            self.rest.getAuditSettings()
            audit_available = True
        except Exception as e:
            if e:
                print(e)
        if audit_available:
            self.fail("This feature 'audit' only available on "
                      "Enterprise Edition")

    def check_ldap_available(self):
        """LDAP settings are EE-only; clearing them must fail on CE."""
        ldap_available = False
        self.rest = RestConnection(self.master)
        try:
            s, c, h = self.rest.clearLDAPSettings()
            if s:
                ldap_available = True
        except Exception as e:
            if e:
                print(e)
        if ldap_available:
            self.fail("This feature 'ldap' only available on "
                      "Enterprise Edition")

    def check_set_services(self):
        """Re-init the master node with self.services and verify that CE
        only accepts the service combinations allowed for its version."""
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        # Initialize so a raised REST call cannot leave `status` unbound
        # (the original code hit NameError in that case).
        status = None
        try:
            status = self.rest.init_node_services(hostname=self.master.ip,
                                                  services=[self.services])
        except Exception as e:
            if e:
                print(e)
        if self.services == "kv":
            if status:
                self.log.info("CE could set {0} only service.".format(
                    self.services))
            else:
                self.fail("Failed to set {0} only service.".format(
                    self.services))
        elif self.services == "index,kv":
            if status:
                self.fail("CE does not support kv and index on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "kv,n1ql":
            if status:
                self.fail("CE does not support kv and n1ql on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "kv,eventing":
            if status:
                self.fail("CE does not support kv and eventing on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "index,n1ql":
            if status:
                self.fail("CE does not support index and n1ql on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "index,kv,n1ql":
            if status:
                self.log.info(
                    "CE could set all services {0} on same nodes.".format(
                        self.services))
            else:
                self.fail("Failed to set kv, index and query services on CE")
        elif self.version[:5] in COUCHBASE_FROM_WATSON:
            # Watson and later allow fts combinations; eventing stays EE-only
            # from vulcan.
            if self.version[:
                            5] in COUCHBASE_FROM_VULCAN and "eventing" in self.services:
                if status:
                    self.fail("CE does not support eventing in vulcan")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,kv":
                if status:
                    self.fail(
                        "CE does not support fts, index and kv on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,n1ql":
                if status:
                    self.fail(
                        "CE does not support fts, index and n1ql on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,kv,n1ql":
                if status:
                    self.fail(
                        "CE does not support fts, kv and n1ql on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,kv,n1ql":
                if status:
                    self.log.info(
                        "CE could set all services {0} on same nodes.".format(
                            self.services))
                else:
                    self.fail("Failed to set "
                              "fts, index, kv, and query services on CE")
        else:
            self.fail("some services don't support")

    def check_set_services_when_add_node(self):
        """Initialize the master with start_node_services, then add a second
        node with add_node_services and verify CE service restrictions on
        both the init and the rebalance path."""
        self.rest.force_eject_node()
        sherlock_services_in_ce = ["kv", "index,kv,n1ql"]
        watson_services_in_ce = ["kv", "index,kv,n1ql", "fts,index,kv,n1ql"]
        self.sleep(5, "wait for node reset done")
        # Pre-bind so a raised call cannot leave these names unbound
        # (the original code hit NameError in that case).
        status = None
        init_node = None
        try:
            self.log.info("Initialize node with services {0}".format(
                self.start_node_services))
            status = self.rest.init_node_services(
                hostname=self.master.ip, services=[self.start_node_services])
            init_node = self.cluster.async_init_node(
                self.master, services=[self.start_node_services])
        except Exception as e:
            if e:
                print(e)
        if not status:
            if self.version not in COUCHBASE_FROM_WATSON and \
                         self.start_node_services not in sherlock_services_in_ce:
                self.log.info(
                    "initial services setting enforced in Sherlock CE")
            elif self.version in COUCHBASE_FROM_WATSON and \
                         self.start_node_services not in watson_services_in_ce:
                self.log.info("initial services setting enforced in Watson CE")

        elif status and init_node is not None and init_node.result() != 0:
            add_node = False
            try:
                self.log.info("node with services {0} try to add".format(
                    self.add_node_services))
                add_node = self.cluster.rebalance(
                    self.servers[:2],
                    self.servers[1:2], [],
                    services=[self.add_node_services])
            except Exception:
                pass
            if add_node:
                self.get_services_map()
                self.get_nodes_from_services_map(get_all_nodes=True)
                # `services_map` (renamed from `map`, which shadowed the
                # builtin) maps node ip -> configured services.
                services_map = self.get_nodes_services()
                if services_map[self.master.ip] == self.start_node_services and \
                    services_map[self.servers[1].ip] == self.add_node_services:
                    self.log.info(
                        "services set correctly when node added & rebalance")
                else:
                    self.fail("services set incorrectly when node added & rebalance. "
                        "cluster expected services: {0}; set cluster services {1} ."
                        "add node expected srv: {2}; set add node srv {3}"\
                        .format(services_map[self.master.ip], self.start_node_services, \
                         services_map[self.servers[1].ip], self.add_node_services))
            else:
                if self.version not in COUCHBASE_FROM_WATSON:
                    if self.start_node_services in ["kv", "index,kv,n1ql"] and \
                          self.add_node_services not in ["kv", "index,kv,n1ql"]:
                        self.log.info("services are enforced in CE")
                    elif self.start_node_services not in [
                            "kv", "index,kv,n1ql"
                    ]:
                        self.log.info("services are enforced in CE")
                    else:
                        self.fail("maybe bug in add node")
                elif self.version in COUCHBASE_FROM_WATSON:
                    if self.start_node_services in ["kv", "index,kv,n1ql",
                         "fts,index,kv,n1ql"] and self.add_node_services not in \
                                    ["kv", "index,kv,n1ql", "fts,index,kv,n1ql"]:
                        self.log.info("services are enforced in CE")
                    elif self.start_node_services not in [
                            "kv", "index,kv,n1ql", "fts,index,kv,n1ql"
                    ]:
                        self.log.info("services are enforced in CE")
                    else:
                        self.fail("maybe bug in add node")
        else:
            self.fail("maybe bug in node initialization")

    def check_full_backup_only(self):
        """ for windows vm, ask IT to put uniq.exe at
            /cygdrive/c/Program Files (x86)/ICW/bin directory """

        self.remote = RemoteMachineShellConnection(self.master)
        """ put params items=0 in test param so that init items = 0 """
        # Load 1000 items, then verify a full backup captures exactly 1000.
        self.remote.execute_command("{0}cbworkloadgen -n {1}:8091 -j -i 1000 " \
                                    "-u Administrator -p password" \
                                            .format(self.bin_path, self.master.ip))
        """ delete backup location before run backup """
        self.remote.execute_command("rm -rf {0}*".format(self.backup_location))
        output, error = self.remote.execute_command("ls -lh {0}".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        """ first full backup """
        self.remote.execute_command("{0}cbbackup http://{1}:8091 {2} -m full " \
                                    "-u Administrator -p password"\
                                    .format(self.bin_path,
                                            self.master.ip,
                                            self.backup_c_location))
        output, error = self.remote.execute_command("ls -lh {0}*/".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        output, error = self.remote.execute_command("{0}cbtransfer -u Administrator "\
                                           "-p password {1}*/*-full/ " \
                                           "stdout: | grep set | uniq | wc -l"\
                                           .format(self.bin_path,
                                                   self.backup_c_location))
        self.remote.log_command_output(output, error)
        if int(output[0]) != 1000:
            self.fail("full backup did not work in CE. "
                      "Expected 1000, actual: {0}".format(output[0]))
        # Load 1000 more items with a distinct prefix, then run the
        # requested (diff/accu) backup mode; in CE it must behave as full.
        self.remote.execute_command("{0}cbworkloadgen -n {1}:8091 -j -i 1000 "\
                                    " -u Administrator -p password --prefix=t_"
                                    .format(self.bin_path, self.master.ip))
        """ do different backup mode """
        self.remote.execute_command("{0}cbbackup -u Administrator -p password "\
                                    "http://{1}:8091 {2} -m {3}"\
                                    .format(self.bin_path,
                                            self.master.ip,
                                            self.backup_c_location,
                                            self.backup_option))
        output, error = self.remote.execute_command("ls -lh {0}".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        output, error = self.remote.execute_command("{0}cbtransfer -u Administrator "\
                                           "-p password {1}*/*-{2}/ stdout: "\
                                           "| grep set | uniq | wc -l"\
                                           .format(self.bin_path,
                                                   self.backup_c_location,
                                                   self.backup_option))
        self.remote.log_command_output(output, error)
        if int(output[0]) == 2000:
            # CE ignored the diff option and backed up everything again.
            self.log.info("backup option 'diff' is enforced in CE")
        elif int(output[0]) == 1000:
            self.fail("backup option 'diff' is not enforced in CE. "
                      "Expected 2000, actual: {0}".format(output[0]))
        else:
            self.fail("backup failed to backup correct items")
        self.remote.disconnect()

    def check_ent_backup(self):
        """ for CE version from Watson, cbbackupmgr exe file should not in bin """
        command = "cbbackupmgr"
        self.remote = RemoteMachineShellConnection(self.master)
        self.log.info("check if {0} in {1} directory".format(
            command, self.bin_path))
        found = self.remote.file_exists(self.bin_path, command)
        if found:
            self.log.info("found {0} in {1} directory".format(
                command, self.bin_path))
            self.fail("CE from Watson should not contain {0}".format(command))
        elif not found:
            self.log.info("Ent. backup in CE is enforced, not in bin!")
        self.remote.disconnect()

    def check_memory_optimized_storage_mode(self):
        """ from Watson, CE should not have option 'memory_optimized' to set """
        self.rest.force_eject_node()
        self.sleep(5, "wait for node reset done")
        # Pre-bind so a raised call cannot leave `status` unbound.
        status = False
        try:
            self.log.info("Initialize node with 'Memory Optimized' option")
            status = self.rest.set_indexer_storage_mode(
                username=self.input.membase_settings.rest_username,
                password=self.input.membase_settings.rest_password,
                storageMode='memory_optimized')
        except Exception as ex:
            if ex:
                print(ex)
        if not status:
            self.log.info("Memory Optimized setting enforced in CE "
                          "Could not set memory_optimized option")
        else:
            self.fail("Memory Optimzed setting does not enforced in CE "
                      "We could set this option in")

    def check_x509_cert(self):
        """ from Watson, X509 certificate only support in EE """
        api = self.rest.baseUrl + "pools/default/certificate?extended=true"
        self.log.info("request to get certificate at "
                      "'pools/default/certificate?extended=true' "
                      "should return False")
        # Pre-bind so a raised request cannot leave these names unbound.
        status = False
        content = ""
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("This X509 certificate feature only available in EE")
        elif not status:
            if "requires enterprise edition" in content:
                self.log.info("X509 cert is enforced in CE")

    def check_roles_base_access(self):
        """ from Watson, roles base access for admin should not in in CE """
        if self.user_add is None:
            self.fail(
                "We need to pass user name (user_add) to run this test. ")
        if self.user_role is None:
            self.fail(
                "We need to pass user roles (user_role) to run this test. ")
        api = self.rest.baseUrl + "settings/rbac/users/" + self.user_add
        self.log.info("url to run this test: %s" % api)
        """ add admin user """
        param = "name=%s&roles=%s" % (self.user_add, self.user_role)
        # Pre-bind so a raised request cannot leave `status` unbound.
        status = False
        try:
            status, content, header = self.rest._http_request(
                api, 'PUT', param)
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not allow to add admin users")
        else:
            self.log.info("roles base is enforced in CE! ")

    def check_root_certificate(self):
        """ from watson, ce should not see root certificate
            manual test:
            curl -u Administrator:password -X GET
                            http://localhost:8091/pools/default/certificate """
        api = self.rest.baseUrl + "pools/default/certificate"
        # Pre-bind so a raised request cannot leave these names unbound.
        status = False
        content = ""
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not see root certificate!")
        elif "requires enterprise edition" in content:
            self.log.info("root certificate is enforced in CE! ")

    def check_settings_audit(self):
        """ from watson, ce should not set audit
            manual test:
            curl -u Administrator:password -X GET
                            http://localhost:8091/settings/audit """
        api = self.rest.baseUrl + "settings/audit"
        # Pre-bind so a raised request cannot leave these names unbound.
        status = False
        content = ""
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not allow to set audit !")
        elif "requires enterprise edition" in content:
            self.log.info("settings audit is enforced in CE! ")

    def check_infer(self):
        """ from watson, ce should not see infer
            manual test:
            curl -H "Content-Type: application/json" -X POST
                 -d '{"statement":"infer `bucket_name`;"}'
                       http://localhost:8093/query/service
            test params: new_services=kv-index-n1ql,default_bucket=False """
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({"statement": "infer `%s` ;" % bucket})
        # Pre-bind so a raised request cannot leave `json_parsed` unbound.
        json_parsed = {}
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed["status"] == "success":
            self.fail("CE should not allow to run INFER !")
        elif json_parsed["status"] == "fatal":
            self.log.info("INFER is enforced in CE! ")

    def check_auto_complete(self):
        """ this feature has not complete to block in CE """

    """ Check new features from spock start here """

    def check_cbbackupmgr(self):
        """ cbbackupmgr should not available in CE from spock """
        if self.cb_version[:5] in COUCHBASE_FROM_SPOCK:
            file_name = "cbbackupmgr" + self.file_extension
            self.log.info("check if cbbackupmgr in bin dir in CE")
            # NOTE(review): relies on self.remote being opened by the base
            # class setUp — confirm before reusing this check standalone.
            result = self.remote.file_exists(self.bin_path, file_name)
            if result:
                self.fail("cbbackupmgr should not in bin dir of CE")
            else:
                self.log.info("cbbackupmgr is enforced in CE")
        self.remote.disconnect()

    def test_max_ttl_bucket(self):
        """
            From vulcan, EE bucket has has an option to set --max-ttl, not it CE.
            This test is make sure CE could not create bucket with option --max-ttl
            This test must pass default_bucket=False
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/pools/default/buckets \
                                 -d name=bucket0 \
                                 -d maxTTL=100 \
                                 -d authType=sasl \
                                 -d ramQuotaMB=100 '.format(self.master.ip)
        if self.cli_test:
            cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
                --password password --bucket bucket0 --bucket-type couchbase \
                --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
                --bucket-eviction-policy fullEviction --enable-flush 0 \
                --enable-index-replica 1 --max-ttl 200".format(
                self.bin_path, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Max TTL is supported in enterprise edition only"
        if self.cli_test:
            mesg = "Maximum TTL can only be configured on enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("max ttl feature should not in Community Edition")
        buckets = RestConnection(self.master).get_buckets()
        if buckets:
            for bucket in buckets:
                self.log.info("bucekt in cluser: {0}".format(bucket.name))
                if bucket.name == "bucket0":
                    self.fail("Failed to enforce feature max ttl in CE.")
        conn.disconnect()

    def test_setting_audit(self):
        """
           CE does not allow to set audit from vulcan 5.5.0
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
              http://{0}:8091/settings/audit \
              -d auditdEnabled=true '.format(self.master.ip)
        if self.cli_test:
            cmd = "{0}couchbase-cli setting-audit -c {1}:8091 -u Administrator \
                -p password --audit-enabled 1 --audit-log-rotate-interval 604800 \
                --audit-log-path /opt/couchbase/var/lib/couchbase/logs "\
                .format(self.bin_path, self.master.ip)

        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "This http API endpoint requires enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("setting-audit feature should not in Community Edition")
        conn.disconnect()

    def test_setting_autofailover_enterprise_only(self):
        """
           CE does not allow set auto failover if disk has issue
           and failover group from vulcan 5.5.0
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        self.failover_disk_period = self.input.param("failover_disk_period",
                                                     False)
        self.failover_server_group = self.input.param("failover_server_group",
                                                      False)

        failover_disk_period = ""
        if self.failover_disk_period:
            if self.cli_test:
                failover_disk_period = "--failover-data-disk-period 300"
            else:
                failover_disk_period = "-d failoverOnDataDiskIssues[timePeriod]=300"
        failover_server_group = ""
        if self.failover_server_group and self.cli_test:
            failover_server_group = "--enable-failover-of-server-group 1"

        # NOTE(review): the REST command interpolates
        # self.failover_server_group (a bool param) rather than the local
        # failover_server_group flag string — confirm this is intended.
        cmd = 'curl -X POST -u Administrator:password \
              http://{0}:8091/settings/autoFailover -d enabled=true -d timeout=120 \
              -d maxCount=1 \
              -d failoverOnDataDiskIssues[enabled]=true {1} \
              -d failoverServerGroup={2}'.format(self.master.ip,
                                                 failover_disk_period,
                                                 self.failover_server_group)
        if self.cli_test:
            cmd = "{0}couchbase-cli setting-autofailover -c {1}:8091 \
                   -u Administrator -p password \
                   --enable-failover-on-data-disk-issues 1 {2} {3} "\
                  .format(self.bin_path, self.master.ip,
                          failover_disk_period,
                          failover_server_group)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Auto failover on Data Service disk issues can only be " + \
               "configured on enterprise edition"
        if not self.cli_test:
            if self.failover_disk_period or \
                                   self.failover_server_group:
                if output and not error:
                    self.fail("setting autofailover disk issues feature\
                               should not in Community Edition")
        else:
            if self.failover_server_group:
                mesg = "--enable-failover-of-server-groups can only be " + \
                       "configured on enterprise edition"

        if output and mesg not in str(output[0]):
            self.fail("Setting EE autofailover features \
                       should not in Community Edition")
        else:
            self.log.info("EE setting autofailover are disable in CE")
        conn.disconnect()

    def test_set_bucket_compression(self):
        """
           CE does not allow to set bucket compression to bucket
           from vulcan 5.5.0.   Mode compression: off,active,passive
           Note: must set defaultbucket=False for this test
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        self.compression_mode = self.input.param("compression_mode", "off")
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/pools/default/buckets \
                                 -d name=bucket0 \
                                 -d compressionMode={1} \
                                 -d authType=sasl \
                                 -d ramQuotaMB=100 '.format(
            self.master.ip, self.compression_mode)
        if self.cli_test:
            cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
                --password password --bucket bucket0 --bucket-type couchbase \
                --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
                --bucket-eviction-policy fullEviction --enable-flush 0 \
                --enable-index-replica 1 --compression-mode {2}".format(
                self.bin_path, self.master.ip, self.compression_mode)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Compression mode is supported in enterprise edition only"
        if self.cli_test:
            mesg = "Compression mode can only be configured on enterprise edition"
        if output and mesg not in str(output[0]):
            self.fail("Setting bucket compression should not in CE")
        conn.disconnect()
# Beispiel #45
# 0
class NodeHelper:
    """Drives the Couchbase Server install lifecycle on one node over SSH:
    reachability check, uninstall, (pre/post) install, cluster init and
    cleanup.

    Relies on the module-level ``params`` dict, ``log`` logger and the
    ``install_constants`` command tables defined elsewhere in this file.
    """
    def __init__(self, node):
        # node: server descriptor (ip, credentials, services, paths).
        self.node = node
        self.ip = node.ip
        # NOTE(review): reads the module-global ``params`` rather than an
        # argument -- assumes the install script populated it beforehand.
        self.params = params
        self.build = None
        self.queue = None
        self.thread = None
        self.rest = None
        self.install_success = False
        self.connect_ok = False
        self.shell = None
        self.info = None
        self.enable_ipv6 = False
        self.check_node_reachable()
        self.nonroot = self.shell.nonroot
        # Non-root installs use an alternate command table.
        self.actions_dict = install_constants.NON_ROOT_CMDS \
            if self.nonroot else install_constants.CMDS

    def check_node_reachable(self):
        """Open an SSH shell to the node, retrying for up to 60 seconds."""
        start_time = time.time()
        # ~3 attempts: 60s window with a 20s back-off after each failure.
        while time.time() < start_time + 60:
            try:
                self.shell = RemoteMachineShellConnection(self.node)
                self.info = self.shell.extract_remote_info()
                self.connect_ok = True
                if self.connect_ok:
                    break
            except Exception as e:
                log.warning("{0} unreachable, {1}, retrying.."
                            .format(self.ip, e))
                time.sleep(20)

    def get_os(self):
        """Return a normalized, lower-case OS identifier string."""
        # ``os`` here is a local string, not the stdlib module.
        os = self.info.distribution_version.lower()
        to_be_replaced = ['\n', ' ', 'gnu/linux']
        for _ in to_be_replaced:
            if _ in os:
                os = os.replace(_, '')
        # macOS builds ship as .dmg; keep only "major.minor".
        if self.info.deliverable_type == "dmg":
            major_version = os.split('.')
            os = major_version[0] + '.' + major_version[1]
        return os

    def uninstall_cb(self):
        """Remove any existing Couchbase install (platform-specific)."""
        need_nonroot_relogin = False
        if self.shell.nonroot:
            # Non-root installs are removed under a different login;
            # reconnect now and switch back at the end.
            self.node.ssh_username = "******"
            self.shell = RemoteMachineShellConnection(self.node)
            need_nonroot_relogin = True
        if self.actions_dict[self.info.deliverable_type]["uninstall"]:
            cmd = self.actions_dict[self.info.deliverable_type]["uninstall"]
            if "msi" in cmd:
                '''WINDOWS UNINSTALL'''
                # Kill server processes, then locate the installed .msi so
                # the uninstall command can reference it by filename.
                self.shell.terminate_processes(self.info, [s for s in testconstants.WIN_PROCESSES_KILLED])
                self.shell.terminate_processes(self.info,
                                               [s + "-*" for s in testconstants.COUCHBASE_FROM_VERSION_3])
                installed_version, _ = self.shell.execute_command(
                    "cat " + install_constants.DEFAULT_INSTALL_DIR["WINDOWS_SERVER"] + "VERSION.txt")
                if len(installed_version) == 1:
                    installed_msi, _ = self.shell.execute_command(
                        "cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] + "; ls *" + installed_version[
                            0] + "*.msi")
                    if len(installed_msi) == 1:
                        self.shell.execute_command(
                            self.actions_dict[self.info.deliverable_type]["uninstall"]
                                .replace("installed-msi", installed_msi[0]))
                for browser in install_constants.WIN_BROWSERS:
                    self.shell.execute_command("taskkill /F /IM " + browser + " /T")
            else:
                # Unix-like: retry the uninstall command until it prints '1'
                # (success marker) or the timeout elapses.
                duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["uninstall"]
                start_time = time.time()
                while time.time() < start_time + timeout:
                    try:
                        o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
                        if o == ['1']:
                            break
                        self.wait_for_completion(duration, event)
                    except Exception as e:
                        log.warning("Exception {0} occurred on {1}, retrying.."
                                    .format(e, self.ip))
                        self.wait_for_completion(duration, event)
            self.shell.terminate_processes(self.info, install_constants.PROCESSES_TO_TERMINATE)

        if need_nonroot_relogin:
            self.node.ssh_username = "******"
            self.shell = RemoteMachineShellConnection(self.node)

    def pre_install_cb(self):
        """Run pre-install steps; currently only macOS dmg attach."""
        if self.actions_dict[self.info.deliverable_type]["pre_install"]:
            cmd = self.actions_dict[self.info.deliverable_type]["pre_install"]
            duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["pre_install"]
            if cmd is not None and "HDIUTIL_DETACH_ATTACH" in cmd:
                start_time = time.time()
                while time.time() < start_time + timeout:
                    try:
                        ret = hdiutil_attach(self.shell, self.build.path)
                        if ret:
                            break
                        self.wait_for_completion(duration, event)
                    except Exception as e:
                        log.warning("Exception {0} occurred on {1}, retrying..".format(e, self.ip))
                        self.wait_for_completion(duration, event)

    def install_cb(self):
        """Install the downloaded build, retrying until success or timeout.

        Runs pre_install_cb() first and post_install_cb() afterwards.
        """
        self.pre_install_cb()
        if self.actions_dict[self.info.deliverable_type]["install"]:
            if "suse" in self.get_os():
                cmd = self.actions_dict[self.info.deliverable_type]["suse_install"]
            else:
                cmd = self.actions_dict[self.info.deliverable_type]["install"]
            # Substitute the placeholders baked into the command template.
            cmd = cmd.replace("buildbinary", self.build.name)
            cmd = cmd.replace("buildpath", self.build.path)
            cmd = cmd.replace("mountpoint", "/tmp/couchbase-server-" + params["version"])
            duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["install"]
            start_time = time.time()
            while time.time() < start_time + timeout:
                try:
                    o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
                    if o == ['1']:
                        break
                    self.wait_for_completion(duration, event)
                except Exception as e:
                    log.warning("Exception {0} occurred on {1}, retrying.."
                                .format(e, self.ip))
                    self.wait_for_completion(duration, event)
        self.post_install_cb()

    def post_install_cb(self):
        """Verify/finish the install, retrying with platform-specific recovery."""
        duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["post_install"]
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                if self.actions_dict[self.info.deliverable_type]["post_install"]:
                    cmd = self.actions_dict[self.info.deliverable_type]["post_install"].replace("buildversion", self.build.version)
                    o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
                    if o == ['1']:
                        break
                    else:
                        if self.actions_dict[self.info.deliverable_type]["post_install_retry"]:
                            if self.info.deliverable_type == "msi":
                                # Windows: inspect the msi log for a downgrade
                                # marker (vi call strips BOM/encoding first).
                                check_if_downgrade, _ = self.shell.execute_command(
                                    "cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] +
                                    "; vi +\"set nobomb | set fenc=ascii | x\" install_status.txt; "
                                    "grep 'Adding WIX_DOWNGRADE_DETECTED property' install_status.txt")
                                # Debug output only.
                                print((check_if_downgrade * 10))
                            else:
                                self.shell.execute_command(
                                    self.actions_dict[self.info.deliverable_type]["post_install_retry"],
                                    debug=self.params["debug_logs"])
                        self.wait_for_completion(duration, event)
            except Exception as e:
                log.warning("Exception {0} occurred on {1}, retrying.."
                            .format(e, self.ip))
                self.wait_for_completion(duration, event)

    def set_cbft_env_options(self, name, value, retries=3):
        """Set a CBFT environment option (Linux only) and restart the server."""
        if self.get_os() in install_constants.LINUX_DISTROS:
            while retries > 0:
                if self.shell.file_exists("/opt/couchbase/bin/", "couchbase-server"):
                    ret, _ = self.shell.execute_command(install_constants.CBFT_ENV_OPTIONS[name].format(value))
                    self.shell.stop_server()
                    self.shell.start_server()
                    time.sleep(10)
                    if ret == ['1']:
                        log.info("{0} set to {1} on {2}"
                                 .format(name, value, self.ip))
                        break
                else:
                    time.sleep(20)
                retries -= 1
            else:
                # while/else: runs only when retries are exhausted
                # without a successful ``break`` above.
                print_result_and_exit("Unable to set fts_query_limit on {0}"
                                      .format(self.ip))

    def _get_cli_path(self):
        """Return the platform-appropriate couchbase-cli directory."""
        if self.get_os() in install_constants.LINUX_DISTROS:
            return install_constants.DEFAULT_CLI_PATH["LINUX_DISTROS"]
        elif self.get_os() in install_constants.MACOS_VERSIONS:
            return install_constants.DEFAULT_CLI_PATH["MACOS_VERSIONS"]
        elif self.get_os() in install_constants.WINDOWS_SERVER:
            return install_constants.DEFAULT_CLI_PATH["WINDOWS_SERVER"]

    def _set_ip_version(self):
        """Run node-init with the IP family selected by params["enable_ipv6"]."""
        if params["enable_ipv6"]:
            self.enable_ipv6 = True
            # Strip the surrounding brackets from a "[...]" IPv6 literal.
            if self.node.ip.startswith("["):
                hostname = self.node.ip[self.node.ip.find("[") + 1:self.node.ip.find("]")]
            else:
                hostname = self.node.ip
            cmd = install_constants.NODE_INIT["ipv6"]\
                .format(self._get_cli_path(),
                        self.ip,
                        hostname,
                        self.node.rest_username,
                        self.node.rest_password)
        else:
            cmd = install_constants.NODE_INIT["ipv4"]\
                .format(self._get_cli_path(),
                        self.ip,
                        self.node.rest_username,
                        self.node.rest_password)

        self.shell.execute_command(cmd)

    def pre_init_cb(self):
        """Best-effort pre-init: set IP family and optional fts_query_limit."""
        try:
            self._set_ip_version()

            if params["fts_query_limit"] > 0:
                self.set_cbft_env_options("fts_query_limit",
                                          params["fts_query_limit"])
        except Exception as e:
            log.warning("Exception {0} occurred during pre-init".format(e))

    def post_init_cb(self):
        # Optionally change node name and restart server
        if params.get('use_domain_names', False):
            RemoteUtilHelper.use_hostname_for_server_settings(self.node)

        # Optionally disable consistency check
        # (``self.rest`` is set by init_cb() before this is called).
        if params.get('disable_consistency', False):
            self.rest.set_couchdb_option(section='couchdb',
                                         option='consistency_check_ratio',
                                         value='0.0')

    def get_services(self):
        """Return the node's service list; defaults to ["kv"]."""
        if not self.node.services:
            return ["kv"]
        elif self.node.services:
            return self.node.services.split(',')

    def allocate_memory_quotas(self):
        """Assign per-service memory quotas, deducting each from the KV quota."""
        kv_quota = 0
        info = self.rest.get_nodes_self()

        start_time = time.time()
        # NOTE(review): ``info`` is fetched once before the loop, so each
        # retry recomputes from the same snapshot -- verify whether a
        # re-fetch inside the loop was intended.
        while time.time() < start_time + 30 and kv_quota == 0:
            kv_quota = int(info.mcdMemoryReserved * testconstants.CLUSTER_QUOTA_RATIO)
            time.sleep(1)

        self.services = self.get_services()
        if "index" in self.services:
            log.info("Setting INDEX memory quota as {0} MB on {1}"
                     .format(testconstants.INDEX_QUOTA, self.ip))
            self.rest.set_service_memoryQuota(
                service='indexMemoryQuota',
                memoryQuota=testconstants.INDEX_QUOTA)
            kv_quota -= testconstants.INDEX_QUOTA
        if "fts" in self.services:
            log.info("Setting FTS memory quota as {0} MB on {1}"
                     .format(params["fts_quota"], self.ip))
            self.rest.set_service_memoryQuota(service='ftsMemoryQuota',
                                              memoryQuota=params["fts_quota"])
            kv_quota -= params["fts_quota"]
        if "cbas" in self.services:
            log.info("Setting CBAS memory quota as {0} MB on {1}"
                     .format(testconstants.CBAS_QUOTA, self.ip))
            self.rest.set_service_memoryQuota(
                service="cbasMemoryQuota",
                memoryQuota=testconstants.CBAS_QUOTA)
            kv_quota -= testconstants.CBAS_QUOTA
        if "kv" in self.services:
            # Clamp to the minimum supported KV quota.
            if kv_quota < testconstants.MIN_KV_QUOTA:
                log.warning("KV memory quota is {0}MB but needs to be at least {1}MB on {2}"
                            .format(kv_quota,
                                    testconstants.MIN_KV_QUOTA,
                                    self.ip))
                kv_quota = testconstants.MIN_KV_QUOTA
            log.info("Setting KV memory quota as {0} MB on {1}"
                     .format(kv_quota, self.ip))
        self.rest.init_cluster_memoryQuota(self.node.rest_username,
                                           self.node.rest_password,
                                           kv_quota)

    def init_cb(self):
        """Initialize the node as a cluster member, retrying until timeout.

        Sets data/index paths, memory quotas, services and (for index
        nodes) the storage mode, then runs post_init_cb().
        """
        duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["init"]
        self.wait_for_completion(duration * 2, event)
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                init_success = False
                self.pre_init_cb()

                self.rest = RestConnection(self.node)
                # Make sure that data_and index_path are writable by couchbase user
                for path in set([_f for _f in [self.node.data_path, self.node.index_path] if _f]):
                    for cmd in ("rm -rf {0}/*".format(path),
                                "chown -R couchbase:couchbase {0}".format(path)):
                        self.shell.execute_command(cmd)
                self.rest.set_data_path(data_path=self.node.data_path,
                                        index_path=self.node.index_path)
                self.allocate_memory_quotas()
                self.rest.init_node_services(hostname=None,
                                             username=self.node.rest_username,
                                             password=self.node.rest_password,
                                             services=self.get_services())

                if "index" in self.get_services():
                    self.rest.set_indexer_storage_mode(
                        storageMode=params["storage_mode"])

                self.rest.init_cluster(username=self.node.rest_username,
                                       password=self.node.rest_password)
                init_success = True
                if init_success:
                    break
                self.wait_for_completion(duration, event)
            except Exception as e:
                log.warning("Exception {0} occurred on {1}, retrying.."
                            .format(e, self.ip))
                self.wait_for_completion(duration, event)
        self.post_init_cb()

    def wait_for_completion(self, duration, event):
        """Sleep for ``duration`` seconds, logging ``event`` in debug mode."""
        if params["debug_logs"]:
            log.info(event.format(duration, self.ip))
        time.sleep(duration)

    def cleanup_cb(self):
        """Best-effort removal of stale build binaries on the node."""
        cmd = self.actions_dict[self.info.deliverable_type]["cleanup"]
        if cmd:
            try:
                # Delete all but the most recently accessed build binaries
                self.shell.execute_command(cmd,
                                           debug=self.params["debug_logs"])
            except:
                pass
# Beispiel (Example) #46, score 0 -- aggregator page marker; commented out so the file stays valid Python
    def test_win_install(self):
        """Install Couchbase Server on Windows servers via a scheduled task.

        Resolves the requested build, uninstalls any existing server,
        copies the automation scripts over, downloads the installer, and
        triggers the 'installme' scheduled task, then waits for
        VERSION.txt to appear under the server path.

        Fixes vs. previous revision: removed a second, leaked
        RemoteMachineShellConnection whose extract_remote_info() result
        was never used; corrected the "Unknon" typo in the product-type
        error message; disconnect the shell at the end of each iteration.
        """
        query = BuildQuery()
        builds, changes = query.get_all_builds()
        version = self.input.test_params['version']
        os_version = self.input.test_params['win']

        task = 'install'
        ok = True
        ex_type = 'exe'
        bat_file = 'install.bat'
        version_file = 'VERSION.txt'
        if self.input.test_params["ostype"] == '64':
            Arch = 'x86_64'
            os_type = '64'
        elif self.input.test_params["ostype"] == '32':
            Arch = 'x86'
            os_type = '32'
        else:
            ok = False
            self.log.error("Unknown os version.")

        product = self.input.test_params["product"]
        if product == 'cse':
            name = 'couchbase-server-enterprise'
        elif product == 'csse':
            name = 'couchbase-single-server-enterprise'
        elif product == 'csc':
            name = 'couchbase-server-community'
        elif product == 'cssc':
            name = 'couchbase-single-server-community'
        else:
            ok = False
            self.log.error("Unknown product type.")

        cb_server_alias = ['cse','csc']
        cb_single_alias = ['csse','cssc']
        if product in cb_server_alias:
            server_path = "/cygdrive/c/Program Files/Couchbase/Server/"
        elif product in cb_single_alias:
            server_path = "/cygdrive/c/Program Files (x86)/Couchbase/Server/"

        if ok:
            for serverInfo in self.servers:
                remote_client = RemoteMachineShellConnection(serverInfo)
                build = query.find_build(builds, name, ex_type, Arch, version)
                # check if previous couchbase server installed
                exist = remote_client.file_exists("/cygdrive/c/Program Files/Couchbase/Server/", version_file)
                if exist:
                    # call uninstall function to install couchbase server
                    self.log.info("Start uninstall cb server on this server")
                    self.test_win_uninstall(remote_client, product, os_type, os_version, version, server_path)
                else:
                    self.log.info('I am free. You can install couchbase server now')
                # directory path in remote server used to create or delete directory
                dir_paths = ['/cygdrive/c/automation','/cygdrive/c/tmp']
                remote_client.create_multiple_dir(dir_paths)
                # copy files from local server to remote server
                remote_client.copy_files_local_to_remote('resources/windows/automation', '/cygdrive/c/automation')
                downloaded = remote_client.download_binary_in_win(build.url,product,version)
                if downloaded:
                    self.log.info('Successful download {0}_{1}.exe'.format(product, version))
                else:
                    self.log.error('Download {0}_{1}.exe failed'.format(product, version))
                remote_client.modify_bat_file('/cygdrive/c/automation', bat_file,
                                               product, os_type, os_version, version, task)
                self.log.info('sleep for 5 seconds before running task schedule install me')
                time.sleep(5)
                # run task schedule to install Couchbase Server
                output, error = remote_client.execute_command("cmd /c schtasks /run /tn installme")
                remote_client.log_command_output(output, error)
                remote_client.wait_till_file_added(server_path, version_file, timeout_in_seconds=600)
                self.log.info('sleep 15 seconds before running the next job ...')
                time.sleep(15)
                # Close the SSH session before moving to the next server.
                remote_client.disconnect()
        else:
            self.log.error("Can not install Couchbase Server.")
# Beispiel (Example) #47, score 0 -- aggregator page marker; commented out so the file stays valid Python
    def test_cbServerOps(self):
        """Kill memcached or restart Couchbase Server and verify audit-log
        roll-over: a fresh audit.log must appear and the previous one must
        be archived as <hostname>-<first-event-timestamp>-audit.log.

        ops param: "kill" (kill memcached) or "shutdown" (stop/start the
        server).  For "shutdown", also validates the audit-daemon shutdown
        event (id 4097) recorded in the archived file.

        Fixes vs. previous revision: the match/mismatch log lines indexed
        the dicts with ``items.encode('utf-8')`` (bytes keys), which
        raises KeyError on Python 3 str-keyed dicts; the shell connection
        is now disconnected at the end.
        """
        ops = self.input.param("ops", None)
        auditIns = audit(host=self.master)

        #Capture timestamp from first event for filename
        firstEventTime = self.getTimeStampForFile(auditIns)

        shell = RemoteMachineShellConnection(self.master)

        #Kill memcached to check for file roll over and new audit.log
        if (ops == "kill"):
            result = shell.kill_memcached()
            self.sleep(10)

        #Stop CB Server to check for file roll over and new audit.log
        if (ops == 'shutdown'):
            try:
                result = shell.stop_couchbase()
                self.sleep(120, 'Waiting for server to shutdown')
            finally:
                # Always restart the server, even if the stop failed.
                result = shell.start_couchbase()

        #Check for audit.log and for roll over file
        self.sleep(120, 'Waiting for server to start after shutdown')
        rest = RestConnection(self.master)
        # Trigger a login so at least one fresh audit event is generated.
        status, content = rest.validateLogin("Administrator", "password", True, getContent=True)
        self.sleep(30)
        result = shell.file_exists(auditIns.pathLogFile, audit.AUDITLOGFILENAME)
        self.assertTrue(result, "Audit.log is not created when memcached server is killed or stopped")
        hostname = shell.execute_command("hostname")

        archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
        self.log.info ("Archive File expected is {0}".format(auditIns.pathLogFile + archiveFile))
        result = shell.file_exists(auditIns.pathLogFile, archiveFile)
        self.assertTrue(result, "Archive Audit.log is not created when memcached server is killed or stopped")

        if (ops == 'shutdown'):
            # Validate the audit-daemon shutdown event against the archive.
            expectedResult = {"source":"internal", "user":"******", "id":4097, "name":"shutting down audit daemon", "description":"The audit daemon is being shutdown"}
            data = auditIns.returnEvent(4097, archiveFile)
            flag = True
            for items in data:
                if (items == 'timestamp'):
                    tempFlag = auditIns.validateTimeStamp(data['timestamp'])
                    if (tempFlag is False):
                        flag = False
                else:
                    if (isinstance(data[items], dict)):
                        # Nested dict: compare each sub-field individually.
                        for seclevel in data[items]:
                            tempValue = expectedResult[seclevel]
                            if data[items][seclevel] == tempValue:
                                self.log.info ('Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(tempValue, data[items][seclevel], seclevel))
                            else:
                                self.log.info ('Mis-Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(tempValue, data[items][seclevel], seclevel))
                                flag = False
                    else:
                        # BUGFIX: previously indexed with items.encode('utf-8')
                        # (bytes), raising KeyError on Python 3 str-keyed dicts.
                        if (data[items] == expectedResult[items]):
                            self.log.info ('Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(expectedResult[items], data[items], items))
                        else:
                            self.log.info ('Mis - Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(expectedResult[items], data[items], items))
                            flag = False
            self.assertTrue(flag, "Shutdown event is not printed")

        expectedResults = {"auditd_enabled":auditIns.getAuditConfigElement('auditd_enabled'),
                           "descriptors_path":self.changePathWindows(auditIns.getAuditConfigElement('descriptors_path')),
                           "log_path":self.changePathWindows(auditIns.getAuditLogPath().strip()[:-2]),
                           'source':'internal', 'user':'******',
                           "rotate_interval":auditIns.getAuditConfigElement('rotate_interval'),
                           "version":1, 'hostname':self.getHostName(self.master)}
        self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
        shell.disconnect()
# Beispiel (Example) #48, score 0 -- aggregator page marker; commented out so the file stays valid Python
class CommunityTests(CommunityBaseTest):
    def setUp(self):
        """Load the test parameters used by the CE feature checks."""
        super(CommunityTests, self).setUp()
        # (attribute name, test-param key, default value)
        param_specs = [
            ("command", "command", ""),
            ("zone", "zone", 1),
            ("replica", "replica", 1),
            ("command_options", "command_options", ''),
            ("set_get_ratio", "set_get_ratio", 0.9),
            ("item_size", "item_size", 128),
            ("shutdown_zone", "shutdown_zone", 1),
            ("do_verify", "do-verify", True),
            ("num_node", "num_node", 4),
            ("services", "services", None),
            ("start_node_services", "start_node_services", "kv"),
            ("add_node_services", "add_node_services", "kv"),
            ("user_add", "user_add", None),
            ("user_role", "user_role", None),
        ]
        for attr, key, default in param_specs:
            setattr(self, attr, self.input.param(key, default))
        self.timeout = 6000

    def tearDown(self):
        """Delegate all cleanup to the shared base-test teardown."""
        super().tearDown()

    def test_disabled_zone(self):
        """Creating a server group ("zone") must be rejected on CE."""
        zone_blocked = False
        group_name = "group1"
        self.rest = RestConnection(self.servers[0])
        try:
            self.log.info("create zone name 'group1'!")
            outcome = self.rest.add_zone(group_name)
            print("result  ", outcome)
        except Exception as err:
            # Any rejection from the server counts as the feature
            # being disabled.
            if err:
                print(err)
                zone_blocked = True
        if not zone_blocked:
            self.fail("CE version should not have zone feature")

    def check_audit_available(self):
        """Fail if the audit-settings endpoint responds (EE-only feature)."""
        has_audit = True
        try:
            self.rest.getAuditSettings()
        except Exception as err:
            # Expected path on CE: the request is rejected.
            has_audit = False
            if err:
                print(err)
        if has_audit:
            self.fail("This feature 'audit' only available on "
                      "Enterprise Edition")

    def check_ldap_available(self):
        """Fail if LDAP settings can be cleared (EE-only feature)."""
        self.rest = RestConnection(self.master)
        ldap_enabled = False
        try:
            status, _content, _header = self.rest.clearLDAPSettings()
            ldap_enabled = bool(status)
        except Exception as err:
            # Expected path on CE: the request is rejected.
            if err:
                print(err)
        if ldap_enabled:
            self.fail("This feature 'ldap' only available on "
                      "Enterprise Edition")

    def check_set_services(self):
        """Re-initialize the node with ``self.services`` and verify CE only
        accepts the allowed service combinations.

        CE permits "kv" alone and the full "index,kv,n1ql" set (plus
        "fts,index,kv,n1ql" from Watson onward); any partial combination
        must be rejected by the server.

        Fix vs. previous revision: ``status`` is initialized before the
        try block -- if init_node_services() raised, the checks below
        previously died with a NameError instead of a clean assertion.
        """
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        status = False
        try:
            status = self.rest.init_node_services(hostname=self.master.ip,
                                                  services=[self.services])
        except Exception as e:
            if e:
                print(e)
        if self.services == "kv":
            if status:
                self.log.info("CE could set {0} only service.".format(
                    self.services))
            else:
                self.fail("Failed to set {0} only service.".format(
                    self.services))
        elif self.services == "index,kv":
            if status:
                self.fail("CE does not support kv and index on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "kv,n1ql":
            if status:
                self.fail("CE does not support kv and n1ql on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "kv,eventing":
            if status:
                self.fail("CE does not support kv and eventing on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "index,n1ql":
            if status:
                self.fail("CE does not support index and n1ql on same node")
            else:
                self.log.info("services enforced in CE")
        elif self.services == "index,kv,n1ql":
            if status:
                self.log.info(
                    "CE could set all services {0} on same nodes.".format(
                        self.services))
            else:
                self.fail("Failed to set kv, index and query services on CE")
        elif self.version[:5] in COUCHBASE_FROM_WATSON:
            # Watson and later add fts; vulcan and later reject eventing.
            if self.version[:5] in COUCHBASE_FROM_VULCAN and "eventing" in self.services:
                if status:
                    self.fail("CE does not support eventing in vulcan")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,kv":
                if status:
                    self.fail(
                        "CE does not support fts, index and kv on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,n1ql":
                if status:
                    self.fail(
                        "CE does not support fts, index and n1ql on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,kv,n1ql":
                if status:
                    self.fail(
                        "CE does not support fts, kv and n1ql on same node")
                else:
                    self.log.info("services enforced in CE")
            elif self.services == "fts,index,kv,n1ql":
                if status:
                    self.log.info(
                        "CE could set all services {0} on same nodes.".format(
                            self.services))
                else:
                    self.fail("Failed to set "
                              "fts, index, kv, and query services on CE")
        else:
            self.fail("some services don't support")

    def check_set_services_when_add_node(self):
        """Verify CE enforces the allowed service combinations on node
        init and when a second node is added via rebalance.

        Sherlock CE allows only "kv" and "index,kv,n1ql"; Watson CE also
        allows "fts,index,kv,n1ql".
        """
        self.rest.force_eject_node()
        sherlock_services_in_ce = ["kv", "index,kv,n1ql"]
        watson_services_in_ce = ["kv", "index,kv,n1ql", "fts,index,kv,n1ql"]
        self.sleep(5, "wait for node reset done")
        # Poll until the ejected node reports its memory quota again.
        kv_quota = 0
        while kv_quota == 0:
            time.sleep(1)
            kv_quota = int(self.rest.get_nodes_self().mcdMemoryReserved)
        info = self.rest.get_nodes_self()
        kv_quota = int(info.mcdMemoryReserved * (CLUSTER_QUOTA_RATIO))
        self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                          memoryQuota=INDEX_QUOTA)
        self.rest.set_service_memoryQuota(service='ftsMemoryQuota',
                                          memoryQuota=FTS_QUOTA)
        self.rest.init_cluster_memoryQuota(
            self.input.membase_settings.rest_username,
            self.input.membase_settings.rest_password,
            kv_quota - INDEX_QUOTA - FTS_QUOTA - 100)
        # Default to False so 'status' is always bound; previously it was
        # unbound (NameError) when init_node_services raised.
        status = False
        try:
            self.log.info("Initialize node with services {0}".format(
                self.start_node_services))
            status = self.rest.init_node_services(
                hostname=self.master.ip, services=[self.start_node_services])
            self.rest.init_cluster()
        except Exception as e:
            if e:
                print(e)
        if not status:
            # Init was rejected: acceptable only for service combos CE
            # does not support on this release line.
            if self.version not in COUCHBASE_FROM_WATSON and \
                         self.start_node_services not in sherlock_services_in_ce:
                self.log.info(
                    "initial services setting enforced in Sherlock CE")
            elif self.version in COUCHBASE_FROM_WATSON and \
                         self.start_node_services not in watson_services_in_ce:
                self.log.info("initial services setting enforced in Watson CE")
        else:
            # NOTE: the previous trailing 'else: self.fail("maybe bug in
            # node initialization")' was unreachable (not status / status
            # cover all cases) and has been removed.
            add_node = False
            try:
                self.log.info("node with services {0} try to add".format(
                    self.add_node_services))
                add_node = self.cluster.rebalance(
                    self.servers[:2],
                    self.servers[1:2], [],
                    services=[self.add_node_services])
            except Exception:
                pass
            if add_node:
                self.get_services_map()
                list_nodes = self.get_nodes_from_services_map(
                    get_all_nodes=True)
                # Renamed from 'map' to avoid shadowing the builtin.
                services_map = self.get_nodes_services()
                if services_map[self.master.ip] == self.start_node_services and \
                    services_map[self.servers[1].ip] == self.add_node_services:
                    self.log.info(
                        "services set correctly when node added & rebalance")
                else:
                    # Format args were previously swapped (actual passed
                    # where expected was printed); order fixed here.
                    self.fail("services set incorrectly when node added & rebalance. "
                        "cluster expected services: {0}; set cluster services {1} ."
                        "add node expected srv: {2}; set add node srv {3}"\
                        .format(self.start_node_services,
                                services_map[self.master.ip],
                                self.add_node_services,
                                services_map[self.servers[1].ip]))
            else:
                # Add-node was rejected: acceptable only when the added
                # services are outside the CE-allowed combos.
                if self.version not in COUCHBASE_FROM_WATSON:
                    if self.start_node_services in ["kv", "index,kv,n1ql"] and \
                          self.add_node_services not in ["kv", "index,kv,n1ql"]:
                        self.log.info("services are enforced in CE")
                    elif self.start_node_services not in [
                            "kv", "index,kv,n1ql"
                    ]:
                        self.log.info("services are enforced in CE")
                    else:
                        self.fail("maybe bug in add node")
                elif self.version in COUCHBASE_FROM_WATSON:
                    if self.start_node_services in ["kv", "index,kv,n1ql",
                         "fts,index,kv,n1ql"] and self.add_node_services not in \
                                    ["kv", "index,kv,n1ql", "fts,index,kv,n1ql"]:
                        self.log.info("services are enforced in CE")
                    elif self.start_node_services not in [
                            "kv", "index,kv,n1ql", "fts,index,kv,n1ql"
                    ]:
                        self.log.info("services are enforced in CE")
                    else:
                        self.fail("maybe bug in add node")

    def check_full_backup_only(self):
        """ for windows vm, ask IT to put uniq.exe at
            /cygdrive/c/Program Files (x86)/ICW/bin directory """

        self.remote = RemoteMachineShellConnection(self.master)
        """ put params items=0 in test param so that init items = 0 """
        # Seed the cluster with 1000 json items via cbworkloadgen.
        self.remote.execute_command("{0}cbworkloadgen -n {1}:8091 -j -i 1000 " \
                                    "-u Administrator -p password" \
                                            .format(self.bin_path, self.master.ip))
        """ delete backup location before run backup """
        self.remote.execute_command("rm -rf {0}*".format(self.backup_location))
        output, error = self.remote.execute_command("ls -lh {0}".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        """ first full backup """
        self.remote.execute_command("{0}cbbackup http://{1}:8091 {2} -m full " \
                                    "-u Administrator -p password"\
                                    .format(self.bin_path,
                                            self.master.ip,
                                            self.backup_c_location))
        output, error = self.remote.execute_command("ls -lh {0}*/".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        # Count unique mutations in the full backup via cbtransfer to
        # stdout; expect exactly the 1000 items loaded above.
        output, error = self.remote.execute_command("{0}cbtransfer -u Administrator "\
                                           "-p password {1}*/*-full/ " \
                                           "stdout: | grep set | uniq | wc -l"\
                                           .format(self.bin_path,
                                                   self.backup_c_location))
        self.remote.log_command_output(output, error)
        if int(output[0]) != 1000:
            self.fail("full backup did not work in CE. "
                      "Expected 1000, actual: {0}".format(output[0]))
        # Load 1000 more items with a distinct key prefix so the second
        # backup mode has new data to pick up.
        self.remote.execute_command("{0}cbworkloadgen -n {1}:8091 -j -i 1000 "\
                                    " -u Administrator -p password --prefix=t_"
                                    .format(self.bin_path, self.master.ip))
        """ do different backup mode """
        self.remote.execute_command("{0}cbbackup -u Administrator -p password "\
                                    "http://{1}:8091 {2} -m {3}"\
                                    .format(self.bin_path,
                                            self.master.ip,
                                            self.backup_c_location,
                                            self.backup_option))
        output, error = self.remote.execute_command("ls -lh {0}".format(
            self.backup_location))
        self.remote.log_command_output(output, error)
        output, error = self.remote.execute_command("{0}cbtransfer -u Administrator "\
                                           "-p password {1}*/*-{2}/ stdout: "\
                                           "| grep set | uniq | wc -l"\
                                           .format(self.bin_path,
                                                   self.backup_c_location,
                                                   self.backup_option))
        self.remote.log_command_output(output, error)
        # In CE the 'diff'/'accu' option should be ignored, so the second
        # backup must contain all 2000 items (full), not just the 1000 new.
        if int(output[0]) == 2000:
            self.log.info("backup option 'diff' is enforced in CE")
        elif int(output[0]) == 1000:
            self.fail("backup option 'diff' is not enforced in CE. "
                      "Expected 2000, actual: {0}".format(output[0]))
        else:
            self.fail("backup failed to backup correct items")
        self.remote.disconnect()

    def check_ent_backup(self):
        """Verify the cbbackupmgr binary is present in the bin directory.

        Since Cheshire Cat the enterprise backup tool ships with CE as
        well, so its absence is treated as a test failure.
        """
        command = "cbbackupmgr"
        self.remote = RemoteMachineShellConnection(self.master)
        self.log.info("check if {0} in {1} directory".format(
            command, self.bin_path))
        if self.remote.file_exists(self.bin_path, command):
            self.log.info("found {0} in {1} directory".format(
                command, self.bin_path))
            self.log.info("Ent. backup in CE is in bin!")
        else:
            self.fail(
                "CE from Cheshire Cat should contain {0}".format(command))
        self.remote.disconnect()

    def check_memory_optimized_storage_mode(self):
        """ from Watson, CE should not have option 'memory_optimized' to set """
        self.rest.force_eject_node()
        self.sleep(5, "wait for node reset done")
        # Default to False so 'status' is always bound; previously it was
        # unbound (NameError) when set_indexer_storage_mode raised.
        status = False
        try:
            self.log.info("Initialize node with 'Memory Optimized' option")
            status = self.rest.set_indexer_storage_mode(
                username=self.input.membase_settings.rest_username,
                password=self.input.membase_settings.rest_password,
                storageMode='memory_optimized')
        except Exception as ex:
            if ex:
                print(ex)
        if not status:
            self.log.info("Memory Optimized setting enforced in CE "
                          "Could not set memory_optimized option")
        else:
            self.fail("Memory Optimized setting is not enforced in CE: "
                      "we could set this option")

    def check_plasma_storage_mode(self):
        """ from Watson, CE should not have option 'plasma' to set """
        self.rest.force_eject_node()
        self.sleep(5, "wait for node reset done")
        # Default to False so 'status' is always bound; previously it was
        # unbound (NameError) when set_indexer_storage_mode raised.
        status = False
        try:
            # Log message previously said 'Memory Optimized' (copy-paste);
            # this test sets the 'plasma' storage mode.
            self.log.info("Initialize node with 'Plasma' option")
            status = self.rest.set_indexer_storage_mode(
                username=self.input.membase_settings.rest_username,
                password=self.input.membase_settings.rest_password,
                storageMode='plasma')
        except Exception as ex:
            if ex:
                print(ex)
        if not status:
            self.log.info("Plasma setting enforced in CE "
                          "Could not set Plasma option")
        else:
            self.fail("Plasma setting is not enforced in CE: "
                      "we could set this option")

    def check_x509_cert(self):
        """ from Watson, X509 certificate only support in EE """
        api = self.rest.baseUrl + "pools/default/certificate?extended=true"
        self.log.info("request to get certificate at "
                      "'pools/default/certificate?extended=true' "
                      "should return False")
        # Defaults keep both names bound when the request raises;
        # previously they were unbound (NameError) on exception.
        status, content = False, b''
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("This X509 certificate feature only available in EE")
        elif b'requires enterprise edition' in content:
            self.log.info("X509 cert is enforced in CE")

    def check_roles_base_access(self):
        """ from Watson, roles base access for admin should not in in CE """
        if self.user_add is None:
            self.fail(
                "We need to pass user name (user_add) to run this test. ")
        if self.user_role is None:
            self.fail(
                "We need to pass user roles (user_role) to run this test. ")
        api = self.rest.baseUrl + "settings/rbac/users/" + self.user_add
        self.log.info("url to run this test: %s" % api)
        """ add admin user """
        param = "name=%s&roles=%s" % (self.user_add, self.user_role)
        # Default to False so 'status' is always bound; previously it was
        # unbound (NameError) when the request raised.
        status = False
        try:
            status, content, header = self.rest._http_request(
                api, 'PUT', param)
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not allow to add admin users")
        else:
            self.log.info("roles base is enforced in CE! ")

    def check_root_certificate(self):
        """ from watson, ce should not see root certificate
            manual test:
            curl -u Administrator:password -X GET
                            http://localhost:8091/pools/default/certificate """
        api = self.rest.baseUrl + "pools/default/certificate"
        # Defaults keep both names bound when the request raises;
        # previously they were unbound (NameError) on exception.
        status, content = False, b''
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not see root certificate!")
        elif b'requires enterprise edition' in content:
            self.log.info("root certificate is enforced in CE! ")

    def check_settings_audit(self):
        """ from watson, ce should not set audit
            manual test:
            curl -u Administrator:password -X GET
                            http://localhost:8091/settings/audit """
        api = self.rest.baseUrl + "settings/audit"
        # Defaults keep both names bound when the request raises;
        # previously they were unbound (NameError) on exception.
        status, content = False, b''
        try:
            status, content, header = self.rest._http_request(api, 'GET')
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not allow to set audit !")
        elif b'requires enterprise edition' in content:
            self.log.info("settings audit is enforced in CE! ")

    def check_infer(self):
        """ from watson, ce should not see infer
            manual test:
            curl -H "Content-Type: application/json" -X POST
                 -d '{"statement":"infer `bucket_name`;"}'
                       http://localhost:8093/query/service
            test params: new_services=kv-index-n1ql,default_bucket=False """
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({"statement": "infer `%s` ;" % bucket})
        # 'json_parsed' was unbound (NameError) when the request raised;
        # an empty dict keeps the checks below safe.
        json_parsed = {}
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed.get("status") == "success":
            self.fail("CE should not allow to run INFER !")
        elif json_parsed.get("status") == "fatal":
            self.log.info("INFER is enforced in CE! ")

    def check_query_monitoring(self):
        """Verify query profiling (admin/settings 'profile') is EE-only."""
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "admin/settings"
        param = {'profile': 'phases'}
        # Defaults keep both names bound when the request raises;
        # previously they were unbound (NameError) on exception.
        status, content = False, b''
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', json.dumps(param))
        except Exception as ex:
            if ex:
                print(ex)
        if status:
            self.fail("CE should not be allowed to do query monitoring !")
        elif b'Profiling is an EE only feature' in content:
            self.log.info("Query monitoring is enforced in CE! ")

    def check_flex_index(self):
        """Verify flex index (USE INDEX (USING FTS)) is EE-only.

        (Docstring previously copy-pasted from check_infer.)
        test params: new_services=kv-index-n1ql,default_bucket=False
        """
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({
            "statement":
            "SELECT META(d).id FROM `%s` AS d USE INDEX (USING FTS) WHERE d.f2 = 100;"
            % bucket
        })
        # 'json_parsed' was unbound (NameError) when the request raised;
        # an empty dict keeps the checks below safe.
        json_parsed = {}
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed.get("status") == "success":
            self.fail("CE should not allow to run flex index !")
        elif json_parsed.get("status") == "fatal":
            self.log.info("Flex index is enforced in CE! ")

    def check_index_partitioning(self):
        """Verify index partitioning (PARTITION BY HASH) is EE-only."""
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({
            "statement":
            "CREATE INDEX idx ON `%s`(id) PARTITION BY HASH(META().id)" %
            bucket
        })
        # 'json_parsed' was unbound (NameError) when the request raised;
        # an empty dict keeps the checks below safe.
        json_parsed = {}
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed.get("status") == "success":
            self.fail("CE should not be allowed to run index partitioning !")
        elif json_parsed.get("status") == "fatal":
            self.log.info("Index partitioning is enforced in CE! ")

    def check_query_cost_based_optimizer(self):
        """Verify the cost-based optimizer (UPDATE STATISTICS) is EE-only."""
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        # NOTE(review): the statement targets a hard-coded `hotel` keyspace
        # rather than the bucket created above — presumably intentional
        # since CE must reject the statement either way; confirm.
        param = urllib.parse.urlencode({
            "statement":
            "UPDATE STATISTICS for `hotel` (type, address, city, country, free_breakfast, id, phone);"
        })
        # 'json_parsed' was unbound (NameError) when the request raised;
        # an empty dict keeps the checks below safe.
        json_parsed = {}
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed.get("status") == "success":
            self.fail("CE should not be allowed to run CBO !")
        elif json_parsed.get("status") == "fatal":
            self.log.info("CBO is enforced in CE! ")

    def check_query_window_functions(self):
        """Verify N1QL window functions (CUME_DIST() OVER ...) are EE-only."""
        self.rest.force_eject_node()
        self.sleep(7, "wait for node reset done")
        self.rest.init_node()
        bucket = "default"
        self.rest.create_bucket(bucket, ramQuotaMB=200)
        api = self.rest.query_baseUrl + "query/service"
        param = urllib.parse.urlencode({
            "statement":
            "SELECT d.id, d.destinationairport, CUME_DIST() OVER (PARTITION BY d.destinationairport \
                            ORDER BY d.distance NULLS LAST) AS `rank` \
                            FROM `%s` AS d \
                            WHERE d.type='route' \
                            LIMIT 7;" % bucket
        })
        # 'json_parsed' was unbound (NameError) when the request raised;
        # an empty dict keeps the checks below safe.
        json_parsed = {}
        try:
            status, content, header = self.rest._http_request(
                api, 'POST', param)
            json_parsed = json.loads(content)
        except Exception as ex:
            if ex:
                print(ex)
        if json_parsed.get("status") == "success":
            self.fail("CE should not be allowed to use window functions !")
        elif json_parsed.get("status") == "fatal":
            self.log.info("Window functions is enforced in CE! ")

    def check_auto_complete(self):
        """Placeholder: blocking of this feature in CE is not complete yet."""

    """ Check new features from spock start here """

    def check_cbbackupmgr(self):
        """cbbackupmgr must not ship in the CE bin dir from Spock onward."""
        if self.cb_version[:5] in COUCHBASE_FROM_SPOCK:
            file_name = "cbbackupmgr" + self.file_extension
            self.log.info("check if cbbackupmgr in bin dir in CE")
            if self.remote.file_exists(self.bin_path, file_name):
                self.fail("cbbackupmgr should not in bin dir of CE")
            self.log.info("cbbackupmgr is enforced in CE")
        self.remote.disconnect()

    def test_max_ttl_bucket(self):
        """
            From vulcan, EE bucket has an option to set --max-ttl, not in CE.
            This test makes sure CE could not create bucket with option --max-ttl
            This test must pass default_bucket=False
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/pools/default/buckets \
                                 -d name=bucket0 \
                                 -d maxTTL=100 \
                                 -d ramQuotaMB=100 '.format(self.master.ip)
        if self.cli_test:
            cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
                --password password --bucket bucket0 --bucket-type couchbase \
                --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
                --bucket-eviction-policy fullEviction --enable-flush 0 \
                --enable-index-replica 1 --max-ttl 200".format(
                self.bin_path, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        # try/finally guarantees the shell is closed even when self.fail
        # raises (the connection leaked on failure before).
        try:
            output, error = conn.execute_command(cmd)
            conn.log_command_output(output, error)
            mesg = "Max TTL is supported in enterprise edition only"
            if self.cli_test:
                mesg = "Maximum TTL can only be configured on enterprise edition"
            if output and mesg not in str(output[0]):
                self.fail("max ttl feature should not in Community Edition")
            # Double-check: the bucket must not actually exist.
            buckets = RestConnection(self.master).get_buckets()
            if buckets:
                for bucket in buckets:
                    self.log.info("bucket in cluster: {0}".format(bucket.name))
                    if bucket.name == "bucket0":
                        self.fail("Failed to enforce feature max ttl in CE.")
        finally:
            conn.disconnect()

    def test_setting_audit(self):
        """
           CE does not allow to set audit from vulcan 5.5.0
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
              http://{0}:8091/settings/audit \
              -d auditdEnabled=true '.format(self.master.ip)
        if self.cli_test:
            cmd = "{0}couchbase-cli setting-audit -c {1}:8091 -u Administrator \
                -p password --audit-enabled 1 --audit-log-rotate-interval 604800 \
                --audit-log-path /opt/couchbase/var/lib/couchbase/logs --set"\
                .format(self.bin_path, self.master.ip)

        conn = RemoteMachineShellConnection(self.master)
        # try/finally guarantees the shell is closed even when self.fail
        # raises (the connection leaked on failure before).
        try:
            output, error = conn.execute_command(cmd)
            conn.log_command_output(output, error)
            mesg = "This http API endpoint requires enterprise edition"
            if output and mesg not in str(output[0]):
                self.fail("setting-audit feature should not in Community Edition")
        finally:
            conn.disconnect()

    def test_setting_autofailover_enterprise_only(self):
        """
           CE does not allow set auto failover if disk has issue
           and failover group from vulcan 5.5.0
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        # Test params choose which EE-only autofailover knob to exercise.
        self.failover_disk_period = self.input.param("failover_disk_period",
                                                     False)
        self.failover_server_group = self.input.param("failover_server_group",
                                                      False)

        failover_disk_period = ""
        if self.failover_disk_period:
            if self.cli_test:
                failover_disk_period = "--failover-data-disk-period 300"
            else:
                failover_disk_period = "-d failoverOnDataDiskIssues[timePeriod]=300"
        failover_server_group = ""
        if self.failover_server_group and self.cli_test:
            failover_server_group = "--enable-failover-of-server-group 1"

        # NOTE(review): the curl command interpolates the boolean
        # self.failover_server_group, not the local CLI-flag string
        # failover_server_group built above — confirm this is intended.
        cmd = 'curl -X POST -u Administrator:password \
              http://{0}:8091/settings/autoFailover -d enabled=true -d timeout=120 \
              -d maxCount=1 \
              -d failoverOnDataDiskIssues[enabled]=true {1} \
              -d failoverServerGroup={2}'.format(self.master.ip,
                                                 failover_disk_period,
                                                 self.failover_server_group)
        if self.cli_test:
            cmd = "{0}couchbase-cli setting-autofailover -c {1}:8091 \
                   -u Administrator -p password \
                   --enable-failover-on-data-disk-issues 1 {2} {3} "\
                  .format(self.bin_path, self.master.ip,
                          failover_disk_period,
                          failover_server_group)
        conn = RemoteMachineShellConnection(self.master)
        output, error = conn.execute_command(cmd)
        conn.log_command_output(output, error)
        mesg = "Auto failover on Data Service disk issues can only be " + \
               "configured on enterprise edition"
        if not self.cli_test:
            # REST path: any non-error output while requesting an EE knob
            # means CE did not enforce it.
            if self.failover_disk_period or \
                                   self.failover_server_group:
                if output and not error:
                    self.fail("setting autofailover disk issues feature\
                               should not in Community Edition")
        else:
            # CLI path uses a different EE-only error message for the
            # server-group option.
            if self.failover_server_group:
                mesg = "--enable-failover-of-server-groups can only be " + \
                       "configured on enterprise edition"

        if output and mesg not in str(output[0]):
            self.fail("Setting EE autofailover features \
                       should not in Community Edition")
        else:
            self.log.info("EE setting autofailover are disable in CE")
        conn.disconnect()

    def test_set_bucket_compression(self):
        """
           CE does not allow to set bucket compression to bucket
           from vulcan 5.5.0.   Mode compression: off,active,passive
           Note: must set defaultbucket=False for this test
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
            self.log.info("This test only for vulcan and later")
            return
        self.compression_mode = self.input.param("compression_mode", "off")
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/pools/default/buckets \
                                 -d name=bucket0 \
                                 -d compressionMode={1} \
                                 -d ramQuotaMB=100 '.format(
            self.master.ip, self.compression_mode)
        if self.cli_test:
            cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
                --password password --bucket bucket0 --bucket-type couchbase \
                --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
                --bucket-eviction-policy fullEviction --enable-flush 0 \
                --enable-index-replica 1 --compression-mode {2}".format(
                self.bin_path, self.master.ip, self.compression_mode)
        conn = RemoteMachineShellConnection(self.master)
        # try/finally guarantees the shell is closed even when self.fail
        # raises (the connection leaked on failure before).
        try:
            output, error = conn.execute_command(cmd)
            conn.log_command_output(output, error)
            mesg = "Compression mode is supported in enterprise edition only"
            if self.cli_test:
                mesg = "Compression mode can only be configured on enterprise edition"
            if output and mesg not in str(output[0]):
                self.fail("Setting bucket compression should not in CE")
        finally:
            conn.disconnect()

    def test_ldap_groups(self):
        """
           LDAP Groups feature is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
                                    http://{0}:8091/settings/rbac/groups/admins \
                                 -d roles=admin \
                                 -d description="Couchbase+Server+Administrators" \
                                 --data-urlencode ldap_group_ref="uid=cbadmins,ou=groups,dc=example,dc=com"'\
                                .format(self.master.ip)
        if self.cli_test:
            cmd = '{0}couchbase-cli user-manage -c {1}:8091 --username Administrator \
                --password password  \
                --set-group \
                --group-name admins \
                --roles admin \
                --group-description "Couchbase Server Administrators" \
                --ldap-ref "uid=cbadmins,ou=groups,dc=example,dc=com"'.format(
                self.bin_path, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        # try/finally guarantees the shell is closed even when self.fail
        # raises (the connection leaked on failure before).
        try:
            output, error = conn.execute_command(cmd)
            conn.log_command_output(output, error)
            mesg = "Requested resource not found."
            if self.cli_test:
                mesg = "ERROR: This http API endpoint requires enterprise edition"
            if output and mesg not in str(output[0]):
                self.fail("LDAP Groups should not be in CE")
        finally:
            conn.disconnect()

    def test_ldap_cert(self):
        """
           LDAP Cert feature is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = 'curl -X POST -u Administrator:password http://{0}:8091/settings/ldap \
                                 -d hosts={1} \
                                 -d port=389 \
                                 -d encryption=StartTLSExtension \
                                 -d serverCertValidation=true \
                                 --data-urlencode [email protected] \
                                 -d bindDN="cn=admin,dc=example,dc=com" \
                                 -d bindPass=password \
                                 -d authenticationEnabled=true \
                                 -d authorizationEnabled=true \
                                 --data-urlencode groupsQuery="ou=groups,dc=example,dc=com??one?(member=%D)"'\
                                .format(self.master.ip, self.master.ip)
        if self.cli_test:
            cmd = '{0}couchbase-cli setting-ldap -c {1}:8091 --username Administrator \
                --password password  \
                --authentication-enabled 1 \
                --authorization-enabled 1 \
                --hosts {2} \
                --encryption startTLS \
                --client-cert root.crt \
                --bind-dn "cn=admin,dc=example,dc=com" \
                --bind-password password \
                --group-query "ou=groups,dc=example,dc=com??one?(member=%D)"'.format(
                self.bin_path, self.master.ip, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        # try/finally guarantees the shell is closed even when self.fail
        # raises (the connection leaked on failure before).
        try:
            output, error = conn.execute_command(cmd)
            conn.log_command_output(output, error)
            mesg = "This http API endpoint requires enterprise edition"
            if self.cli_test:
                mesg = "ERROR: Command only available in enterprise edition"
            if output and mesg not in str(output[0]):
                self.fail("LDAP Cert should not be in CE")
        finally:
            conn.disconnect()

    def test_network_encryption(self):
        """
           Encrypted network access is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = 'curl  -u Administrator:password -v -X POST \
                    http://{0}:8091/settings/security \
                    -d disableUIOverHttp=true \
                    -d clusterEncryptionLevel=control \
                    -d tlsMinVersion=tlsv1.1 \
                    -d "cipherSuites=["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"]"'\
                                .format(self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        # try/finally guarantees the shell is closed even when self.fail
        # raises (the connection leaked on failure before).
        try:
            output, error = conn.execute_command(cmd)
            conn.log_command_output(output, error)
            mesg = "not supported in community edition"
            if output and mesg not in str(output[0]):
                self.fail("Encrypted network access should not be in CE")
        finally:
            conn.disconnect()

    def test_n2n_encryption(self):
        """
           Node-to-node encryption is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = '/opt/couchbase/bin/couchbase-cli node-to-node-encryption \
                -c http://{0}:8091 \
                -u Administrator \
                -p password \
                --enable'\
                .format(self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        # try/finally guarantees the shell is closed even when self.fail
        # raises (the connection leaked on failure before).
        try:
            output, error = conn.execute_command(cmd)
            conn.log_command_output(output, error)
            mesg = "not supported in community edition"
            if output and mesg not in str(output[0]):
                self.fail("Encrypted network access should not be in CE")
        finally:
            conn.disconnect()

    def test_log_redaction(self):
        """
            Log redaction feature is not available in CE
        """
        if self.cb_version[:5] not in COUCHBASE_FROM_MAD_HATTER:
            self.log.info("This test is only for MH and later")
            return
        cmd = 'curl -X POST -u Administrator:password \
                                            http://{0}:8091/controller/startLogsCollection \
                                         -d nodes="*" \
                                         -d logRedactionLevel=partial'.format(
            self.master.ip)
        if self.cli_test:
            cmd = '{0}couchbase-cli collect-logs-start -c {1}:8091 --username Administrator \
                        --password password  \
                        --all-nodes \
                        --redaction-level partial'.format(
                self.bin_path, self.master.ip)
        conn = RemoteMachineShellConnection(self.master)
        # try/finally guarantees the shell is closed even when self.fail
        # raises (the connection leaked on failure before).
        try:
            output, error = conn.execute_command(cmd)
            conn.log_command_output(output, error)
            mesg = "log redaction is an enterprise only feature"
            if output and mesg not in str(output[0]):
                self.fail("Log redaction should not be in CE")
        finally:
            conn.disconnect()