Example 1
 def offline_cluster_upgrade_and_rebalance(self):
     num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
     stoped_nodes = self.servers[self.nodes_init - num_stoped_nodes :self.nodes_init]
     servs_out = self.servers[self.nodes_init - num_stoped_nodes - self.nodes_out :self.nodes_init - num_stoped_nodes]
     servs_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
     self._install(self.servers)
     self.operations(self.servers[:self.nodes_init])
     if self.ddocs_num:
         self.create_ddocs_and_views()
     if self.during_ops:
         for opn in self.during_ops:
             getattr(self, opn)()
     for upgrade_version in self.upgrade_versions:
         self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                    format(upgrade_version))
         for server in stoped_nodes:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             remote.disconnect()
         upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
         try:
             self.cluster.rebalance(self.servers[:self.nodes_init], servs_in, servs_out)
         except RebalanceFailedException:
             self.log.info("rebalance failed as expected")
         for upgrade_thread in upgrade_threads:
             upgrade_thread.join()
         success_upgrade = True
         while not self.queue.empty():
             success_upgrade &= self.queue.get()
         if not success_upgrade:
             self.fail("Upgrade failed!")
         ClusterOperationHelper.wait_for_ns_servers_or_assert(stoped_nodes, self)
         self.cluster.rebalance(self.servers[:self.nodes_init], [], servs_out)
         self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
         self.verification(list(set(self.servers[:self.nodes_init] + servs_in) - set(servs_out)))
Example 2
 def test_fileRotate20MB(self):
     auditIns = audit(host=self.master)
     firstEventTime = self.getTimeStampForFile(auditIns)
     tempEventCounter = 0
     rest = RestConnection(self.master)
     shell = RemoteMachineShellConnection(self.master)
     filePath = auditIns.pathLogFile + auditIns.AUDITLOGFILENAME
     number = int (shell.get_data_file_size(filePath))
     hostname = shell.execute_command("hostname")
     archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
     result = shell.file_exists(auditIns.pathLogFile, archiveFile)
     tempTime = 0
     starttime = time.time()
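     # generate audit events until the log reaches the ~20MB rotation threshold (21089520 bytes), the archive file appears, or 36000s elapse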
     while ((number < 21089520) and (tempTime < 36000) and (result == False)):
         for i in range(1, 10):
             status, content = rest.validateLogin("Administrator", "password", True, getContent=True)
             tempEventCounter += 1
             number = int (shell.get_data_file_size(filePath))
             currTime = time.time()
             tempTime = int (currTime - starttime)
             result = shell.file_exists(auditIns.pathLogFile, archiveFile)
     self.sleep(30)
     result = shell.file_exists(auditIns.pathLogFile, archiveFile)
     shell.disconnect()
     self.log.info("--------Total Events Created ---- {0}".format(tempEventCounter))
     self.assertTrue(result, "Archive Audit.log is not created on reaching 20MB threshold")
Example 3
    def test_restart_node_with_full_disk(self):
        def _get_disk_usage_percentage(remote_client):
            disk_info = remote_client.get_disk_info()
            percentage = disk_info[1] + disk_info[2]
            for item in percentage.split():
                if "%" in item:
                    self.log.info("disk usage {0}".format(item))
                    return item[:-1]

        remote_client = RemoteMachineShellConnection(self.master)
        output, error = remote_client.execute_command_raw("rm -rf full_disk*", use_channel=True)
        remote_client.log_command_output(output, error)
        percentage = _get_disk_usage_percentage(remote_client)
        try:
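            # fill the disk with 3GB zero-filled files until reported usage reaches 99%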
            while int(percentage) < 99:
                output, error = remote_client.execute_command("dd if=/dev/zero of=full_disk{0} bs=3G count=1".format(percentage + str(time.time())), use_channel=True)
                remote_client.log_command_output(output, error)
                percentage = _get_disk_usage_percentage(remote_client)
            processes1 = remote_client.get_running_processes()
            output, error = remote_client.execute_command("/etc/init.d/couchbase-server restart", use_channel=True)
            remote_client.log_command_output(output, error)
        finally:
            output, error = remote_client.execute_command_raw("rm -rf full_disk*", use_channel=True)
            remote_client.log_command_output(output, error)
            remote_client.disconnect()
Example 4
 def test_rotateInterval(self):
     intervalSec = self.input.param("intervalSec", None)
     auditIns = audit(host=self.master)
     rest = RestConnection(self.master)
     originalInt = auditIns.getAuditRotateInterval()
     try:
         firstEventTime = self.getTimeStampForFile(auditIns)
         self.log.info("first event time is {0}".format(firstEventTime))
         auditIns.setAuditRotateInterval(intervalSec)
         self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
         status, content = rest.validateLogin(self.master.rest_username, self.master.rest_password, True, getContent=True)
         self.sleep(120)
         shell = RemoteMachineShellConnection(self.master)
         try:
             hostname = shell.execute_command("hostname")
             archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
             self.log.info ("Archive File Name is {0}".format(archiveFile))
             result = shell.file_exists(auditIns.pathLogFile, archiveFile)
             self.assertTrue(result, "Archive Audit.log is not created on time interval")
             self.log.info ("Validation of archive File created is True, Audit archive File is created {0}".format(archiveFile))
             result = shell.file_exists(auditIns.pathLogFile, auditIns.AUDITLOGFILENAME)
             self.assertTrue(result, "Audit.log is not re-created after log rotation")
         finally:
             shell.disconnect()
     finally:
         auditIns.setAuditRotateInterval(originalInt)
Example 5
    def test_rotateIntervalCluster(self):
        intervalSec = self.input.param("intervalSec", None)
        nodes_init = self.input.param("nodes_init", 2)
        auditIns = audit(host=self.master)
        auditIns.setAuditEnable('true')
        originalInt = auditIns.getAuditRotateInterval()
        auditIns.setAuditRotateInterval(intervalSec)
        firstEventTime = []

        try:
            for i in range(len(self.servers[:nodes_init])):
                auditTemp = audit(host=self.servers[i])
                firstEventTime.append(self.getTimeStampForFile(auditTemp))

            self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')

            for i in range(len(self.servers[:nodes_init])):
                shell = RemoteMachineShellConnection(self.servers[i])
                rest = RestConnection(self.servers[i])
                status, content = rest.validateLogin(self.master.rest_username, self.master.rest_password, True, getContent=True)
                self.sleep(120, "sleeping for log file creation")
                try:
                    hostname = shell.execute_command("hostname")
                    self.log.info ("print firstEventTime {0}".format(firstEventTime[i]))
                    archiveFile = hostname[0][0] + '-' + firstEventTime[i] + "-audit.log"
                    self.log.info ("Archive File Name is {0}".format(archiveFile))
                    result = shell.file_exists(auditIns.pathLogFile, archiveFile)
                    self.assertTrue(result, "Archive Audit.log is not created on time interval")
                    self.log.info ("Validation of archive File created is True, Audit archive File is created {0}".format(archiveFile))
                    result = shell.file_exists(auditIns.pathLogFile, auditIns.AUDITLOGFILENAME)
                    self.assertTrue(result, "Audit.log is not created as per the roll over time specified")
                finally:
                    shell.disconnect()
        finally:
            auditIns.setAuditRotateInterval(originalInt)
Example 6
    def initialize(self, params):
        start_time = time.time()
        server = params["server"]
        remote_client = RemoteMachineShellConnection(params["server"])
        replace_127_0_0_1_cmd = "sed -i 's/127.0.0.1/0.0.0.0/g' {0}".format(
            testconstants.COUCHBASE_SINGLE_DEFAULT_INI_PATH)
        o, r = remote_client.execute_command(replace_127_0_0_1_cmd)
        remote_client.log_command_output(o, r)
        remote_client.stop_couchbase()
        remote_client.start_couchbase()
        remote_client.disconnect()
        couchdb_ok = False

        while time.time() < (start_time + 60):
            try:
                couch_ip = "http://{0}:5984/".format(server.ip)
                log.info("connecting to couch @ {0}".format(couch_ip))
                couch = couchdb.Server(couch_ip)
                couch.config()
                # TODO: verify version number and other properties
                couchdb_ok = True
                break
            except Exception as ex:
                msg = "error happened while creating connection to couchbase single server @ {0} , error : {1}"
                log.error(msg.format(server.ip, ex))
            log.info('sleep for 5 seconds before trying again ...')
            time.sleep(5)
        if not couchdb_ok:
            raise Exception("unable to initialize couchbase single server")
Example 7
    def test_AuditEvent(self):
        auditIns = audit(host=self.master)
        ops = self.input.param("ops", None)
        source = 'internal'
        user = '******'
        rest = RestConnection(self.master)
        #status = rest.setAuditSettings(enabled='true')
        auditIns.setAuditEnable('true')
        if (ops in ['enable', 'disable']):
            if ops == 'disable':
                #status = rest.setAuditSettings(enabled='false')
                auditIns.setAuditEnable('false')
            else:
                #status = rest.setAuditSettings(enabled='true')
                auditIns.setAuditEnable('true')

        if ops == 'disable':
            shell = RemoteMachineShellConnection(self.master)
            try:
                result = shell.file_exists(auditIns.getAuditLogPath(), auditIns.AUDITLOGFILENAME)
            finally:
                shell.disconnect()
            self.assertFalse(result, 'Issue with file getting created in new directory')
        else:
            auditIns = audit(host=self.master)
            expectedResults = {"auditd_enabled":auditIns.getAuditStatus(),
                               "descriptors_path":self.changePathWindows(auditIns.getAuditConfigElement('descriptors_path')),
                               "log_path":self.changePathWindows((auditIns.getAuditLogPath())[:-1]), "source":"internal",
                               "user":"******", "rotate_interval":86400, "version":1, 'hostname':self.getHostName(self.master)}
            self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
Example 8
    def test_upgrade(self):
        self._install([self.master])
        self.operations([self.master])
        for upgrade_version in self.upgrade_versions:
            self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                       format(upgrade_version))
            upgrade_threads = self._async_update(upgrade_version, [self.master])
            #wait upgrade statuses
            for upgrade_thread in upgrade_threads:
                upgrade_thread.join()
            success_upgrade = True
            while not self.queue.empty():
                success_upgrade &= self.queue.get()
            if not success_upgrade:
                self.fail("Upgrade failed!")


            self.sleep(self.expire_time)
#            if not self.is_linux:
#                self.wait_node_restarted(self.master, wait_time=1200, wait_if_warmup=True, check_service=True)
            remote = RemoteMachineShellConnection(self.master)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            remote.disconnect()
            self.sleep(30)
            self.verification([self.master])
Example 9
 def _kill_nodes(self, nodes, servers, bucket_name):
     self.reboot = self.input.param("reboot", True)
     if not self.reboot:
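         # kill the memcached process on each node via ns_server diag_eval (pid taken from memcached stats)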
         for node in nodes:
             _node = {
                 "ip": node.ip,
                 "port": node.port,
                 "username": self.servers[0].rest_username,
                 "password": self.servers[0].rest_password,
             }
             node_rest = RestConnection(_node)
             _mc = MemcachedClientHelper.direct_client(_node, bucket_name)
             self.log.info("restarted the node %s:%s" % (node.ip, node.port))
             pid = _mc.stats()["pid"]
             command = 'os:cmd("kill -9 {0} ")'.format(pid)
             self.log.info(command)
             killed = node_rest.diag_eval(command)
             self.log.info("killed ??  {0} ".format(killed))
             _mc.close()
     else:
         for server in servers:
             shell = RemoteMachineShellConnection(server)
             command = "reboot"
             output, error = shell.execute_command(command)
             shell.log_command_output(output, error)
             shell.disconnect()
             time.sleep(self.wait_timeout * 8)
             shell = RemoteMachineShellConnection(server)
             command = "/sbin/iptables -F"
             output, error = shell.execute_command(command)
             shell.log_command_output(output, error)
             shell.disconnect()
Example 10
 def test_warmup(self):
     index_field = self.input.param("index_field", 'name')
     indexes = []
     try:
         indexes = self._create_multiple_indexes(index_field)
         num_srv_warm_up = self.input.param("srv_warm_up", 1)
         if self.input.tuq_client is None:
             self.fail("For this test external tuq server is required. " + \
                       "Please specify one in conf")
         self.test_union_all()
         for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             remote.start_server()
             remote.disconnect()
         #run query, result may not be as expected, but tuq shouldn't fail
         try:
             self.test_union_all()
         except:
             pass
         ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
         self.sleep(5)
         self.test_union_all()
     finally:
         self._delete_multiple_indexes(indexes)
Example 11
    def testClusterInit(self):
        cluster_init_username = self.input.param("cluster_init_username", "Administrator")
        cluster_init_password = self.input.param("cluster_init_password", "password")
        cluster_init_port = self.input.param("cluster_init_port", 8091)
        cluster_init_ramsize = self.input.param("cluster_init_ramsize", 300)
        command_init = self.input.param("command_init", "cluster-init")
        param_prefix = self.input.param("param_prefix", "--cluster-init")
        server = self.servers[-1]
        remote_client = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        rest.force_eject_node()
        self.sleep(5)

        try:
            cli_command = command_init
            options = "--cluster-init-username={0} {1}-password={2} {3}-port={4} {5}-ramsize={6}".\
                format(cluster_init_username, param_prefix, cluster_init_password, param_prefix, cluster_init_port, param_prefix, cluster_init_ramsize)
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user="******", password="******")
            self.assertEqual(output[0], "SUCCESS: init localhost")

            options = "{0}-username={1} {2}-password={3} {4}-port={5}".\
                format(param_prefix, cluster_init_username + "1", param_prefix, cluster_init_password + "1", param_prefix, str(cluster_init_port)[:-1] + "9")
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=cluster_init_username, password=cluster_init_password)
            # MB-8202 cluster-init/edit doesn't provide status
            self.assertTrue(output == [])
            server.rest_username = cluster_init_username + "1"
            server.rest_password = cluster_init_password + "1"
            server.port = str(cluster_init_port)[:-1] + "9"


            cli_command = "server-list"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, cluster_host="localhost", cluster_port=str(cluster_init_port)[:-1] + "9", user=cluster_init_username + "1", password=cluster_init_password + "1")
            self.assertTrue("{0} healthy active".format(str(cluster_init_port)[:-1] + "9") in output[0])
            server_info = self._get_cluster_info(remote_client, cluster_port=server.port, user=server.rest_username, password=server.rest_password)
            result = server_info["otpNode"] + " " + server_info["hostname"] + " " + server_info["status"] + " " + server_info["clusterMembership"]
            self.assertTrue("{0} healthy active".format(str(cluster_init_port)[:-1] + "9") in result)

            cli_command = command_init
            options = "{0}-username={1} {2}-password={3} {4}-port={5}".\
                format(param_prefix, cluster_init_username, param_prefix, cluster_init_password, param_prefix, cluster_init_port)
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", cluster_port=str(cluster_init_port)[:-1] + "9", user=(cluster_init_username + "1"), password=cluster_init_password + "1")
            # MB-8202 cluster-init/edit doesn't provide status
            self.assertTrue(output == [])

            server.rest_username = cluster_init_username
            server.rest_password = cluster_init_password
            server.port = cluster_init_port
            remote_client = RemoteMachineShellConnection(server)
            cli_command = "server-list"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, cluster_host="localhost", user=cluster_init_username, password=cluster_init_password)
            self.assertTrue("{0} healthy active".format(str(cluster_init_port)) in output[0])
            server_info = self._get_cluster_info(remote_client, cluster_port=server.port, user=server.rest_username, password=server.rest_password)
            result = server_info["otpNode"] + " " + server_info["hostname"] + " " + server_info["status"] + " " + server_info["clusterMembership"]
            self.assertTrue("{0} healthy active".format(str(cluster_init_port)) in result)
            remote_client.disconnect()
        finally:
            rest = RestConnection(server)
            rest.force_eject_node()
            self.sleep(5)
            rest.init_cluster()
Example 12
    def replication_while_rebooting_a_non_master_destination_node(self):
        self.set_xdcr_param("xdcrFailureRestartInterval", 1)

        self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
        self._async_modify_data()
        self.sleep(self._timeout)

        i = len(self.dest_nodes) - 1
        shell = RemoteMachineShellConnection(self.dest_nodes[i])
        type = shell.extract_remote_info().type.lower()
        if type == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif type == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        shell.disconnect()
        self.sleep(60, "after rebooting node")
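        # poll until the rebooted node accepts SSH again (up to 10 attempts, 60s apart)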
        num = 0
        while num < 10:
            try:
                shell = RemoteMachineShellConnection(self.dest_nodes[i])
            except BaseException as e:
                self.log.warn("node {0} is unreachable".format(self.dest_nodes[i].ip))
                self.sleep(60, "still can't connect to node")
                num += 1
            else:
                break
Example 13
 def _stop_moxi(self):
     self.log.info("kill moxi server at %s " % self.moxi_server.ip)
     command = "kill -9 $(ps aux | grep -v grep | grep {0} | awk '{{print $2}}')".format(self.moxi_port)
     shell = RemoteMachineShellConnection(self.moxi_server)
     output, error = shell.execute_command_raw(command)
     shell.log_command_output(output, error)
     shell.disconnect()
Example 14
    def user_manage(self, delete, list, set, ro_username, ro_password):
        options = self._get_default_options()
        if delete:
            options += " --delete "
        if list:
            options += " --list "
        if set:
            options += " --set "
        if ro_username is not None:
            options += " --ro-username " + str(ro_username)
        if ro_password:
            options += " --ro-password " + str(ro_password)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("user-manage",
                                                     self.hostname, options)
        remote_client.disconnect()

        if delete:
            return stdout, stderr, self._was_success(stdout, "Local read-only "
                                                             "user deleted")
        elif set:
            return stdout, stderr, self._was_success(stdout, "Local read-only "
                                                             "user deleted")
        else:
            return stdout, stderr, self._no_error_in_output(stdout)
Example 15
    def test_folderMisMatchCluster(self):
        auditIns = audit(host=self.master)
        originalPath = auditIns.getAuditLogPath()
        newPath = originalPath + 'testFolderMisMatch'
        shell = RemoteMachineShellConnection(self.servers[0])
        try:
            shell.create_directory(newPath)
            command = 'chown couchbase:couchbase ' + newPath
            shell.execute_command(command)
        finally:
            shell.disconnect()

        auditIns.setAuditLogPath(newPath)

        for server in self.servers:
            rest = RestConnection(server)
            #Create an Event for Bucket Creation
            expectedResults = {'name':'TestBucket ' + server.ip, 'ram_quota':536870912, 'num_replicas':1,
                                       'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \
                                       'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \
                                        "flush_enabled":False, "num_threads":3, "source":source, \
                                       "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'' }
            rest.create_bucket(expectedResults['name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \
                                       '11211', 'membase', 0, expectedResults['num_threads'], expectedResults['flush_enabled'], 'valueOnly')

            #Check on Events
            try:
                self.checkConfig(self.eventID, self.servers[0], expectedResults)
            except:
                self.log.info ("Issue reading the file at Node {0}".format(server.ip))
Example 16
    def _setting_cluster(self, cmd, data_ramsize, index_ramsize, fts_ramsize,
                         cluster_name, cluster_username,
                         cluster_password, cluster_port):
        options = self._get_default_options()
        if cluster_username is not None:
            options += " --cluster-username " + str(cluster_username)
        if cluster_password is not None:
            options += " --cluster-password " + str(cluster_password)
        if data_ramsize:
            options += " --cluster-ramsize " + str(data_ramsize)
        if index_ramsize:
            options += " --cluster-index-ramsize " + str(index_ramsize)
        if fts_ramsize:
            options += " --cluster-fts-ramsize " + str(fts_ramsize)
        if cluster_name:
            options += " --cluster-name " + str(cluster_name)
        if cluster_port:
            options += " --cluster-port " + str(cluster_port)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli(cmd, self.hostname,
                                                     options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout,
                                                 "Cluster settings modified")
Example 17
    def setting_compaction(self, db_frag_perc, db_frag_size, view_frag_perc,
                           view_frag_size, from_period, to_period,
                           abort_outside, parallel_compact, purgeint):
        options = self._get_default_options()
        if db_frag_perc is not None:
            options += " --compaction-db-percentage " + str(db_frag_perc)
        if db_frag_size is not None:
            options += " --compaction-db-size " + str(db_frag_size)
        if view_frag_perc is not None:
            options += " --compaction-view-percentage " + str(view_frag_perc)
        if view_frag_size is not None:
            options += " --compaction-view-size " + str(view_frag_size)
        if from_period is not None:
            options += " --compaction-period-from " + str(from_period)
        if to_period is not None:
            options += " --compaction-period-to " + str(to_period)
        if abort_outside is not None:
            options += " --enable-compaction-abort " + str(abort_outside)
        if parallel_compact is not None:
            options += " --enable-compaction-parallel " + str(parallel_compact)
        if purgeint is not None:
            options += " --metadata-purge-interval " + str(purgeint)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("setting-compaction",
                                                     self.hostname, options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout, "Compaction "
                                                         "settings modified")
Example 18
    def cluster_init(self, data_ramsize, index_ramsize, fts_ramsize, services,
                     index_storage_mode, cluster_name,
                     cluster_username, cluster_password, cluster_port):
        options = ""
        if cluster_username:
            options += " --cluster-username " + str(cluster_username)
        if cluster_password:
            options += " --cluster-password " + str(cluster_password)
        if data_ramsize:
            options += " --cluster-ramsize " + str(data_ramsize)
        if index_ramsize:
            options += " --cluster-index-ramsize " + str(index_ramsize)
        if fts_ramsize:
            options += " --cluster-fts-ramsize " + str(fts_ramsize)
        if cluster_name:
            options += " --cluster-name " + str(cluster_name)
        if index_storage_mode:
            options += " --index-storage-setting " + str(index_storage_mode)
        if cluster_port:
            options += " --cluster-port " + str(cluster_port)
        if services:
            options += " --services " + str(services)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("cluster-init",
                                                     self.hostname, options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout, "Cluster initialized")
Example 19
    def testClusterInitNegative(self):
        cluster_init_username = self.input.param("cluster_init_username", None)
        cluster_init_password = self.input.param("cluster_init_password", None)
        cluster_init_port = self.input.param("cluster_init_port", None)
        cluster_init_ramsize = self.input.param("cluster_init_ramsize", None)
        command_init = self.input.param("command_init", "cluster-init")
        server = self.servers[-1]
        remote_client = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        rest.force_eject_node()
        self.sleep(5)

        try:
            cli_command = command_init
            options = ""
            if cluster_init_username is not None:
                options += "--cluster-init-username={0} ".format(cluster_init_username)
            if cluster_init_password is not None:
                options += "--cluster-init-password={0} ".format(cluster_init_password)
            if cluster_init_port is not None:
                options += "--cluster-init-port={0} ".format(cluster_init_port)
            if cluster_init_ramsize is None:
                options += "--cluster-init-ramsize={0} ".format(cluster_init_ramsize)

            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=None, password=None)
            self.assertEqual(output[0], 'ERROR: unable to init localhost (400) Bad Request')
            self.assertTrue(output[1] == "[u'Username and password are required.']" or output[1] == "[u'The password must be at least six characters.']")
            remote_client.disconnect()
        finally:
            rest = RestConnection(server)
            rest.force_eject_node()
            self.sleep(5)
            rest.init_cluster()
Example 20
 def rebalance_stop(self):
     options = self._get_default_options()
     remote_client = RemoteMachineShellConnection(self.server)
     stdout, stderr = remote_client.couchbase_cli("rebalance-stop",
                                                  self.hostname, options)
     remote_client.disconnect()
     return stdout, stderr, self._was_success(stdout, "Rebalance stopped")
Example 21
 def mutate_and_check_error404(self, n=1):
     # get vb0 active source node
     active_src_node = self.get_active_vb0_node(self.src_master)
     shell = RemoteMachineShellConnection(active_src_node)
     os_type = shell.extract_remote_info().distribution_type
     if os_type.lower() == 'windows':
         trace_log = "C:/Program Files/Couchbase/Server/var/lib/couchbase/logs/xdcr_trace.log"
     else:
         trace_log = "/opt/couchbase/var/lib/couchbase/logs/xdcr_trace.*"
     num_404_errors_before_load, error = shell.execute_command("grep \"error,404\" {} | wc -l"
                                                                  .format(trace_log))
     num_get_remote_bkt_failed_before_load, error = shell.execute_command("grep \"get_remote_bucket_failed\" \"{}\" | wc -l"
                                                                  .format(trace_log))
     self.log.info("404 errors: {}, get_remote_bucket_failed errors : {}".
                   format(num_404_errors_before_load, num_get_remote_bkt_failed_before_load))
     self.sleep(60)
     self.log.info("################ New mutation:{} ##################".format(self.key_counter+1))
     self.load_one_mutation_into_source_vb0(active_src_node)
     self.sleep(5)
     num_404_errors_after_load, error = shell.execute_command("grep \"error,404\" {} | wc -l"
                                                                  .format(trace_log))
     num_get_remote_bkt_failed_after_load, error = shell.execute_command("grep \"get_remote_bucket_failed\" \"{}\" | wc -l"
                                                                  .format(trace_log))
     self.log.info("404 errors: {}, get_remote_bucket_failed errors : {}".
                   format(num_404_errors_after_load, num_get_remote_bkt_failed_after_load))
     shell.disconnect()
     if (int(num_404_errors_after_load[0]) > int(num_404_errors_before_load[0])) or \
        (int(num_get_remote_bkt_failed_after_load[0]) > int(num_get_remote_bkt_failed_before_load[0])):
         self.log.info("Checkpointing error-404 verified after dest failover/rebalance out")
         return True
     else:
          self.log.info("404 errors on source node before last load : {}, after last load: {}".
                        format(int(num_404_errors_before_load[0]), int(num_404_errors_after_load[0])))
         self.log.error("Checkpoint 404 error NOT recorded at source following dest failover or rebalance!")
Example 22
 def rebalance_in_with_DB_time_compaction(self):
     remote_client = RemoteMachineShellConnection(self.master)
     rest = RestConnection(self.master)
     currTime = datetime.datetime.now()
     fromTime = currTime + datetime.timedelta(hours=1)
     toTime = currTime + datetime.timedelta(hours=24)
     self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=fromTime.hour,
                              allowedTimePeriodFromMin=fromTime.minute, allowedTimePeriodToHour=toTime.hour, allowedTimePeriodToMin=toTime.minute,
                              allowedTimePeriodAbort="false")
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     self._monitor_DB_fragmentation()
     for i in xrange(10):
         active_tasks = self.cluster.async_monitor_active_task(self.master, "bucket_compaction", "bucket", wait_task=False)
         for active_task in active_tasks:
             result = active_task.result()
             self.assertTrue(result)
             self.sleep(2)
     currTime = datetime.datetime.now()
     #Need to make it configurable
     newTime = currTime + datetime.timedelta(minutes=5)
     self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=currTime.hour,
                              allowedTimePeriodFromMin=currTime.minute, allowedTimePeriodToHour=newTime.hour, allowedTimePeriodToMin=newTime.minute,
                              allowedTimePeriodAbort="false")
     servs_in = self.servers[self.nodes_init:self.nodes_in + 1]
     rebalance = self.cluster.async_rebalance([self.master], servs_in, [])
     compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
                                                                  timeout_in_seconds=(self.wait_timeout * 5))
     rebalance.result()
     if compact_run:
         self.log.info("auto compaction run successfully")
     else:
         self.fail("auto compaction does not run")
     remote_client.disconnect()
Example 23
 def verify_for_recovery_type(self, chosen = [], serverMap = {}, buckets = [], recoveryTypeMap = {}, fileMap = {}, deltaRecoveryBuckets = []):
     """ Verify recovery type is delta or full """
     summary = ""
     logic = True
     for server in self.chosen:
         shell = RemoteMachineShellConnection(serverMap[server.ip])
         os_type = shell.extract_remote_info()
         if os_type.type.lower() == 'windows':
             return
         for bucket in buckets:
             path = fileMap[server.ip][bucket.name]
             exists = shell.file_exists(path,"check.txt")
             if deltaRecoveryBuckets != None:
                 if recoveryTypeMap[server.ip] == "delta" and (bucket.name in deltaRecoveryBuckets) and not exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip,bucket.name)
                 elif recoveryTypeMap[server.ip] == "delta" and (bucket.name not in deltaRecoveryBuckets) and exists:
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Full, Actual Delta".format(server.ip,bucket.name)
                     logic = False
             else:
                 if recoveryTypeMap[server.ip] == "delta"  and not exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip,bucket.name)
                 elif recoveryTypeMap[server.ip] == "full" and exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1}  :: Expected Full, Actual Delta".format(server.ip,bucket.name)
         shell.disconnect()
     self.assertTrue(logic, summary)
Example 24
 def rebalance_in_out_with_auto_DB_compaction(self):
     remote_client = RemoteMachineShellConnection(self.master)
     rest = RestConnection(self.master)
     self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
                         "ERROR: Not enough nodes to do rebalance in and out")
     servs_init = self.servers[:self.nodes_init]
     servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
     servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
     result_nodes = set(servs_init + servs_in) - set(servs_out)
     self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value)
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     rebalance = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
     while rebalance.state != "FINISHED":
         self._monitor_DB_fragmentation()
         compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
                                                              timeout_in_seconds=(self.wait_timeout * 5))
     rebalance.result()
     monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, 0, self.default_bucket_name)
     result = monitor_fragm.result()
     if compact_run:
         self.log.info("auto compaction run successfully")
     elif result:
         self.log.info("Compaction is already completed")
     else:
         self.fail("auto compaction does not run")
     self.verify_cluster_stats(result_nodes)
     remote_client.disconnect()
Example 25
 def offline_cluster_upgrade_with_reinstall(self):
     self._install(self.servers[:self.nodes_init])
     self.operations(self.servers[:self.nodes_init])
     if self.ddocs_num:
         self.create_ddocs_and_views()
     if self.during_ops:
         for opn in self.during_ops:
             getattr(self, opn)()
     num_nodes_reinstall = self.input.param('num_nodes_reinstall', 1)
     stoped_nodes = self.servers[self.nodes_init - (self.nodes_init - num_nodes_reinstall):self.nodes_init]
     nodes_reinstall = self.servers[:num_nodes_reinstall]
     for upgrade_version in self.upgrade_versions:
         self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                    format(upgrade_version))
         for server in stoped_nodes:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             remote.disconnect()
         self.sleep(self.sleep_time)
         upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
         self.force_reinstall(nodes_reinstall)
         for upgrade_thread in upgrade_threads:
             upgrade_thread.join()
         success_upgrade = True
         while not self.queue.empty():
             success_upgrade &= self.queue.get()
         if not success_upgrade:
             self.fail("Upgrade failed!")
         self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
         self.verification(self.servers[:self.nodes_init])
Example 26
 def offline_cluster_upgrade(self):
     self._install(self.servers[:self.nodes_init])
     self.operations(self.servers[:self.nodes_init])
     seqno_expected = 1
     if self.ddocs_num:
         self.create_ddocs_and_views()
         if self.input.param('run_index', False):
             self.verify_all_queries()
     if not self.initial_version.startswith("1.") and self.input.param('check_seqno', True):
         self.check_seqno(seqno_expected)
     if self.during_ops:
         for opn in self.during_ops:
             if opn != 'add_back_failover':
                 getattr(self, opn)()
     num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
     upgrade_nodes = self.servers[self.nodes_init - num_stoped_nodes :self.nodes_init]
     for upgrade_version in self.upgrade_versions:
         self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                    format(upgrade_version))
         for server in upgrade_nodes:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             self.sleep(self.sleep_time)
             if self.wait_expire:
                 self.sleep(self.expire_time)
             if self.input.param('remove_manifest_files', False):
                 for file in ['manifest.txt', 'manifest.xml', 'VERSION.txt']:
                     output, error = remote.execute_command("rm -rf /opt/couchbase/{0}".format(file))
                     remote.log_command_output(output, error)
             if self.input.param('remove_config_files', False):
                 for file in ['config', 'couchbase-server.node', 'ip', 'couchbase-server.cookie']:
                     output, error = remote.execute_command("rm -rf /opt/couchbase/var/lib/couchbase/{0}".format(file))
                     remote.log_command_output(output, error)
                 self.buckets = []
             remote.disconnect()
         upgrade_threads = self._async_update(upgrade_version, upgrade_nodes)
         #wait upgrade statuses
         for upgrade_thread in upgrade_threads:
             upgrade_thread.join()
         success_upgrade = True
         while not self.queue.empty():
             success_upgrade &= self.queue.get()
         if not success_upgrade:
             self.fail("Upgrade failed. See logs above!")
         self.sleep(self.expire_time)
         if self.during_ops:
             if "add_back_failover" in self.during_ops:
                 getattr(self, 'add_back_failover')()
                 self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
             elif "failover" in self.during_ops:
                 self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
                 rem = [server for server in self.servers[:self.nodes_init]
                      if self.failover_node.ip == server.ip and str(self.failover_node.port) == server.port]
                 self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
                 self.verification(list(set(self.servers[:self.nodes_init]) - set(rem)))
                 return
         self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
         self.verification(self.servers[:self.nodes_init])
         if self.input.param('check_seqno', True):
             self.check_seqno(seqno_expected)
Example 27
    def setting_index(self, max_rollbacks, stable_snap_interval,
                      mem_snap_interval, storage_mode, threads,
                      log_level):
        options = self._get_default_options()
        if max_rollbacks:
            options += " --index-max-rollback-points " + str(max_rollbacks)
        if stable_snap_interval:
            options += " --index-stable-snapshot-interval " + str(
                stable_snap_interval)
        if mem_snap_interval:
            options += " --index-memory-snapshot-interval " + str(
                mem_snap_interval)
        if storage_mode:
            options += " --index-storage-setting " + str(storage_mode)
        if threads:
            options += " --index-threads " + str(threads)
        if log_level:
            options += " --index-log-level " + str(log_level)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("setting-index",
                                                     self.hostname, options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout,
                                                 "Indexer settings modified")
Example 28
    def bucket_create(self, name, password, bucket_type, quota,
                      eviction_policy, replica_count, enable_replica_indexes,
                      priority, enable_flush, wait):
        options = self._get_default_options()
        if name is not None:
            options += " --bucket " + name
        if password is not None:
            options += " --bucket-password " + password
        if bucket_type is not None:
            options += " --bucket-type " + bucket_type
        if quota is not None:
            options += " --bucket-ramsize " + str(quota)
        if eviction_policy is not None:
            options += " --bucket-eviction-policy " + eviction_policy
        if replica_count is not None:
            options += " --bucket-replica " + str(replica_count)
        if enable_replica_indexes is not None:
            options += " --enable-index-replica " + str(enable_replica_indexes)
        if priority is not None:
            options += " --bucket-priority " + priority
        if enable_flush is not None:
            options += " --enable-flush " + str(enable_flush)
        if wait:
            options += " --wait"

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("bucket-create",
                                                     self.hostname, options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout, "Bucket created")
Example 29
 def run_ops(self):
     tasks = []
     if not self.ops:
         return tasks
     if self.ops == 'rebalance_in':
         tasks.append(self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                             self.servers[self.nodes_init:self.nodes_init + self.nodes_in], []))
         self.nodes_init += self.nodes_in
     elif self.ops == 'rebalance_out':
         tasks.append(self.cluster.async_rebalance(self.servers[:self.nodes_init],
                 [], self.servers[(self.nodes_init - self.nodes_out):self.nodes_init]))
         self.nodes_init -= self.nodes_out
     elif self.ops == 'failover':
         tasks.append(self.cluster.failover(self.servers[:self.nodes_init],
                 self.servers[(self.nodes_init - self.nodes_out):self.nodes_init]))
         self.sleep(20)
         self.nodes_init -= self.nodes_out
     if self.ops == 'create_views':
         views_num = 10
         views = self.make_default_views(self.view_name, views_num, different_map=True)
         tasks.extend(self.async_create_views(self.master, self.ddoc_name, views))
     if self.ops == 'restart':
         servers_to_choose = [serv for serv in self.servers if self.master.ip != serv.ip]
         self.assertTrue(servers_to_choose, "There is only one node in cluster")
         shell = RemoteMachineShellConnection(servers_to_choose[0])
         try:
             shell.stop_couchbase()
             shell.start_couchbase()
         finally:
             shell.disconnect()
         self.sleep(5, "Server %s is starting..." % servers_to_choose[0].ip)
     return tasks
Example 30
    def test_large_file_version(self):
        rest = RestConnection(self.master)
        remote_client = RemoteMachineShellConnection(self.master)
        remote_client.extract_remote_info()

        self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
        self.disable_compaction()
        self._monitor_DB_fragmentation()

        # bump the couchstore data file revision suffix (.1/.2 -> .65535) to simulate a very large file version before compaction

        remote_client.stop_couchbase()
        time.sleep(5)
        remote_client.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .1 .65535 *.1")
        remote_client.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .2 .65535 *.2")
        remote_client.start_couchbase()

        for i in range(5):
            self.log.info("starting a compaction iteration")
            compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)

            compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name, timeout_in_seconds=self.wait_timeout)
            res = compaction_task.result(self.wait_timeout)


        if compact_run:
            self.log.info("auto compaction run successfully")
        else:
            self.fail("auto compaction does not run")

        remote_client.disconnect()
Example 31
 def create_user(self):
     userCreateCmmd = 'dn: cn=' + self.user_name + "," + self.LDAP_DN + "\n" \
                         "cn:" + self.user_name + "\n" \
                         "sn: " + self.user_name + "\n" \
                         'objectClass: ' + self.LDAP_OBJECT_CLASS + "\n" \
                         "userPassword :"******"\n" \
                         "uid: " + self.user_name + "\n"
     fileName = 'name.ldif'
     # Execute ldapadd command to add users to the system
     shell = RemoteMachineShellConnection(self.host)
     o = ''
     r = ''
     try:
         shell.write_remote_file("/tmp", fileName, userCreateCmmd)
         command = "ldapadd -h " + self.LDAP_HOST + " -p " + self.LDAP_PORT + " -f /tmp/" + fileName + " -D " + \
                   self.LDAP_ADMIN_USER + " -w " + self.LDAP_ADMIN_PASS
         o, r = shell.execute_command(command)
         shell.log_command_output(o, r)
         command = "rm -rf /tmp/*.ldif"
         o, r = shell.execute_command(command)
         shell.log_command_output(o, r)
     finally:
         shell.disconnect()
         return o
Example 32
    def testSGServiceInstallHelp(self):
        shell = RemoteMachineShellConnection(self.master)
        self.kill_processes_gateway(shell)
        self.uninstall_gateway(shell)
        self.install_gateway(shell)
        output, error = self.run_sync_gateway_service_install(shell, "-h")
        self.assertEqual(error, [])
        self.assertEqual(output, help)

        output, error = self.run_sync_gateway_service_install(shell)
        self.assertEqual(
            error[0],
            "The sync_gateway runtime user account does not exist \"sync_gateway\"."
        )
        self.assertEqual(output, [])

        output, error = self.run_sync_gateway_service_install(
            shell, "bla-bla-bla")
        temp_help = ["ERROR: unknown parameter \"bla-bla-bla\""]
        temp_help.extend(help)

        self.assertEqual(error, [])
        self.assertEqual(output, temp_help)
        shell.disconnect()
Example 33
 def test_software_version(self):
     """
       This test requires to pass 3 params to run:
         software_name
         software_version
         check_in_file
     """
     self.software_name = self.input.param("software_name", None)
     self.software_version = self.input.param("software_version", None)
     self.check_in_file = self.input.param("check_in_file", "manifest.xml")
     if self.software_name is None or self.software_version is None:
         self.fail("This test needs to pass param 'software_name'\
                                              and software_version to run")
     go_software = ["gocb", "gocbcore"]
     go_sw_in_version = ["5.1.2", "5.5.1"]
     if self.software_name in go_software and \
         (self.cb_version[:5] in go_sw_in_version or 6.0 <= float(self.cb_version[:3])):
         shell = RemoteMachineShellConnection(self.master)
         output, error = shell.execute_command(
             'cat {0}/{1} | grep \'"{2}"\' '.format(self.base_cb_path,
                                                    self.check_in_file,
                                                    self.software_name))
         shell.disconnect()
         found_version = False
         if output:
             for ele in output:
                 if "gocb" in ele and self.software_version in ele:
                     found_version = True
                     self.log.info("software info: {0}".format(ele))
                     break
         if not found_version:
             self.fail("version of {0} does not match as in: {1}"\
                               .format(self.software_name, output))
     else:
         self.log.info(
             "software name/version are not in running cb version")
Example 34
 def test_offline_upgrade(self):
     upgrade_nodes = self.servers[:self.nodes_init]
     if self.disable_plasma_upgrade:
         self._install(self.nodes_in_list, version=self.upgrade_to)
         rebalance = self.cluster.async_rebalance(
             self.servers[:self.nodes_init], [self.nodes_in_list[0]], [],
             services=["index"])
         rebalance.result()
         self.sleep(100)
         self.disable_upgrade_to_plasma(self.nodes_in_list[0])
     for server in upgrade_nodes:
         remote = RemoteMachineShellConnection(server)
         remote.stop_server()
         remote.disconnect()
         upgrade_threads = self._async_update(self.upgrade_to, [server])
         for upgrade_thread in upgrade_threads:
             upgrade_thread.join()
         self.upgrade_servers.append(server)
     self.sleep(100)
     msg = "Cluster is not healthy after upgrade"
     self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
     log.info("Cluster is healthy")
     self.assertTrue(self.wait_until_indexes_online(),
                     "Some indexes are not online")
     log.info("All indexes are online")
     self.add_built_in_server_user()
     self.sleep(20)
     if self.initial_version.split("-")[0] in UPGRADE_VERS:
         self.multi_drop_index()
         self.sleep(100)
         self._create_indexes()
         self.sleep(100)
     self._query_index("post_upgrade")
     self._verify_post_upgrade_results()
     self._update_int64_dataset()
     self._query_for_long_num()
Example 35
 def get_views_definition_from_backup_file(self, server, backup_dir,
                                           buckets):
     """
         Extract the views definition from a backup directory, e.g.
         /tmp/entbackup/backup/20*/default-*/views.json
         Return: views.json content as a string, or False if not found
     """
     conn = RemoteMachineShellConnection(server)
     result = False
     backup_data = {}
     for bucket in buckets:
         backup_data[bucket.name] = {}
         output, _ = conn.execute_command("ls %s/backup/20*/%s* " %
                                          (backup_dir, bucket.name))
         if "views.json" in output:
             cmd = "cat %s/backup/20*/%s*/views.json" \
                   % (backup_dir, bucket.name)
             views_output, _ = conn.execute_command(cmd)
             views_output = [x.strip(' ') for x in views_output]
             if views_output:
                 views_output = " ".join(views_output)
                 result = views_output
     conn.disconnect()
     return result
Example 36
 def chmod(self, server, path, mod="000"):
     '''
          # (Base-10)  Binary  Sum (in binary)   Sum (in decimal)      rwx  Permission
          7            111     = 100 + 10 + 1    = 4(r) + 2(w) + 1(x)  rwx  read, write and execute
          6            110     = 100 + 10        = 4(r) + 2(w)         rw-  read and write
          5            101     = 100 + 1         = 4(r) + 1(x)         r-x  read and execute
          4            100     = 100             = 4(r)                r--  read only
          3            011     = 10 + 1          = 2(w) + 1(x)         -wx  write and execute
          2            010     = 10              = 2(w)                -w-  write only
          1            001     = 1               = 1(x)                --x  execute only
          0            000     = 0               = 0                   ---  none
     '''
     self.stop_chmod = False
     while self.stop_chmod is False:
         shell = RemoteMachineShellConnection(server)
         self.log.debug("{}: changing mod to {} for {}".format(
             server.ip, mod, path))
         shell.execute_command("chmod {} {}".format(mod, path))
         self.sleep(5)
         self.log.debug("{}: changing mod to {} for {}".format(
             server.ip, "777", path))
         shell.execute_command("chmod {} {}".format("777", path))
         self.sleep(5)
         shell.disconnect()
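Because the helper loops until self.stop_chmod is flipped, callers would normally run it on a background thread while some other operation is in flight; a sketch (the path is hypothetical, threading is from the standard library):

# Hypothetical driver: flap the permissions while the operation under test runs.
import threading

chmod_thread = threading.Thread(target=self.chmod,
                                args=(server, "/tmp/entbackup", "000"))
chmod_thread.start()
# ... run the operation under test here ...
self.stop_chmod = True
chmod_thread.join()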
Example No. 38
0
 def change_port(self, new_port="9090", current_port='8091'):
     nodes = RestConnection(self.cluster.master).node_statuses()
     remote_client = RemoteMachineShellConnection(self.cluster.master)
     options = "--cluster-port=%s" % new_port
     cli_command = "cluster-edit"
     output, error = remote_client.execute_couchbase_cli(
         cli_command=cli_command,
         options=options,
         cluster_host="localhost:%s" % current_port,
         user=self.cluster.master.rest_username,
         password=self.cluster.master.rest_password)
     self.log.info(output)
     remote_client.disconnect()
     # MB-10136 & MB-9991
     if error:
         raise Exception("Port didn't change! %s" % error)
     self.port = new_port
     self.log.info("New port '%s' on nodes: %s" %
                   (new_port, [node.ip for node in nodes]))
     for node in nodes:
         for server in self.cluster.servers:
             if server.ip == node.ip and int(server.port) == int(node.port):
                 server.port = new_port
                 break
Example No. 39
0
    def set_metadata_purge_interval(self, value, buckets=[], node=None):
        self.log.info(
            "Changing the bucket properties by changing {0} to {1}".format(
                "purge_interval", value))
        if not buckets:
            buckets = self.buckets
        if node is None:
            node = self.cluster.master
        rest = RestConnection(node)

        shell = RemoteMachineShellConnection(node)
        shell.enable_diag_eval_on_non_local_hosts()
        shell.disconnect()

        for bucket in buckets:
            cmd = '{ok, BC} = ns_bucket:get_bucket(' \
                  '"%s"), BC2 = lists:keyreplace(purge_interval, ' \
                  '1, BC, {purge_interval, %f})' \
                  ', ns_bucket:set_bucket_config("%s", BC2).' \
                  % (bucket.name, value, bucket.name)
            rest.diag_eval(cmd)

        # Restart Memcached in all cluster nodes to reflect the settings
        for server in self.cluster_util.get_kv_nodes(self.cluster,
                                                     master=node):
            shell = RemoteMachineShellConnection(server)
            shell.restart_couchbase()
            shell.disconnect()

        # Check bucket warm-up after the Couchbase restart
        retry_count = 10
        buckets_warmed_up = self.bucket_util.is_warmup_complete(
            self.cluster, buckets, retry_count)
        if not buckets_warmed_up:
            self.log.critical("Few bucket(s) not warmed up "
                              "within expected time")
Example No. 40
0
 def test_prompt_enter_correct_password(self):
     try:
         self.secretmgmt_base_obj.set_password(self.master, self.password)
         shell = RemoteMachineShellConnection(self.master)
         shell.execute_command("/opt/couchbase/etc/couchbase_init.d stop")
         shell.disconnect()
         self.secretmgmt_base_obj.start_server_prompt_diff_window(
             self.master)
         self.sleep(10)
         # self.secretmgmt_base_obj.incorrect_password(self.master, cmd="/opt/couchbase/bin/cbmaster_password",
         #                                            retries_number=1, input_correct_pass=True, correct_pass='******')
         cmd = "/opt/couchbase/bin/couchbase-cli master-password -c localhost:8091 -u Administrator -p password --send-password"
         temp_result = self.secretmgmt_base_obj.correct_password_on_prompt(
             self.master, self.password, cmd=cmd)
         self.assertTrue(
             temp_result,
             "Issue with passing in correct password on prompt")
     finally:
         for server in self.servers:
             shell = RemoteMachineShellConnection(server)
             if (RemoteMachineHelper(shell).is_process_running('memcached')
                     is None):
                 shell.set_environment_variable("CB_MASTER_PASSWORD",
                                                self.password)
Example No. 41
0
class Examine:
    def __init__(self, server):
        self.remote_connection = RemoteMachineShellConnection(server)

    def examine(self, examine_arguments):
        """ Returns an ExamineResults given an ExamineArguments object """
        if not examine_arguments.json:
            raise ValueError(
                "Currently the non-JSON data output from the examine sub-command is not supported for testing."
            )

        output, error, exit_code = self.remote_connection.execute_command(
            examine_arguments.to_command(), get_exit_code=True)

        if exit_code != 0 or not output:
            return None, error

        return ExamineResult.from_output(output[0])

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.remote_connection.disconnect()
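Since __enter__/__exit__ are defined, the class is intended to be used as a context manager so the SSH connection is always released; a sketch (examine_arguments stands in for an ExamineArguments object with JSON output enabled):

# Hypothetical usage of the Examine wrapper above.
with Examine(server) as examiner:
    result = examiner.examine(examine_arguments)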
Example No. 42
0
    def run(self):
        remote_client = RemoteMachineShellConnection(self.server)
        now = datetime.now()
        day = now.day
        month = now.month
        year = now.year
        hour = now.timetuple().tm_hour
        minute = now.timetuple().tm_min
        file_name = "%s-%s%s%s-%s%s-diag.zip" % (self.server.ip, month, day, year, hour, minute)
        print("Collecting logs from %s\n" % self.server.ip)
        output, error = remote_client.execute_cbcollect_info(file_name)
        print("\n".join(output))
        print("\n".join(error))

        user_path = "/home/"
        if self.server.ssh_username == "root":
            user_path = "/"
        if not remote_client.file_exists("%s%s" % (user_path, self.server.ssh_username), file_name):
            raise Exception("%s doesn't exist on the server" % file_name)
        if remote_client.get_file("%s%s" % (user_path, self.server.ssh_username), file_name, "%s/%s" % (self.path, file_name)):
            print("Downloading zipped logs from %s" % self.server.ip)
        else:
            raise Exception("Failed to download zipped logs from %s" % self.server.ip)
        remote_client.disconnect()
Example No. 43
0
    def configure_bucket_backups(self):
        self.cluster_util.update_cluster_nodes_service_list(self.cluster)
        backup_node = self.cluster.backup_nodes[0]
        backup_helper = BackupHelper(backup_node)

        self.log.info("Creating permissions for backup folder")
        backup_configs = self.cluster_config["cb_cluster"]["backup"]
        shell = RemoteMachineShellConnection(backup_node)
        for backup_config in backup_configs:
            plan_params = dict()
            repo_params = dict()
            if "plan" in backup_config:
                plan_params["plan"] = backup_config["plan"]
                repo_params["plan"] = backup_config["plan"]
            if "description" in backup_config:
                plan_params["description"] = backup_config["description"]
            if "archive_path" in backup_config:
                repo_params["archive"] = backup_config["archive_path"]
                shell.execute_command("mkdir -p {0} ; chmod 777 {0}"
                                      .format(backup_config["archive_path"]))
            if "bucket" in backup_config:
                repo_params["bucket"] = backup_config["bucket"]

            if plan_params["plan"] not in ["_hourly_backups",
                                           "_daily_backups"]:
                self.log.info("Updating custom plan %s" % plan_params["plan"])
                status = backup_helper.create_edit_plan("create", plan_params)
                if status is False:
                    self.fail("Backup %s create failed" % backup_config)

            # Create repo
            status = backup_helper.create_repo(backup_config["repo_id"],
                                               repo_params)
            if status is False:
                self.fail("Create repo failed for %s" % backup_config)
        shell.disconnect()
Example No. 44
0
 def get_checkpoint_call_history(self, node):
     shell = RemoteMachineShellConnection(node)
     os_type = shell.extract_remote_info().distribution_type
     if os_type.lower() == 'windows':
         couchdb_log = "C:/Program Files/Couchbase/Server/var/lib/couchbase/logs/couchdb.log"
     else:
         couchdb_log = "/opt/couchbase/var/lib/couchbase/logs/couchdb.log"
     total_chkpt_calls, error = shell.execute_command(
         "grep \"POST /_commit_for_checkpoint\" \"{0}\" | wc -l".format(
             couchdb_log))
     total_successful_chkpts, error = shell.execute_command(
         "grep \"POST /_commit_for_checkpoint 200\" \"{0}\" | wc -l".format(
             couchdb_log))
     self.log.info(int(total_successful_chkpts[0]))
     if self.num_successful_chkpts_so_far != 0:
         checkpoint_number = int(total_successful_chkpts[0]
                                 ) - self.num_successful_chkpts_beginning
         self.log.info("Checkpoint on this node (this run): {0}".format(
             checkpoint_number))
     shell.disconnect()
     total_commit_failures = int(total_chkpt_calls[0]) - int(
         total_successful_chkpts[0])
     return int(total_chkpt_calls[0]), int(
         total_successful_chkpts[0]), total_commit_failures
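The per-run delta is only logged when a baseline was captured earlier, so a typical flow is to snapshot the counters before the replication workload and diff them afterwards; a sketch (attribute names follow the ones referenced above):

# Hypothetical flow: record the baseline, run the workload, then re-read the counters.
calls, successes, failures = self.get_checkpoint_call_history(node)
self.num_successful_chkpts_beginning = successes
self.num_successful_chkpts_so_far = successes
# ... XDCR/checkpoint workload runs here ...
calls, successes, failures = self.get_checkpoint_call_history(node)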
Example No. 45
0
    def checkTLS1_1_blocking(self):
        self.get_the_testssl_script(self.TEST_SSL_FILENAME)
        command = "ns_config:set(ssl_minimum_protocol, 'tlsv1.2')"
        self.log.info("posting: %s" % command)
        rest = RestConnection(self.master)
        res = rest.diag_eval(command)

        # do the initial check
        self.check_all_servers(rest)

        # restart the server
        try:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.stop_couchbase()
                time.sleep(10)  # Avoid using sleep like this on further calls
                shell.start_couchbase()
                shell.disconnect()
        except Exception as e:
            self.log.error(traceback.format_exc())

        # and check again
        time.sleep(30)
        self.check_all_servers(rest)
Example No. 46
0
    def retrieve_request_status_using_handle(self, server, handle):
        """
        Retrieves status of a request from /analytics/status endpoint
        """
        shell = RemoteMachineShellConnection(server)

        output, error = shell.execute_command("""curl -v {0}""".format(handle))

        response = ""
        for line in output:
            response = response + line
        if response:
            response = json.loads(response)
        shell.disconnect()

        status = ""
        handle = ""
        if 'status' in response:
            status = response['status']
        if 'handle' in response:
            handle = response['handle']

        self.log.info("status=%s, handle=%s" % (status, handle))
        return status, handle
Example No. 47
0
    def test_stream_after_warmup(self):
        nodeA = self.cluster.servers[0]
        bucket = self.bucket_util.buckets[1]

        shell_conn = RemoteMachineShellConnection(nodeA)
        cb_stat_obj = Cbstats(shell_conn)

        # load all buckets
        doc_gen = doc_generator(self.key, 0, self.num_items)
        self._load_all_buckets(self.master, doc_gen, "create", 0)
        self._wait_for_stats_all_buckets()

        # store expected vb seqnos
        originalVbInfo = self.all_vb_info(cb_stat_obj, bucket.name)

        # restart node
        self.assertTrue(self.stop_node(0), msg="Failed during stop_node")
        self.sleep(5, "Wait after stop_node")
        self.assertTrue(self.start_node(0), msg="Failed during start_node")
        rest = RestHelper(RestConnection(nodeA))
        self.assertTrue(rest.is_ns_server_running(),
                        msg="Failed while is_ns_server_running")
        self.sleep(2, "Wait after ns_server_start")

        # verify original vbInfo can be streamed
        dcp_client = self.dcp_client(nodeA, PRODUCER, bucket_name=bucket.name)
        for vbucket in originalVbInfo:
            vb_uuid, _, high_seqno = originalVbInfo[vbucket]
            stream = dcp_client.stream_req(vbucket, 0, 0, high_seqno, vb_uuid)
            _ = stream.run()
            self.assertTrue(high_seqno == stream.last_by_seqno,
                            msg="Mismatch in high_seqno. {0} == {1}".format(
                                high_seqno, stream.last_by_seqno))

        # Disconnect the shell_conn
        shell_conn.disconnect()
Example No. 48
0
    def rebalance_in_with_warming_up(self):
        servs_in = self.servers[self.nodes_init:self.nodes_init +
                                self.nodes_in]
        servs_init = self.servers[:self.nodes_init]
        warmup_node = servs_init[-1]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        self.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        try:
            rebalance = self.cluster.async_rebalance(servs_init, servs_in, [])
            rebalance.result()
        except RebalanceFailedException:
            self.log.info("rebalance was failed as expected")
            self.assertTrue(ClusterOperationHelper._wait_warmup_completed(self, [warmup_node], \
                            self.default_bucket_name, wait_time=self.wait_timeout * 10))

            self.log.info("second attempt to rebalance")
            rebalance = self.cluster.async_rebalance(servs_init + servs_in, [],
                                                     [])
            rebalance.result()
        self.verify_cluster_stats(self.servers[:self.nodes_in +
                                               self.nodes_init])
Example No. 49
0
    def execute_statement_on_cbas(self, statement, server, mode=None):
        """
        Executes a statement on CBAS using the REST API through curl command
        """
        shell = RemoteMachineShellConnection(server)
        if mode:
            output, error = shell.execute_command(
                """curl -s --data pretty=true --data mode={2} --data-urlencode 'statement={1}' http://{0}:8095/analytics/service -v"""
                .format(self.cbas_node.ip, statement, mode))
        else:
            output, error = shell.execute_command(
                """curl -s --data pretty=true --data-urlencode 'statement={1}' http://{0}:8095/analytics/service -v"""
                .format(self.cbas_node.ip, statement))
        response = ""
        for line in output:
            response = response + line
        response = json.loads(response)
        self.log.info(response)
        shell.disconnect()

        if "errors" in response:
            errors = response["errors"]
        else:
            errors = None

        if "results" in response:
            results = response["results"]
        else:
            results = None

        if "handle" in response:
            handle = response["handle"]
        else:
            handle = None

        return response["status"], response["metrics"], errors, results, handle
Example No. 50
0
 def initialize(self, params):
     start_time = time.time()
     cluster_initialized = False
     server = params["server"]
     while time.time() < (start_time + (5 * 60)):
         rest = RestConnection(server)
         try:
             if server.data_path:
                 remote_client = RemoteMachineShellConnection(server)
                 remote_client.execute_command('rm -rf {0}/*'.format(server.data_path))
                 # Make sure that data_path is writable by membase user
                 remote_client.execute_command("chown -R membase.membase {0}".format(server.data_path))
                 remote_client.disconnect()
                 rest.set_data_path(data_path=server.data_path)
             rest.init_cluster(username=server.rest_username, password=server.rest_password)
             rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
             cluster_initialized = True
             break
         except ServerUnavailableException:
             log.error("error happened while initializing the cluster @ {0}".format(server.ip))
         log.info('sleep for 5 seconds before trying again ...')
         time.sleep(5)
     if not cluster_initialized:
         raise Exception("unable to initialize membase node")
Example No. 51
0
    def testSettingCompaction(self):
        '''setting-compaction OPTIONS:
        --compaction-db-percentage=PERCENTAGE     at which point database compaction is triggered
        --compaction-db-size=SIZE[MB]             at which point database compaction is triggered
        --compaction-view-percentage=PERCENTAGE   at which point view compaction is triggered
        --compaction-view-size=SIZE[MB]           at which point view compaction is triggered
        --compaction-period-from=HH:MM            allow compaction time period from
        --compaction-period-to=HH:MM              allow compaction time period to
        --enable-compaction-abort=[0|1]           allow compaction abort when time expires
        --enable-compaction-parallel=[0|1]        allow parallel compaction for database and view'''
        compaction_db_percentage = self.input.param("compaction-db-percentage", None)
        compaction_db_size = self.input.param("compaction-db-size", None)
        compaction_view_percentage = self.input.param("compaction-view-percentage", None)
        compaction_view_size = self.input.param("compaction-view-size", None)
        compaction_period_from = self.input.param("compaction-period-from", None)
        compaction_period_to = self.input.param("compaction-period-to", None)
        enable_compaction_abort = self.input.param("enable-compaction-abort", None)
        enable_compaction_parallel = self.input.param("enable-compaction-parallel", None)
        bucket = self.input.param("bucket", "default")
        output = self.input.param("output", '')
        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "setting-compaction"
        options = (" --compaction-db-percentage={0}".format(compaction_db_percentage), "")[compaction_db_percentage is None]
        options += (" --compaction-db-size={0}".format(compaction_db_size), "")[compaction_db_size is None]
        options += (" --compaction-view-percentage={0}".format(compaction_view_percentage), "")[compaction_view_percentage is None]
        options += (" --compaction-view-size={0}".format(compaction_view_size), "")[compaction_view_size is None]
        options += (" --compaction-period-from={0}".format(compaction_period_from), "")[compaction_period_from is None]
        options += (" --compaction-period-to={0}".format(compaction_period_to), "")[compaction_period_to is None]
        options += (" --enable-compaction-abort={0}".format(enable_compaction_abort), "")[enable_compaction_abort is None]
        options += (" --enable-compaction-parallel={0}".format(enable_compaction_parallel), "")[enable_compaction_parallel is None]

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        expectedResults = {"parallel_db_and_view_compaction":False, "database_fragmentation_threshold:size":10485760, "database_fragmentation_threshold:view_fragmentation_threshold":{},
                           "real_userid:source":self.source, "real_userid:user":self.ldapUser, "remote:ip":"::1", "remote:port":60019}
        self.checkConfig(8225, self.master, expectedResults)
        remote_client.disconnect()
Example No. 52
0
 def validate_backup_create(self):
     """
     Validates that the backup directory is created as expected
     Validates the backup metadata using backup-meta.json
     :return: status and message
     """
     remote_client = RemoteMachineShellConnection(
         self.backupset.backup_host)
     info = remote_client.extract_remote_info().type.lower()
     if info == 'linux' or info == 'mac':
         command = "ls -R {0}/{1}".format(self.backupset.directory,
                                          self.backupset.name)
         o, e = remote_client.execute_command(command)
     elif info == 'windows':
         o = remote_client.list_files("{0}/{1}".format(
             self.backupset.directory, self.backupset.name))
     remote_client.disconnect()
     if not o or all("backup-meta.json" not in str(entry) for entry in o):
         return False, "Backup create did not create backup-meta file."
     files_validations = BackupRestoreFilesValidations(self.backupset)
     status, msg = files_validations.validate_backup_meta_json()
     if status:
         msg += "\nBackup create validation success."
     return status, msg
Example No. 53
0
 def test_max_ttl_bucket(self):
     """
         From vulcan, EE bucket has has an option to set --max-ttl, not it CE.
         This test is make sure CE could not create bucket with option --max-ttl
         This test must pass default_bucket=False
     """
     if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
         self.log.info("This test only for vulcan and later")
         return
     cmd = 'curl -X POST -u Administrator:password \
                                 http://{0}:8091/pools/default/buckets \
                              -d name=bucket0 \
                              -d maxTTL=100 \
                              -d ramQuotaMB=100 '.format(self.master.ip)
     if self.cli_test:
         cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
             --password password --bucket bucket0 --bucket-type couchbase \
             --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
             --bucket-eviction-policy fullEviction --enable-flush 0 \
             --enable-index-replica 1 --max-ttl 200".format(
             self.bin_path, self.master.ip)
     conn = RemoteMachineShellConnection(self.master)
     output, error = conn.execute_command(cmd)
     conn.log_command_output(output, error)
     mesg = "Max TTL is supported in enterprise edition only"
     if self.cli_test:
         mesg = "Maximum TTL can only be configured on enterprise edition"
     if output and mesg not in str(output[0]):
         self.fail("max ttl feature should not in Community Edition")
     buckets = RestConnection(self.master).get_buckets()
     if buckets:
         for bucket in buckets:
             self.log.info("bucekt in cluser: {0}".format(bucket.name))
             if bucket.name == "bucket0":
                 self.fail("Failed to enforce feature max ttl in CE.")
     conn.disconnect()
Example No. 54
0
    def test_large_file_version(self):
        rest = RestConnection(self.cluster.master)
        remote_client = RemoteMachineShellConnection(self.cluster.master)
        remote_client.extract_remote_info()

        self._load_all_buckets(self.gen_load, "create")
        self.disable_compaction()
        self._monitor_DB_fragmentation()

        # rename here
        remote_client.stop_couchbase()
        time.sleep(5)
        remote_client.execute_command(
            "cd /opt/couchbase/var/lib/couchbase/data/default;rename .1 .65535 *.1"
        )
        remote_client.execute_command(
            "cd /opt/couchbase/var/lib/couchbase/data/default;rename .2 .65535 *.2"
        )
        remote_client.start_couchbase()

        for _ in range(5):
            self.log.info("starting a compaction iteration")
            compaction_task = self.cluster.async_compact_bucket(
                self.cluster.master, self.default_bucket_name)

            compact_run = remote_client.wait_till_compaction_end(
                rest,
                self.default_bucket_name,
                timeout_in_seconds=self.wait_timeout)
            res = compaction_task.result(self.wait_timeout)

        if compact_run:
            self.log.info("auto compaction run successfully")
        else:
            self.fail("auto compaction does not run")
        remote_client.disconnect()
Example No. 55
0
 def eventing_operation(self, subcommand, function_name, boundary="from-everything", file_name=None, name=True):
     options = self._get_default_options()
     options += " --name " + function_name
     if boundary:
         options += " --boundary " + boundary
     if name:
         options += " --file " + file_name
     remote_client = RemoteMachineShellConnection(self.server)
     stdout, stderr = remote_client.couchbase_cli(subcommand, self.hostname, options)
     remote_client.disconnect()
     if subcommand == "import":
         return stdout, stderr, self._was_success(stdout, "Events imported")
     elif subcommand == "deploy":
         return stdout, stderr, self._was_success(stdout, "Request to deploy the function was accepted")
     elif subcommand == "pause":
         return stdout, stderr, self._was_success(stdout, "Function was paused")
     elif subcommand == "resume":
         return stdout, stderr, self._was_success(stdout, "Function was resumed")
     elif subcommand == "undeploy":
         return stdout, stderr, self._was_success(stdout, "Request to undeploy the function was accepted")
     elif subcommand == "delete":
         return stdout, stderr, self._was_success(stdout, "Request to undeploy the function was accepted")
     elif subcommand == "list":
         return stdout, stderr
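A sketch of how the helper above might drive an import-then-deploy flow (the function name and file path are hypothetical):

# Hypothetical flow: import a function definition, then deploy it.
stdout, stderr, ok = self.eventing_operation("import", "bucket_op",
                                             file_name="/root/functions.json")
if ok:
    stdout, stderr, ok = self.eventing_operation("deploy", "bucket_op", name=False)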
Example No. 56
0
    def test_start_stop_DB_compaction(self):
        rest = RestConnection(self.master)
        remote_client = RemoteMachineShellConnection(self.master)
        self.log.info('loading the buckets')
        self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
        self.log.info('disabling compaction')
        self.disable_compaction()
        self.log.info('monitor db fragmentation')
        self._monitor_DB_fragmentation()
        self.log.info('async compact the bucket')
        compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
        self.log.info('cancel bucket compaction')
        self._cancel_bucket_compaction(rest, self.default_bucket_name)
        #compaction_task.result(self.wait_timeout)
        self.log.info('compact again')
        self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
        self.log.info('waiting for compaction to end')
        compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name, timeout_in_seconds=self.wait_timeout)
 
        if compact_run:
            self.log.info("auto compaction run successfully")
        else:
            self.fail("auto compaction does not run")
        remote_client.disconnect()
Example No. 57
0
    def rebalance_out_with_warming_up(self):
        master_restart = self.input.param("master_restart", False)
        if master_restart:
            warmup_node = self.cluster.master
        else:
            warmup_node = self.cluster.servers[len(self.cluster.servers) -
                                               self.nodes_out - 1]
        servs_out = self.cluster.servers[len(self.cluster.servers) -
                                         self.nodes_out:]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        self.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        try:
            rebalance = self.task.async_rebalance(self.cluster.servers, [],
                                                  servs_out)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.cluster.nodes_in_cluster = list(
                set(self.cluster.nodes_in_cluster) - set(servs_out))
        except RebalanceFailedException:
            self.log.info("rebalance was failed as expected")
            self.assertTrue(
                self.bucket_util._wait_warmup_completed(
                    self, [warmup_node],
                    'default',
                    wait_time=self.wait_timeout * 10))

            self.log.info("second attempt to rebalance")
            rebalance = self.task.async_rebalance(self.cluster.servers, [],
                                                  servs_out)
            self.task.jython_task_manager.get_task_result(rebalance)
            self.cluster.nodes_in_cluster = list(
                set(self.cluster.nodes_in_cluster) - set(servs_out))
        self.bucket_util.verify_cluster_stats(self.num_items)
        self.bucket_util.verify_unacked_bytes_all_buckets()
Example No. 58
0
 def disable_IPV6_grub_level(self):
     """
     Disable IPV6 at grub level for all nodes in the cluster
     """
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         shell.execute_command(
             '''sed -i 's/ipv6.disable=0 //; s/ipv6.disable=1 //; s/GRUB_CMDLINE_LINUX="/GRUB_CMDLINE_LINUX="ipv6.disable=1 /' /etc/default/grub'''
         )
         shell.execute_command("grub2-mkconfig -o /boot/grub2/grub.cfg")
         shell.reboot_node()
         time.sleep(10)
         shell = RemoteMachineShellConnection(server)
         output, error = shell.execute_command("ifconfig | grep inet6")
         if output == [] and error == []:
             log.info("IPv6 Successfully Disabled for {0}".format(
                 server.ip))
         else:
             log.info("Cant disable IPv6")
             log.info(
                 "Output message is {0} and error message is {1}".format(
                     output, error))
         output, error = shell.execute_command("iptables -F")
         shell.disconnect()
Example No. 59
0
    def testInfoCommands(self):
        remote_client = RemoteMachineShellConnection(self.master)

        cli_command = "server-list"
        output, error = remote_client.execute_couchbase_cli(
            cli_command=cli_command,
            cluster_host="localhost",
            user="******",
            password="******")
        server_info = self._get_cluster_info(remote_client)
        result = server_info["otpNode"] + " " + server_info[
            "hostname"] + " " + server_info["status"] + " " + server_info[
                "clusterMembership"]
        self.assertEqual(
            result, "ns_1@{0} {0}:8091 healthy active".format("127.0.0.1"))

        cli_command = "bucket-list"
        output, error = remote_client.execute_couchbase_cli(
            cli_command=cli_command,
            cluster_host="localhost",
            user="******",
            password="******")
        self.assertEqual([], output)
        remote_client.disconnect()
Example No. 60
0
    def test_rollback_and_persistence_race_condition(self):
        cluster = self.cluster
        gen_load = doc_generator(self.key, 0, self.num_items)
        for bucket in self.bucket_util.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_load, "create", 0,
                batch_size=10, process_concurrency=8,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                timeout_secs=self.sdk_timeout, retries=self.sdk_retries)
            self.task.jython_task_manager.get_task_result(task)

        # Stop persistence
        for server in cluster.servers[:self.nodes_init]:
            # Create cbepctl command object
            node_shell_conn = RemoteMachineShellConnection(server)
            cbepctl_obj = Cbepctl(node_shell_conn)

            for bucket in self.bucket_util.buckets:
                cbepctl_obj.persistence(bucket.name, "stop")

            # Disconnect the shell_connection
            node_shell_conn.disconnect()

        self.sleep(10, "Wait after stop_persistence")

        # more (non-intersecting) load
        gen_load = doc_generator(self.key, 0, self.num_items, doc_size=64)
        for bucket in self.bucket_util.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_load, "create", 0,
                batch_size=10, process_concurrency=8,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                timeout_secs=self.sdk_timeout, retries=self.sdk_retries)
            self.task.jython_task_manager.get_task_result(task)

        shell = RemoteMachineShellConnection(cluster.servers[0])
        shell.kill_memcached()
        shell.disconnect()

        self.sleep(10, "Wait after kill memcached")

        node1_shell_conn = RemoteMachineShellConnection(cluster.servers[0])
        node2_shell_conn = RemoteMachineShellConnection(cluster.servers[1])
        node1_cb_stat_obj = Cbstats(node1_shell_conn)
        node2_cb_stat_obj = Cbstats(node2_shell_conn)

        node1_items = node1_cb_stat_obj.all_stats(bucket, "curr_items_tot")
        node2_items = node2_cb_stat_obj.all_stats(bucket, "curr_items_tot")

        # Disconnect the opened connections
        node1_shell_conn.disconnect()
        node2_shell_conn.disconnect()

        self.assertTrue(node1_items == node2_items,
                        'Node items not equal. Node 1:{0}, node 2:{1}'
                        .format(node1_items, node2_items))