Example #1
 def parallel_install(self, servers, params):
     uninstall_threads = []
     install_threads = []
     initializer_threads = []
     queue = Queue.Queue()
     success = True
     for server in servers:
         _params = copy.deepcopy(params)
         _params["server"] = server
         u_t = Thread(target=installer_factory(params).uninstall,
                    name="uninstaller-thread-{0}".format(server.ip),
                    args=(_params,))
         i_t = Thread(target=installer_factory(params).install,
                    name="installer-thread-{0}".format(server.ip),
                    args=(_params, queue))
         init_t = Thread(target=installer_factory(params).initialize,
                    name="initializer-thread-{0}".format(server.ip),
                    args=(_params,))
         uninstall_threads.append(u_t)
         install_threads.append(i_t)
         initializer_threads.append(init_t)
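     # Phase 1: run every uninstaller thread to completion before verifying removal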
     for t in uninstall_threads:
         t.start()
     for t in uninstall_threads:
         t.join()
         print "thread {0} finished".format(t.name)
     if "product" in params and params["product"] in ["couchbase", "couchbase-server", "cb"]:
         success = True
         for server in servers:
             success &= not RemoteMachineShellConnection(server).is_couchbase_installed()
         if not success:
             print "Server:{0}.Couchbase is still installed after uninstall".format(server)
             return success
     for t in install_threads:
         t.start()
     for t in install_threads:
         t.join()
         print "thread {0} finished".format(t.name)
     while not queue.empty():
         success &= queue.get()
     if not success:
         print "installation failed. initializer threads were skipped"
         return success
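     # All installs reported success: run the initializer threads in parallel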
     for t in initializer_threads:
         t.start()
     for t in initializer_threads:
         t.join()
         print "thread {0} finished".format(t.name)
     """ remove any capture files left after install windows """
     remote_client = RemoteMachineShellConnection(servers[0])
     type = remote_client.extract_remote_info().distribution_type
     remote_client.disconnect()
     if type.lower() == 'windows':
         for server in servers:
             shell = RemoteMachineShellConnection(server)
             shell.execute_command("rm -f /cygdrive/c/automation/*_172.23*")
             shell.execute_command("rm -f /cygdrive/c/automation/*_10.17*")
             os.system("rm -f resources/windows/automation/*_172.23*")
             os.system("rm -f resources/windows/automation/*_10.17*")
     return success
Example #2
 def setUp(self):
     super(GatewayWebhookBaseTest, self).setUp()
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.version = self.input.param("version", "0.0.0-358")
     self.extra_param = self.input.param("extra_param", "")
     self.configfile = self.input.param("config", "config_webhook_basic.json")
     self.doc_id = self.input.param("doc_id", "doc1")
     self.doc_content = self.input.param("doc_content", "{'a':1}")
     self.expected_error = self.input.param("expected_error", "")
     self.servers = self.input.servers
     self.master = self.servers[0]
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         if self.case_number == 1:
             shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
             # will install sg only the first time
             self.install(shell)
             pid = self.is_sync_gateway_process_running(shell)
             self.assertNotEqual(pid, 0)
             exist = shell.file_exists("{0}/tmp/".format(self.folder_prefix), "gateway.log")
             self.assertTrue(exist)
             shell.copy_files_local_to_remote("pytests/sg/resources", "/tmp")
         self.start_simpleServe(shell)
         shell.disconnect()
Example #3
    def backup_restore_validate(self, compare_uuid=False, seqno_compare_function="==",
                                replicas=False, mode="memory", expected_error=None):
        output, error = self.backup_restore()
        if expected_error:
            output.extend(error)
            error_found = False
            if expected_error:
                for line in output:
                    if line.find(expected_error) != -1:
                        error_found = True
                        break
            self.assertTrue(error_found, "Expected error not found: %s" % expected_error)
            return
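        # No error expected: check the backup log for a successful transfer and no failures before validating seqnos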
        remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
        command = "grep 'Transfer plan finished successfully' " + self.backupset.directory + "/logs/backup.log"
        output, error = remote_client.execute_command(command)
        remote_client.log_command_output(output, error)
        if not output:
            self.fail("Restoring backup failed.")
        command = "grep 'Transfer failed' " + self.backupset.directory + "/logs/backup.log"
        output, error = remote_client.execute_command(command)
        remote_client.log_command_output(output, error)
        if output:
            self.fail("Restoring backup failed.")

        self.log.info("Finished restoring backup")

        current_vseqno = self.get_vbucket_seqnos(self.cluster_to_restore, self.buckets, self.skip_consistency, self.per_node)
        status, msg = self.validation_helper.validate_restore(self.backupset.end, self.vbucket_seqno, current_vseqno,
                                                              compare_uuid=compare_uuid, compare=seqno_compare_function,
                                                              get_replica=replicas, mode=mode)

        if not status:
            self.fail(msg)
        self.log.info(msg)
Example #4
 def tearDown(self):
     if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                 and 'stop-on-failure' in TestInputSingleton.input.test_params and \
                 str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
                 #supported starting with python2.7
                 log.warn("CLEANUP WAS SKIPPED")
                 self.cluster.shutdown()
                 self._log_finish(self)
     else:
         try:
             self.log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
             RemoteUtilHelper.common_basic_setup(self.servers)
             self.log.info("10 seconds delay to wait for membase-server to start")
             time.sleep(10)
             for server in self._cleanup_nodes:
                 shell = RemoteMachineShellConnection(server)
                 o, r = shell.execute_command("iptables -F")
                 shell.log_command_output(o, r)
                 o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
                 shell.log_command_output(o, r)
                 o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
                 shell.log_command_output(o, r)
                 o, r = shell.execute_command("/etc/init.d/couchbase-server start")
                 shell.log_command_output(o, r)
                 shell.disconnect()
             self.log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         finally:
             super(FailoverBaseTest, self).tearDown()
Example #5
    def replication_while_rebooting_a_non_master_destination_node(self):
        self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
        self._load_all_buckets(self.dest_master, self.gen_create2, "create", 0)
        self._async_update_delete_data()
        self.sleep(self._timeout)

        reboot_node_dest = self.dest_nodes[len(self.dest_nodes) - 1]
        shell = RemoteMachineShellConnection(reboot_node_dest)
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        reboot_node_src = self.src_nodes[len(self.src_nodes) - 1]
        shell = RemoteMachineShellConnection(reboot_node_src)
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)

        self.sleep(360)
        ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
        ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
        self.merge_buckets(self.src_master, self.dest_master, bidirection=True)
        self.verify_results(verify_src=True)
Example #6
 def offline_cluster_upgrade(self):
     self._install(self.servers[:self.nodes_init])
     self.operations(self.servers[:self.nodes_init])
     seqno_expected = 1
     if self.ddocs_num:
         self.create_ddocs_and_views()
         if self.input.param('run_index', False):
             self.verify_all_queries()
     if not self.initial_version.startswith("1.") and self.input.param('check_seqno', True):
         self.check_seqno(seqno_expected)
     if self.during_ops:
         for opn in self.during_ops:
             if opn != 'add_back_failover':
                 getattr(self, opn)()
     num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
     upgrade_nodes = self.servers[self.nodes_init - num_stoped_nodes :self.nodes_init]
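     # For each target version: stop the selected nodes, optionally remove manifest/config files, then upgrade and verify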
     for upgrade_version in self.upgrade_versions:
         self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                    format(upgrade_version))
         for server in upgrade_nodes:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             self.sleep(self.sleep_time)
             if self.wait_expire:
                 self.sleep(self.expire_time)
             if self.input.param('remove_manifest_files', False):
                 for file in ['manifest.txt', 'manifest.xml', 'VERSION.txt']:
                     output, error = remote.execute_command("rm -rf /opt/couchbase/{0}".format(file))
                     remote.log_command_output(output, error)
             if self.input.param('remove_config_files', False):
                 for file in ['config', 'couchbase-server.node', 'ip', 'couchbase-server.cookie']:
                     output, error = remote.execute_command("rm -rf /opt/couchbase/var/lib/couchbase/{0}".format(file))
                     remote.log_command_output(output, error)
                 self.buckets = []
             remote.disconnect()
         upgrade_threads = self._async_update(upgrade_version, upgrade_nodes)
         #wait upgrade statuses
         for upgrade_thread in upgrade_threads:
             upgrade_thread.join()
         success_upgrade = True
         while not self.queue.empty():
             success_upgrade &= self.queue.get()
         if not success_upgrade:
             self.fail("Upgrade failed. See logs above!")
         self.sleep(self.expire_time)
         if self.during_ops:
             if "add_back_failover" in self.during_ops:
                 getattr(self, 'add_back_failover')()
                 self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
             elif "failover" in self.during_ops:
                 self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
                 rem = [server for server in self.servers[:self.nodes_init]
                      if self.failover_node.ip == server.ip and str(self.failover_node.port) == server.port]
                 self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
                 self.verification(list(set(self.servers[:self.nodes_init]) - set(rem)))
                 return
         self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
         self.verification(self.servers[:self.nodes_init])
         if self.input.param('check_seqno', True):
             self.check_seqno(seqno_expected)
Example #7
 def backup_merge(self):
     self.log.info("backups before merge: " + str(self.backups))
     self.log.info("number_of_backups_taken before merge: " + str(self.number_of_backups_taken))
     try:
         backup_start = self.backups[int(self.backupset.start) - 1]
     except IndexError:
         backup_start = "{0}{1}".format(self.backups[-1], self.backupset.start)
     try:
         backup_end = self.backups[int(self.backupset.end) - 1]
     except IndexError:
         backup_end = "{0}{1}".format(self.backups[-1], self.backupset.end)
     args = "merge --dir {0} --name {1} --start {2} --end {3}".format(self.backupset.directory, self.backupset.name,
                                                                      backup_start, backup_end)
     remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
     command = "{0}/backup {1}".format(self.cli_command_location, args)
     output, error = remote_client.execute_command(command)
     remote_client.log_command_output(output, error)
     if error or "Merge completed successfully" not in output[0]:
         return False, error, "Merging backup failed"
     del self.backups[self.backupset.start - 1:self.backupset.end]
     command = "ls -tr {0}/{1} | tail -1".format(self.backupset.directory, self.backupset.name)
     o, e = remote_client.execute_command(command)
     if o:
         self.backups.insert(self.backupset.start - 1, o[0])
     self.number_of_backups_taken -= (self.backupset.end - self.backupset.start + 1)
     self.number_of_backups_taken += 1
     self.log.info("backups after merge: " + str(self.backups))
     self.log.info("number_of_backups_taken after merge: " + str(self.number_of_backups_taken))
     return True, output, "Merging backup succeeded"
Example #8
    def _execute_boot_op(self, server):
        try:
            shell = RemoteMachineShellConnection(server)
            if self.boot_op == "warmup":
                shell.set_environment_variable(None, None)
                shell.disconnect()
            elif self.boot_op == "reboot":
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    time.sleep(self.wait_timeout * 2)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warmed-up server .. ".format(server.ip))
Example #9
    def test_large_file_version(self):
        rest = RestConnection(self.master)
        remote_client = RemoteMachineShellConnection(self.master)
        remote_client.extract_remote_info()

        self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
        self.disable_compaction()
        self._monitor_DB_fragmentation()

        # rename here

        remote_client.stop_couchbase()
        time.sleep(5)
        remote_client.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .1 .65535 *.1")
        remote_client.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .2 .65535 *.2")
        remote_client.start_couchbase()

        for i in range(5):
            self.log.info("starting a compaction iteration")
            compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)

            compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name, timeout_in_seconds=self.wait_timeout)
            res = compaction_task.result(self.wait_timeout)


        if compact_run:
            self.log.info("auto compaction run successfully")
        else:
            self.fail("auto compaction does not run")

        remote_client.disconnect()
Example #10
 def tearDown(self):
     try:
         self._cluster_helper.shutdown()
         log = logger.Logger.get_logger()
         log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         RemoteUtilHelper.common_basic_setup(self._servers)
         log.info("10 seconds delay to wait for membase-server to start")
         time.sleep(10)
         for server in self._cleanup_nodes:
             shell = RemoteMachineShellConnection(server)
             o, r = shell.execute_command("iptables -F")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/etc/init.d/couchbase-server start")
             shell.log_command_output(o, r)
             shell.disconnect()
         BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
         ClusterOperationHelper.cleanup_cluster(self._servers)
         ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
         log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
     finally:
         pass
Example #11
    def test_verify_mb21369(self):
        repeat = self._input.param("repeat", 5)
        load_tasks = self.setup_xdcr_async_load()

        conn = RemoteMachineShellConnection(self.src_cluster.get_master_node())
        output, error = conn.execute_command("netstat -an | grep " + self.src_cluster.get_master_node().ip
                                             + ":11210 | wc -l")
        conn.log_command_output(output, error)
        before = output[0]
        self.log.info("No. of memcached connections before: {0}".format(output[0]))

        for i in range(0, repeat):
            self.src_cluster.pause_all_replications()
            self.sleep(30)
            self.src_cluster.resume_all_replications()

            self.sleep(self._wait_timeout)

            output, error = conn.execute_command("netstat -an | grep " + self.src_cluster.get_master_node().ip
                                                 + ":11210 | wc -l")
            conn.log_command_output(output, error)
            self.log.info("No. of memcached connections in iteration {0}:  {1}".format(i+1, output[0]))
            self.assertLessEqual(abs(int(output[0]) - int(before)), 5, "Number of memcached connections changed beyond allowed limit")

        for task in load_tasks:
            task.result()

        self.log.info("No. of memcached connections did not increase with pausing and resuming replication multiple times")
Example #12
 def setUp(self):
     super(SGConfigTests, self).setUp()
     for server in self.servers:
         if self.case_number == 1:
             with open('pytests/sg/resources/gateway_config_walrus_template.json', 'r') as file:
                 filedata = file.read()
                 filedata = filedata.replace('LOCAL_IP', server.ip)
             with open('pytests/sg/resources/gateway_config_walrus.json', 'w') as file:
                 file.write(filedata)
             shell = RemoteMachineShellConnection(server)
             shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
             shell.copy_files_local_to_remote('pytests/sg/resources', '{0}/tmp'.format(self.folder_prefix))
             # will install sg only the first time
             self.install(shell)
             pid = self.is_sync_gateway_process_running(shell)
             self.assertNotEqual(pid, 0)
             exist = shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
             self.assertTrue(exist)
             shell.disconnect()
     if self.case_number == 1:
         shutil.copy2('pytests/sg/resources/gateway_config_backup.json', 'pytests/sg/resources/gateway_config.json')
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         self.cluster = Cluster()
         self.cluster.create_default_bucket(self.master, 150)
         task = self.cluster.async_create_sasl_bucket(self.master, 'test_%E-.5', 'password', 150, 1)
         task.result()
         task = self.cluster.async_create_standard_bucket(self.master, 'db', 11219, 150, 1)
         task.result()
Example #13
    def run_failover_operations(self, chosen, failover_reason):
        """ Method to run fail over operations used in the test scenario based on failover reason """
        # Perform operations related to failover
        for node in chosen:
            if failover_reason == 'stop_server':
                self.stop_server(node)
                self.log.info("10 seconds delay to wait for membase-server to shutdown")
                # wait for 5 minutes until node is down
                self.assertTrue(RestHelper(self.rest).wait_for_node_status(node, "unhealthy", 300),
                                    msg="node status is not unhealthy even after waiting for 5 minutes")
            elif failover_reason == "firewall":
                server = [srv for srv in self.servers if node.ip == srv.ip][0]
                RemoteUtilHelper.enable_firewall(server, bidirectional=self.bidirectional)
                status = RestHelper(self.rest).wait_for_node_status(node, "unhealthy", 300)
                if status:
                    self.log.info("node {0}:{1} is 'unhealthy' as expected".format(node.ip, node.port))
                else:
                    # verify iptables on the node if something wrong
                    for server in self.servers:
                        if server.ip == node.ip:
                            shell = RemoteMachineShellConnection(server)
                            info = shell.extract_remote_info()
                            if info.type.lower() == "windows":
                                o, r = shell.execute_command("netsh advfirewall show allprofiles")
                                shell.log_command_output(o, r)
                            else:
                                o, r = shell.execute_command("/sbin/iptables --list")
                                shell.log_command_output(o, r)
                            shell.disconnect()
                    self.rest.print_UI_logs()
                    api = self.rest.baseUrl + 'nodeStatuses'
                    status, content, header = self.rest._http_request(api)
                    json_parsed = json.loads(content)
                    self.log.info("nodeStatuses: {0}".format(json_parsed))
                    self.fail("node status is not unhealthy even after waiting for 5 minutes")

        # define precondition check for failover
        failed_over = self.rest.fail_over(node.id, graceful=self.graceful)

        # Check for negative cases
        if self.graceful and (failover_reason in ['stop_server', 'firewall']):
            if failed_over:
                # MB-10479
                self.rest.print_UI_logs()
            self.assertFalse(failed_over, "Graceful Failover was started for unhealthy node!!! ")
            return
        elif self.gracefulFailoverFail and failed_over:
            """ Check if the fail_over fails as expected """
            self.assertTrue(not failed_over,""" Graceful failover should fail due to not enough replicas """)
            return

        # Check if failover happened as expected or re-try one more time
        if not failed_over:
            self.log.info("unable to failover the node the first time. try again in  60 seconds..")
            # try again in 75 seconds
            self.sleep(75)
            failed_over = self.rest.fail_over(node.id, graceful=self.graceful)
        if self.graceful and (failover_reason not in ['stop_server', 'firewall']):
            reached = RestHelper(self.rest).rebalance_reached()
            self.assertTrue(reached, "rebalance failed for Graceful Failover, stuck or did not complete")
Example #14
File: install.py Project: vmx/testrunner
    def initialize(self, params):
#        log = logger.new_logger("Installer")
        start_time = time.time()
        cluster_initialized = False
        server = params["server"]
        remote_client = RemoteMachineShellConnection(params["server"])
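        # Retry cluster initialization against the REST API for up to 10 minutes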
        while time.time() < (start_time + (10 * 60)):
            rest = RestConnection(server)
            try:
                rest.init_cluster(username=server.rest_username, password=server.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                if server.data_path:
                    time.sleep(3)
                    # Make sure that data_path is writable by couchbase user
                    #remote_client.stop_couchbase()
                    remote_client.execute_command('rm -rf {0}/*'.format(server.data_path))
                    remote_client.execute_command("chown -R couchbase:couchbase {0}".format(server.data_path))
                    rest.set_data_path(data_path=server.data_path)
                    # Symlink data-dir to custom path
                    #remote_client.execute_command('mv /opt/couchbase/var {0}'.format(server.data_path))
                    #remote_client.execute_command('ln -s {0}/var /opt/couchbase/var'.format(server.data_path))
                    #remote_client.execute_command("chown -h couchbase:couchbase /opt/couchbase/var")
                    #remote_client.start_couchbase()
                    time.sleep(3)
                cluster_initialized = True
                break
            except ServerUnavailableException:
                log.error("error happened while initializing the cluster @ {0}".format(server.ip))
            log.info('sleep for 5 seconds before trying again ...')
            time.sleep(5)
        if not cluster_initialized:
            raise Exception("unable to initialize membase node")
Example #15
    def replication_while_rebooting_a_non_master_destination_node(self):
        self.set_xdcr_param("xdcrFailureRestartInterval", 1)

        self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
        self._async_modify_data()
        self.sleep(self._timeout)

        i = len(self.dest_nodes) - 1
        shell = RemoteMachineShellConnection(self.dest_nodes[i])
        type = shell.extract_remote_info().type.lower()
        if type == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif type == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        shell.disconnect()
        self.sleep(60, "after rebooting node")
        num = 0
        while num < 10:
            try:
                shell = RemoteMachineShellConnection(self.dest_nodes[i])
            except BaseException, e:
                self.log.warn("node {0} is unreachable".format(self.dest_nodes[i].ip))
                self.sleep(60, "still can't connect to node")
                num += 1
            else:
                break
Example #16
    def setUp(self):



        self._cleanup_nodes = []
        self._failed_nodes = []
        super(LWW_EP_Engine, self).setUp()


        # need to enable set drift counter and get adjusted time for the clients. This is only enabled for the
        # XDCR user so we need to do a bit of a hack by using sed to edit the rbac.json file
        # TODO: implement for Windows

        if self.master.ip != '127.0.0.1' and not LWW_EP_Engine.have_modified_rbac_file:
            # first stop the servers
            for s in self.servers:
                self.stop_server(s)

            CMD =  'sed -i -e \'s/"SET_WITH_META",/"SET_WITH_META","SET_DRIFT_COUNTER_STATE","GET_ADJUSTED_TIME",/\' /opt/couchbase/etc/security/rbac.json'
            # do the sed thing
            for s in self.servers:
                shell = RemoteMachineShellConnection(s)
                shell.execute_command(CMD)
            for s in self.servers:
                self.start_server(s)

            LWW_EP_Engine.have_modified_rbac_file = True
Example #17
 def load_with_dir(self, generators_load, exp=0, flag=0,
          kv_store=1, only_store_hash=True, batch_size=1, pause_secs=1,
          timeout_secs=30, op_type='create', start_items=0):
     gens_load = {}
     for bucket in self.buckets:
         tmp_gen = []
         for generator_load in generators_load:
             tmp_gen.append(copy.deepcopy(generator_load))
         gens_load[bucket] = copy.deepcopy(tmp_gen)
     items = 0
     for gen_load in gens_load[self.buckets[0]]:
             items += (gen_load.end - gen_load.start)
     shell = RemoteMachineShellConnection(self.master)
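     # Recreate each bucket's data directory on the master and write every generated document as a <key>.json file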
     try:
         for bucket in self.buckets:
             self.log.info("%s %s to %s documents..." % (op_type, items, bucket.name))
             self.log.info("Delete directory's content %s/data/default/%s ..." % (self.directory, bucket.name))
             shell.execute_command('rm -rf %s/data/default/*' % self.directory)
             self.log.info("Create directory %s/data/default/%s..." % (self.directory, bucket.name))
             shell.execute_command('mkdir -p %s/data/default/%s' % (self.directory, bucket.name))
             self.log.info("Load %s documents to %s/data/default/%s..." % (items, self.directory, bucket.name))
             for gen_load in gens_load[bucket]:
                 for i in xrange(gen_load.end):
                     key, value = gen_load.next()
                     out = shell.execute_command("echo '%s' > %s/data/default/%s/%s.json" % (value, self.directory,
                                                                                             bucket.name, key))
             self.log.info("LOAD IS FINISHED")
     finally:
         shell.disconnect()
     self.num_items = items + start_items
     self.log.info("LOAD IS FINISHED")
 def backup_cluster(self):
     args = "backup --archive {0} --repo {1} {6} http://{2}:{3} --username {4} --password {5}".format(
         self.backupset.directory,
         self.backupset.name,
         self.backupset.cluster_host.ip,
         self.backupset.cluster_host.port,
         self.backupset.cluster_host_username,
         self.backupset.cluster_host_password,
         self.cluster_flag,
     )
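     # Append optional cbbackupmgr flags based on the backupset settings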
     if self.backupset.resume:
         args += " --resume"
     if self.backupset.purge:
         args += " --purge"
     if self.no_progress_bar:
         args += " --no-progress-bar"
     remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
     command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
     output, error = remote_client.execute_command(command)
     remote_client.log_command_output(output, error)
     if error or "Backup successfully completed" not in output[0]:
         return output, error
     command = "ls -tr {0}/{1} | tail -1".format(self.backupset.directory, self.backupset.name)
     o, e = remote_client.execute_command(command)
     if o:
         self.backups.append(o[0])
     self.number_of_backups_taken += 1
     self.log.info("Finished taking backup  with args: {0}".format(args))
     return output, error
Example #19
 def tearDown(self):
     super(EnterpriseBackupRestoreBase, self).tearDown()
     if not self.input.param("skip_cleanup", False):
         remote_client = RemoteMachineShellConnection(self.input.clusters[1][0])
         info = remote_client.extract_remote_info().type.lower()
         if info == 'linux' or info == 'mac':
             backup_directory = "/tmp/entbackup"
             validation_files_location = "/tmp/backuprestore"
         elif info == 'windows':
             backup_directory = testconstants.WIN_TMP_PATH + "entbackup"
             validation_files_location = testconstants.WIN_TMP_PATH + "backuprestore"
         else:
             raise Exception("OS not supported.")
         command = "rm -rf {0}".format(backup_directory)
         output, error = remote_client.execute_command(command)
         remote_client.log_command_output(output, error)
         if info == 'linux':
             command = "rm -rf /cbqe3043/entbackup".format(backup_directory)
             output, error = remote_client.execute_command(command)
             remote_client.log_command_output(output, error)
         if self.input.clusters:
             for key in self.input.clusters.keys():
                 servers = self.input.clusters[key]
                 self.backup_reset_clusters(servers)
         if os.path.exists(validation_files_location):
             shutil.rmtree(validation_files_location)
Example #20
 def mutate_and_check_error404(self, n=1):
     # get vb0 active source node
     active_src_node = self.get_active_vb0_node(self.src_master)
     shell = RemoteMachineShellConnection(active_src_node)
     os_type = shell.extract_remote_info().distribution_type
     if os_type.lower() == 'windows':
         trace_log = "C:/Program Files/Couchbase/Server/var/lib/couchbase/logs/xdcr_trace.log"
     else:
         trace_log = "/opt/couchbase/var/lib/couchbase/logs/xdcr_trace.*"
     num_404_errors_before_load, error = shell.execute_command("grep \"error,404\" {} | wc -l"
                                                                  .format(trace_log))
     num_get_remote_bkt_failed_before_load, error = shell.execute_command("grep \"get_remote_bucket_failed\" \"{}\" | wc -l"
                                                                  .format(trace_log))
     self.log.info("404 errors: {}, get_remote_bucket_failed errors : {}".
                   format(num_404_errors_before_load, num_get_remote_bkt_failed_before_load))
     self.sleep(60)
     self.log.info("################ New mutation:{} ##################".format(self.key_counter+1))
     self.load_one_mutation_into_source_vb0(active_src_node)
     self.sleep(5)
     num_404_errors_after_load, error = shell.execute_command("grep \"error,404\" {} | wc -l"
                                                                  .format(trace_log))
     num_get_remote_bkt_failed_after_load, error = shell.execute_command("grep \"get_remote_bucket_failed\" \"{}\" | wc -l"
                                                                  .format(trace_log))
     self.log.info("404 errors: {}, get_remote_bucket_failed errors : {}".
                   format(num_404_errors_after_load, num_get_remote_bkt_failed_after_load))
     shell.disconnect()
     if (int(num_404_errors_after_load[0]) > int(num_404_errors_before_load[0])) or \
        (int(num_get_remote_bkt_failed_after_load[0]) > int(num_get_remote_bkt_failed_before_load[0])):
         self.log.info("Checkpointing error-404 verified after dest failover/rebalance out")
         return True
     else:
         self.log.info("404 errors on source node before last load : {}, after last node: {}".
                       format(int(num_404_errors_after_load[0]), int(num_404_errors_before_load[0])))
         self.log.error("Checkpoint 404 error NOT recorded at source following dest failover or rebalance!")
Example #21
    def test_folderMisMatchCluster(self):
        auditIns = audit(host=self.master)
        originalPath = auditIns.getAuditLogPath()
        newPath = originalPath + 'testFolderMisMatch'
        shell = RemoteMachineShellConnection(self.servers[0])
        try:
            shell.create_directory(newPath)
            command = 'chown couchbase:couchbase ' + newPath
            shell.execute_command(command)
        finally:
            shell.disconnect()

        auditIns.setAuditLogPath(newPath)

        for server in self.servers:
            rest = RestConnection(server)
            #Create an Event for Bucket Creation
            expectedResults = {'name':'TestBucket ' + server.ip, 'ram_quota':536870912, 'num_replicas':1,
                                       'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \
                                       'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \
                                        "flush_enabled":False, "num_threads":3, "source":source, \
                                       "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'' }
            rest.create_bucket(expectedResults['name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \
                                       '11211', 'membase', 0, expectedResults['num_threads'], expectedResults['flush_enabled'], 'valueOnly')

            #Check on Events
            try:
                self.checkConfig(self.eventID, self.servers[0], expectedResults)
            except:
                self.log.info ("Issue reading the file at Node {0}".format(server.ip))
Example #22
    def validateTimeStamp(self, actualTime=None):
        try:
            date = actualTime[:10]
            hourMin = actualTime[11:16]
            tempTimeZone = actualTime[-6:]
            shell = RemoteMachineShellConnection(self.host)
            try:
                currDate = shell.execute_command('date +"%Y-%m-%d"')
                currHourMin = shell.execute_command('date +"%H:%M"')
                currTimeZone = shell.execute_command('date +%z')
            finally:
                shell.disconnect()
            log.info (" Matching expected date - currDate {0}; actual Date - {1}".format(currDate[0][0], date))
            log.info (" Matching expected time - currTime {0} ; actual Time - {1}".format(currHourMin[0][0], hourMin))
            if ((date != currDate[0][0])):
                log.info ("Mis-match in values for timestamp - date")
                return False
                #Compare time and minutes, will fail if time is 56 mins or above
            else:
                if ((int((hourMin.split(":"))[0])) != (int((currHourMin[0][0].split(":"))[0]))) or ((int((hourMin.split(":"))[1]) + 10) < (int((currHourMin[0][0].split(":"))[1]))):
                    log.info ("Mis-match in values for timestamp - time")
                    return False
                else:
                    tempTimeZone = tempTimeZone.replace(":", "")
                    if (tempTimeZone != currTimeZone[0][0]):
                        log.info ("Mis-match in value of timezone")
                        return False

        except Exception, e:
            log.info ("Value of execption is {0}".format(e))
            return False
Example #23
    def test_restart_node_with_full_disk(self):
        def _get_disk_usage_percentage(remote_client):
            disk_info = remote_client.get_disk_info()
            percentage = disk_info[1] + disk_info[2];
            for item in percentage.split():
                if "%" in item:
                    self.log.info("disk usage {0}".format(item))
                    return item[:-1]

        remote_client = RemoteMachineShellConnection(self.master)
        output, error = remote_client.execute_command_raw("rm -rf full_disk*", use_channel=True)
        remote_client.log_command_output(output, error)
        percentage = _get_disk_usage_percentage(remote_client)
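        # Fill the disk with dummy files until usage reaches 99%, then restart couchbase-server with the disk full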
        try:
            while int(percentage) < 99:
                output, error = remote_client.execute_command("dd if=/dev/zero of=full_disk{0} bs=3G count=1".format(percentage + str(time.time())), use_channel=True)
                remote_client.log_command_output(output, error)
                percentage = _get_disk_usage_percentage(remote_client)
            processes1 = remote_client.get_running_processes()
            output, error = remote_client.execute_command("/etc/init.d/couchbase-server restart", use_channel=True)
            remote_client.log_command_output(output, error)
        finally:
            output, error = remote_client.execute_command_raw("rm -rf full_disk*", use_channel=True)
            remote_client.log_command_output(output, error)
            remote_client.disconnect()
Example #24
 def test_node_reboot(self):
     wait_timeout = 120
     timeout = self.timeout / 2
     status = self.rest.update_autoreprovision_settings(True, 1)
     if not status:
         self.fail('failed to change autoreprovision_settings!')
     self.sleep(5)
     shell = RemoteMachineShellConnection(self.server_fail)
     if shell.extract_remote_info().type.lower() == 'windows':
         o, r = shell.execute_command("shutdown -r -f -t 0")
     elif shell.extract_remote_info().type.lower() == 'linux':
         o, r = shell.execute_command("reboot")
     shell.log_command_output(o, r)
     if shell.extract_remote_info().type.lower() == 'windows':
         time.sleep(wait_timeout * 5)
     else:
         time.sleep(wait_timeout)
     # disable firewall on the node
     shell = RemoteMachineShellConnection(self.server_fail)
     shell.disable_firewall()
     AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 0,
                                                         timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                         self)
     helper = RestHelper(self.rest)
     self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
     self.assertFalse(helper.is_cluster_rebalanced(), "cluster is balanced")
     self.rest.rebalance(otpNodes=[node.id for node in self.rest.node_statuses()], ejectedNodes=[])
     self.assertTrue(self.rest.monitorRebalance())
     buckets = self.rest.get_buckets()
     for bucket in buckets:
         self.verify_loaded_data(self.master, bucket.name, self.loaded_items[bucket.name])
Example #25
    def test_non_default_case_sensitive_same_port(self):
        postfix = uuid.uuid4()
        name = 'uppercase_{0}'.format(postfix)
        master = self.servers[0]
        rest = RestConnection(master)
        proxyPort = rest.get_nodes_self().moxi + 100
        shell = RemoteMachineShellConnection(master)
        url = "http://%s:8091/pools/default/buckets" % master.ip
        params = "name=%s&ramQuotaMB=200&authType=none&replicaNumber=1&proxyPort=%s" \
                                                                   % (name, proxyPort)
        cmd = "curl -X POST -u Administrator:password  -d '%s' %s" % (params, url)
        output, error = shell.execute_command(cmd)
        if output and "error" in output[0]:
            self.fail("Fail to create bucket %s" % name)

        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)

        name = 'UPPERCASE{0}'.format(postfix)
        params = "name=%s&ramQuotaMB=200&authType=none&replicaNumber=1&proxyPort=%s" \
                                                                   % (name, proxyPort)
        cmd = "curl -X POST -u Administrator:password  -d '%s' %s" % (params, url)
        output, error = shell.execute_command(cmd)
        if output and 'port is already in use' not in output[0]:
            self.log.error(output)
            self.fail('create-bucket on the same port did not fail as expected')
Example #26
    def _reboot_cluster(self, data_set):
        try:
            for server in self.servers[0:self.helper.num_nodes_reboot]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    time.sleep(120)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warming-up server ..".format(server.ip))
            time.sleep(100)
Example #27
 def _kill_nodes(self, nodes, servers, bucket_name):
     self.reboot = self.input.param("reboot", True)
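     # reboot=False: kill memcached on each node via diag_eval; reboot=True: reboot each server and flush iptables afterwards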
     if not self.reboot:
         for node in nodes:
             _node = {
                 "ip": node.ip,
                 "port": node.port,
                 "username": self.servers[0].rest_username,
                 "password": self.servers[0].rest_password,
             }
             node_rest = RestConnection(_node)
             _mc = MemcachedClientHelper.direct_client(_node, bucket_name)
             self.log.info("restarted the node %s:%s" % (node.ip, node.port))
             pid = _mc.stats()["pid"]
             command = 'os:cmd("kill -9 {0} ")'.format(pid)
             self.log.info(command)
             killed = node_rest.diag_eval(command)
             self.log.info("killed ??  {0} ".format(killed))
             _mc.close()
     else:
         for server in servers:
             shell = RemoteMachineShellConnection(server)
             command = "reboot"
             output, error = shell.execute_command(command)
             shell.log_command_output(output, error)
             shell.disconnect()
             time.sleep(self.wait_timeout * 8)
             shell = RemoteMachineShellConnection(server)
             command = "/sbin/iptables -F"
             output, error = shell.execute_command(command)
             shell.log_command_output(output, error)
             shell.disconnect()
Example #28
    def replication_while_rebooting_a_non_master_destination_node(self):
        self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
        self._load_all_buckets(self.dest_master, self.gen_create2, "create", 0)
        self._async_update_delete_data()
        time.sleep(self._timeout)

        i = len(self.dest_nodes) - 1
        shell = RemoteMachineShellConnection(self.dest_nodes[i])
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        i = len(self.src_nodes) - 1
        shell = RemoteMachineShellConnection(self.src_nodes[i])
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        time.sleep(self._timeout * 2)

        self.merge_buckets(self.src_master, self.dest_master, bidirection=True)

        self.verify_results(verify_src=True)
Example #29
 def createRemoteFolder(self, host, newPath):
     shell = RemoteMachineShellConnection(host)
     try:
         shell.create_directory(newPath)
         command = 'chown couchbase:couchbase ' + newPath
         shell.execute_command(command)
     finally:
         shell.disconnect()
Example #30
 def run_failover_operations_with_ops(self, chosen, failover_reason):
     """ Method to run fail over operations used in the test scenario based on failover reason """
     # Perform operations related to failover
     failed_over = True
     for node in chosen:
         unreachable = False
         if failover_reason == 'stop_server':
             unreachable=True
             self.stop_server(node)
             self.log.info("10 seconds delay to wait for membase-server to shutdown")
             # wait for 5 minutes until node is down
             self.assertTrue(RestHelper(self.rest).wait_for_node_status(node, "unhealthy", 300),
                                 msg="node status is not unhealthy even after waiting for 5 minutes")
         elif failover_reason == "firewall":
             unreachable=True
             self.filter_list.append (node.ip)
             server = [srv for srv in self.servers if node.ip == srv.ip][0]
             RemoteUtilHelper.enable_firewall(server, bidirectional=self.bidirectional)
             status = RestHelper(self.rest).wait_for_node_status(node, "unhealthy", 300)
             if status:
                 self.log.info("node {0}:{1} is 'unhealthy' as expected".format(node.ip, node.port))
             else:
                 # verify iptables on the node if something wrong
                 for server in self.servers:
                     if server.ip == node.ip:
                         shell = RemoteMachineShellConnection(server)
                         info = shell.extract_remote_info()
                         if info.type.lower() == "windows":
                             o, r = shell.execute_command("netsh advfirewall show allprofiles")
                             shell.log_command_output(o, r)
                         else:
                             o, r = shell.execute_command("/sbin/iptables --list")
                             shell.log_command_output(o, r)
                         shell.disconnect()
                 self.rest.print_UI_logs()
                 api = self.rest.baseUrl + 'nodeStatuses'
                 status, content, header = self.rest._http_request(api)
                 json_parsed = json.loads(content)
                 self.log.info("nodeStatuses: {0}".format(json_parsed))
                 self.fail("node status is not unhealthy even after waiting for 5 minutes")
     nodes = self.filter_servers(self.servers,chosen)
     failed_over = self.cluster.async_failover([self.master], failover_nodes = chosen, graceful=self.graceful)
     # Perform Compaction
     compact_tasks = []
     if self.compact:
         for bucket in self.buckets:
             compact_tasks.append(self.cluster.async_compact_bucket(self.master,bucket))
     # Run View Operations
     if self.withViewsOps:
         self.query_and_monitor_view_tasks(nodes)
     # Run mutation operations
     if self.withMutationOps:
         self.run_mutation_operations()
     failed_over.result()
     for task in compact_tasks:
         task.result()
     msg = "rebalance failed while removing failover nodes {0}".format(node.id)
     self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
Example #31
 def test_software_version(self):
     """
       This test requires to pass 3 params to run:
         software_name
         software_version
         check_in_file
     """
     self.software_name = self.input.param("software_name", None)
     self.software_version = self.input.param("software_version", None)
     self.check_in_file = self.input.param("check_in_file", "manifest.xml")
     if self.software_name is None or self.software_version is None:
         self.fail("This test needs to pass param 'software_name'\
                                              and software_version to run")
     go_software = ["gocb", "gocbcore"]
     go_sw_in_version = ["5.1.2", "5.5.1"]
     if self.software_name in go_software and \
         (self.cb_version[:5] in go_sw_in_version or 6.0 <= float(self.cb_version[:3])):
         shell = RemoteMachineShellConnection(self.master)
         output, error = shell.execute_command(
             'cat {0}/{1} | grep \'"{2}"\' '.format(self.base_cb_path,
                                                    self.check_in_file,
                                                    self.software_name))
         shell.disconnect()
         found_version = False
         if output:
             self.log.info("\ngocb version: {0} ".format(output))
             for ele in output:
                 if "gocb" in ele and self.software_version in ele:
                     found_version = True
                     self.log.info("software info: {0}".format(ele))
                     break
         if not found_version:
             self.fail("version of {0} does not match as in: {0}"\
                               .format(self.software_name, output))
     else:
         self.log.info(
             "software name/version are not in running cb version")
Example #32
 def test_rotateInterval(self):
     intervalSec = self.input.param("intervalSec", None)
     auditIns = audit(host=self.master)
     rest = RestConnection(self.master)
     originalInt = auditIns.getAuditRotateInterval()
     try:
         firstEventTime = self.getTimeStampForFile(auditIns)
         self.log.info("first time evetn is {0}".format(firstEventTime))
         auditIns.setAuditRotateInterval(intervalSec)
         self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
         status, content = rest.validateLogin(self.master.rest_username,
                                              self.master.rest_password,
                                              True,
                                              getContent=True)
         self.sleep(120)
         shell = RemoteMachineShellConnection(self.master)
         try:
             hostname = shell.execute_command("hostname")
             archiveFile = hostname[0][
                 0] + '-' + firstEventTime + "-audit.log"
             self.log.info("Archive File Name is {0}".format(archiveFile))
             result = shell.file_exists(auditIns.pathLogFile, archiveFile)
             self.assertTrue(
                 result,
                 "Archive Audit.log is not created on time interval")
             self.log.info(
                 "Validation of archive File created is True, Audit archive File is created {0}"
                 .format(archiveFile))
             result = shell.file_exists(auditIns.pathLogFile,
                                        auditIns.AUDITLOGFILENAME)
             self.assertTrue(
                 result,
                 "Audit.log is not created when memcached server is killed")
         finally:
             shell.disconnect()
     finally:
         auditIns.setAuditRotateInterval(originalInt)
Example #33
    def test_nc_node_reconnect_time_post_cc_kill(self):
        """
        Test to verify the fix for https://issues.couchbase.com/browse/MB-29277
        """
        self.log.info("Add CBAS nodes")
        self.cluster_util.add_node(self.servers[1],
                                   services=["cbas"],
                                   rebalance=True)

        self.log.info("Kill CBAS service on CC")
        service_to_kill = self.input.param("service_to_kill", "cbas")
        shell = RemoteMachineShellConnection(self.cbas_node)
        shell.kill_process(service_to_kill, service_to_kill)
        self.sleep(2, message="Wait for CBAS service to get killed")

        self.log.info(
            "Verify analytics service accepts request in less than 30 seconds after its killed"
        )
        start_time = time.time()
        shell = RemoteMachineShellConnection(self.servers[1])
        cluster_recovered = False
        url = "http://{0}:{1}/analytics/service".format(
            self.servers[1].ip, 8095)
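        # Poll the analytics service with a trivial query for up to 20 seconds after the kill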
        while time.time() < start_time + 20:
            output, error = shell.execute_command(
                "curl -X POST {0} -u {1}:{2} -d 'statement={3}'".format(
                    url, "Administrator", "password", 'select "1"'))
            if "errors" not in str(output):
                cluster_recovered = True
                end_time = time.time()
                self.log.info("Time taken to recover %s" %
                              (end_time - start_time))
                break
        self.assertTrue(
            cluster_recovered,
            "Cluster failed to recover despite waiting for 20 seconds.")
Example #34
 def validate_backup_create(self):
     """
     Validates that the backup directory is created as expected
     Validates the backup metadata using backup-meta.json
     :return: status and message
     """
     remote_client = RemoteMachineShellConnection(
         self.backupset.backup_host)
     info = remote_client.extract_remote_info().type.lower()
     if info == 'linux' or info == 'mac':
         command = "ls -R {0}/{1}".format(self.backupset.directory,
                                          self.backupset.name)
         o, e = remote_client.execute_command(command)
     elif info == 'windows':
         o = remote_client.list_files("{0}/{1}".format(
             self.backupset.directory, self.backupset.name))
     if not o or "backup-meta.json" not in str(o):
         return False, "Backup create did not create backup-meta file."
     remote_client.disconnect()
     files_validations = BackupRestoreFilesValidations(self.backupset)
     status, msg = files_validations.validate_backup_meta_json()
     if status:
         msg += "\nBackup create validation success."
     return status, msg
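
The listing check above only needs to establish that backup-meta.json exists somewhere under the backup repository. A minimal local equivalent of the same check (the directory layout is assumed from the validation above):

import os


def backup_meta_exists(backup_directory, backup_name):
    """Walk <backup_directory>/<backup_name> and report whether
    backup-meta.json is present, mirroring the `ls -R` check above."""
    repo = os.path.join(backup_directory, backup_name)
    for _root, _dirs, files in os.walk(repo):
        if "backup-meta.json" in files:
            return True
    return False
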
Example #35
 def test_max_ttl_bucket(self):
     """
         From vulcan, an EE bucket has an option to set --max-ttl; CE does not.
         This test makes sure CE cannot create a bucket with the --max-ttl option.
         This test must pass default_bucket=False
     """
     if self.cb_version[:5] not in COUCHBASE_FROM_VULCAN:
         self.log.info("This test only for vulcan and later")
         return
     cmd = 'curl -X POST -u Administrator:password \
                                 http://{0}:8091/pools/default/buckets \
                              -d name=bucket0 \
                              -d maxTTL=100 \
                              -d ramQuotaMB=100 '.format(self.master.ip)
     if self.cli_test:
         cmd = "{0}couchbase-cli bucket-create -c {1}:8091 --username Administrator \
             --password password --bucket bucket0 --bucket-type couchbase \
             --bucket-ramsize 512 --bucket-replica 1 --bucket-priority high \
             --bucket-eviction-policy fullEviction --enable-flush 0 \
             --enable-index-replica 1 --max-ttl 200".format(
             self.bin_path, self.master.ip)
     conn = RemoteMachineShellConnection(self.master)
     output, error = conn.execute_command(cmd)
     conn.log_command_output(output, error)
     mesg = "Max TTL is supported in enterprise edition only"
     if self.cli_test:
         mesg = "Maximum TTL can only be configured on enterprise edition"
     if output and mesg not in str(output[0]):
         self.fail("max ttl feature should not in Community Edition")
     buckets = RestConnection(self.master).get_buckets()
     if buckets:
         for bucket in buckets:
             self.log.info("bucekt in cluser: {0}".format(bucket.name))
             if bucket.name == "bucket0":
                 self.fail("Failed to enforce feature max ttl in CE.")
     conn.disconnect()
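
The EE-only request that CE is expected to reject is an ordinary bucket-create POST carrying a maxTTL form field. A hedged sketch of issuing it with the requests library instead of curl (endpoint and parameters come from the test above; the requests dependency is an assumption):

import requests  # assumed available; any HTTP client would do


def try_create_max_ttl_bucket(host, user="Administrator", password="password"):
    """Attempt to create a bucket with maxTTL set. On Community Edition the
    server is expected to reject this with an enterprise-only error message."""
    url = "http://{0}:8091/pools/default/buckets".format(host)
    payload = {"name": "bucket0", "maxTTL": 100, "ramQuotaMB": 100}
    response = requests.post(url, data=payload, auth=(user, password))
    return response.status_code, response.text
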
Example #36
 def check_fragmentation_using_magma_stats(self, bucket, servers=None):
     result = dict()
     time_end = time.time() + 60 * 5
     if servers is None:
         servers = self.cluster.nodes_in_cluster
     if type(servers) is not list:
         servers = [servers]
     while time.time() < time_end:
         stats = list()
         for server in servers:
             fragmentation_values = list()
             shell = RemoteMachineShellConnection(server)
             output = shell.execute_command(
                 "lscpu | grep 'CPU(s)' | head -1 | awk '{print $2}'"
             )[0][0].split('\n')[0]
             self.log.debug("machine: {} - core(s): {}\
             ".format(server.ip, output))
             for i in range(min(int(output), 64)):
                 grep_field = "rw_{}:magma".format(i)
                 _res = self.get_magma_stats(bucket, [server],
                                             field_to_grep=grep_field)
                 fragmentation_values.append(
                     float(_res[server.ip][grep_field]["Fragmentation"]))
                 stats.append(_res)
             result.update({server.ip: fragmentation_values})
         res = list()
         for value in result.values():
             res.append(max(value))
         if max(res) < float(self.fragmentation) / 100:
             self.log.info("magma stats fragmentation result {} \
             ".format(result))
             return True
     self.log.info("magma stats fragmentation result {} \
     ".format(result))
     self.log.info(stats)
     return False
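
The pass condition above reduces to: take the worst per-shard fragmentation on each node, then require that even the worst node stays below the target ratio. A small sketch of that aggregation on plain data (the shape of the stats dict follows the loop above):

def fragmentation_within_target(per_node_fragmentation, target_percent):
    """per_node_fragmentation maps node ip -> list of per-shard fragmentation
    ratios (0.0-1.0). Returns True when the worst shard on the worst node is
    still below target_percent / 100, matching the check above."""
    worst_per_node = [max(values) for values in per_node_fragmentation.values()]
    return max(worst_per_node) < float(target_percent) / 100


# example: two nodes, worst shard 0.35 -> passes a 50% target
# fragmentation_within_target({"10.1.1.1": [0.21, 0.35], "10.1.1.2": [0.18]}, 50)
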
Example #37
    def retrieve_request_status_using_handle(self, server, handle):
        """
        Retrieves status of a request from /analytics/status endpoint
        """
        shell = RemoteMachineShellConnection(server)

        output, error = shell.execute_command("""curl -v {0}""".format(handle))

        response = ""
        for line in output:
            response = response + line
        if response:
            response = json.loads(response)
        shell.disconnect()

        status = ""
        handle = ""
        if 'status' in response:
            status = response['status']
        if 'handle' in response:
            handle = response['handle']

        self.log.info("status=%s, handle=%s" % (status, handle))
        return status, handle
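
A typical use of this helper is to keep polling the returned handle until the request reaches a terminal state. A hedged usage sketch, where `util` is assumed to be the test/helper object that defines retrieve_request_status_using_handle above (the terminal state names and polling interval are assumptions):

import time


def wait_for_request_completion(util, server, handle, timeout_secs=300):
    """Re-query /analytics/status through the handle until the request
    finishes or the timeout expires; returns the last observed status."""
    deadline = time.time() + timeout_secs
    status = ""
    while time.time() < deadline:
        status, handle = util.retrieve_request_status_using_handle(server, handle)
        if status.lower() in ("success", "failed", "fatal"):
            break
        time.sleep(5)
    return status
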
Example #38
    def test_verify_memcache_connections(self):
        allowed_memcached_conn = self._input.param("allowed_connections", 100)
        max_ops_per_second = self._input.param("max_ops_per_second", 2500)
        min_item_size = self._input.param("min_item_size", 128)
        num_docs = self._input.param("num_docs", 30000)
        # start load, max_ops_per_second is the combined limit for all buckets
        mcsodaLoad = LoadWithMcsoda(self.src_master, num_docs, prefix='')
        mcsodaLoad.cfg["max-ops"] = 0
        mcsodaLoad.cfg["max-ops-per-sec"] = max_ops_per_second
        mcsodaLoad.cfg["exit-after-creates"] = 1
        mcsodaLoad.cfg["min-value-size"] = min_item_size
        mcsodaLoad.cfg["json"] = 0
        mcsodaLoad.cfg["batch"] = 100
        loadDataThread = Thread(target=mcsodaLoad.load_data,
                                  name='mcloader_default')
        loadDataThread.daemon = True
        loadDataThread.start()

        src_remote_shell = RemoteMachineShellConnection(self.src_master)
        machine_type = src_remote_shell.extract_remote_info().type.lower()
        while (loadDataThread.isAlive() and machine_type == 'linux'):
            command = "netstat -lpnta | grep 11210 | grep TIME_WAIT | wc -l"
            output, _ = src_remote_shell.execute_command(command)
            if int(output[0]) > allowed_memcached_conn:
                # stop load
                mcsodaLoad.load_stop()
                loadDataThread.join()
                self.fail("Memcached connections {0} are increased above {1} \
                            on Source node".format(
                                                   allowed_memcached_conn,
                                                   int(output[0])))
            self.sleep(5)

        # stop load
        mcsodaLoad.load_stop()
        loadDataThread.join()
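
The guard inside the load loop is simply a count of TIME_WAIT sockets on the memcached port. The same count can be taken locally without a remote shell (the netstat flags are copied from the test; netstat being installed is an assumption):

import subprocess


def count_time_wait_connections(port=11210):
    """Count TIME_WAIT entries for the given port in `netstat -lpnta` output."""
    output = subprocess.check_output(["netstat", "-lpnta"]).decode("utf-8", "ignore")
    return sum(1 for line in output.splitlines()
               if ":{0}".format(port) in line and "TIME_WAIT" in line)
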
Example #39
    def change_erlang_threads_values(servers,
                                     sync_threads=True,
                                     num_threads='16:16'):
        """Change the the type of sync erlang threads and its value
           sync_threads=True means sync threads +S with default threads number equal 16:16
           sync_threads=False means async threads: +A 16, for instance

        Default: +S 16:16
        """
        log = logger.Logger.get_logger()
        for server in servers:
            sh = RemoteMachineShellConnection(server)
            product = "membase"
            if sh.is_couchbase_installed():
                product = "couchbase"

            sync_type = sync_threads and "S" or "A"

            command = "sed -i 's/+[A,S] .*/+%s %s \\\/g' /opt/%s/bin/%s-server" % \
                 (sync_type, num_threads, product, product)
            o, r = sh.execute_command(command)
            sh.log_command_output(o, r)
            msg = "modified erlang +%s to %s for server %s"
            log.info(msg % (sync_type, num_threads, server.ip))
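
A hedged usage sketch for the helper above: switching every node in the cluster to async I/O threads, which rewrites the +A/+S flag in the server start script via the sed command shown, and then restoring the default.

# assumed usage within the same suite; `servers` is the usual list of server objects
change_erlang_threads_values(servers, sync_threads=False, num_threads='16')
# restore the default sync threads afterwards
change_erlang_threads_values(servers, sync_threads=True, num_threads='16:16')
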
Example #40
    def test_analytics_request_exceeding_max_request_size_is_rejected(self):
        
        self.log.info("Fetch maxWebRequestSize value")
        status, content, _ = self.cbas_util.fetch_service_parameter_configuration_on_cbas()
        self.assertTrue(status, msg="Failed to fetch configs")
        max_web_request_size = json.loads((content.decode("utf-8")))['maxWebRequestSize']

        self.log.info("Update storageMaxActiveWritableDatasets")
        update_config_map = {"maxWebRequestSize": 1}
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(update_config_map)
        self.assertTrue(status, msg="Failed to update maxWebRequestSize")

        self.log.info("Analytics node restart")
        status, _, _ = self.cbas_util.restart_analytics_node_uri(self.cbas_node.ip)
        self.assertTrue(status, msg="Failed to restart analytics node")

        self.log.info("Wait for node to be active")
        self.sleep(30, message="Wait for service to be up")

        self.log.info("Verify request is rejected")
        shell = RemoteMachineShellConnection(self.cbas_node)
        url = "http://{0}:{1}/analytics/service".format(self.cbas_node.ip, 8095)
        _, error = shell.execute_command("curl -v -X POST {0} -u {1}:{2} -d 'statement={3}'".format(url, "Administrator", "password", 'select "a"'))
        self.assertTrue("413 Request Entity Too Large" in str(error), msg="Request must be rejected")
Example #41
class Examine:
    def __init__(self, server):
        self.remote_connection = RemoteMachineShellConnection(server)

    def examine(self, examine_arguments):
        """ Returns an ExamineResults given an ExamineArguments object """
        if not examine_arguments.json:
            raise ValueError(
                "Currently the non-JSON data output from the examine sub-command is not supported for testing."
            )

        output, error, exit_code = self.remote_connection.execute_command(
            examine_arguments.to_command(), get_exit_code=True)

        if exit_code != 0 or not output:
            return None, error

        return ExamineResult.from_output(output[0])

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.remote_connection.disconnect()
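
Since __enter__/__exit__ are defined, the class is intended to be used as a context manager so the remote shell is always disconnected. A hedged usage sketch (the examine_arguments object with json output enabled is assumed to be built elsewhere in the suite):

# assumed usage: examine_arguments prepared elsewhere with json output enabled
with Examine(server) as examine:
    result = examine.examine(examine_arguments)
    print("examine returned: {0}".format(result))
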
Example #42
 def _couchbase_cli_eventing(self,
                             host,
                             function_name,
                             operation,
                             result,
                             file_name=None,
                             name=True):
     remote_client = RemoteMachineShellConnection(host)
     cmd = "couchbase-cli eventing-function-setup -c {0} -u {1} -p {2} --{3} ".format(
         host.ip, host.rest_username, host.rest_password, operation)
     if name:
         cmd += " --name {0}".format(function_name)
     if file_name:
         cmd += " --file {0}".format(file_name)
     command = "{0}/{1}".format(self.cli_command_location, cmd)
     log.info(command)
     output, error = remote_client.execute_command(command)
     if error or not [x for x in output if result in x]:
         self.fail(
             "couchbase-cli event-setup function {0} failed: {1}".format(
                 operation, output))
     else:
         log.info("couchbase-cli event-setup function {0} succeeded : {1}".
                  format(operation, output))
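
For reference, the helper above assembles a plain couchbase-cli command line; the sketch below rebuilds the same string as a pure function so the expected shape is easy to see (flags mirror the code above, the values in the commented call are placeholders):

def build_eventing_cli_command(cli_dir, host_ip, user, password, operation,
                               function_name=None, file_name=None):
    """Rebuild the couchbase-cli eventing-function-setup command string."""
    cmd = "couchbase-cli eventing-function-setup -c {0} -u {1} -p {2} --{3} ".format(
        host_ip, user, password, operation)
    if function_name:
        cmd += " --name {0}".format(function_name)
    if file_name:
        cmd += " --file {0}".format(file_name)
    return "{0}/{1}".format(cli_dir, cmd)


# build_eventing_cli_command("/opt/couchbase/bin", "10.1.1.1", "Administrator",
#                            "password", "deploy", function_name="my_func")
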
Example #43
    def disable_firewall(server):
        """Disable firewall to put restriction to replicate items in XDCR.
        @param server: server object to disable firewall
        @param rep_direction: replication direction unidirection/bidirection
        """
        shell = RemoteMachineShellConnection(server)
        shell.info = shell.extract_remote_info()

        if shell.info.type.lower() == "windows":
            output, error = shell.execute_command(
                'netsh advfirewall set publicprofile state off')
            shell.log_command_output(output, error)
            output, error = shell.execute_command(
                'netsh advfirewall set privateprofile state off')
            shell.log_command_output(output, error)
            # for details see RemoteUtilHelper.enable_firewall for windows
            output, error = shell.execute_command(
                'netsh advfirewall firewall delete rule name="block erl.exe in"'
            )
            shell.log_command_output(output, error)
            output, error = shell.execute_command(
                'netsh advfirewall firewall delete rule name="block erl.exe out"'
            )
            shell.log_command_output(output, error)
        else:
            o, r = shell.execute_command("/sbin/iptables --list")
            shell.log_command_output(o, r)
            if not o:
                raise ("Node not reachable yet")


#             o, r = shell.execute_command(
#                 "/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:65535 -j ACCEPT")
#             shell.log_command_output(o, r)
#             o, r = shell.execute_command(
#                 "/sbin/iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT")
#             shell.log_command_output(o, r)
#             # self.log.info("enabled firewall on {0}".format(server))
            o, r = shell.execute_command("iptables -F")
            shell.log_command_output(o, r)
        shell.disconnect()
Example #44
 def enable_IPV6_grub_level(self):
     """
     Enable IPV6 at grub level for all nodes in the cluster
     """
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         shell.execute_command(
             "sed -i 's/ipv6.disable=1/ipv6.disable=0/' /etc/default/grub")
         shell.execute_command("grub2-mkconfig -o /boot/grub2/grub.cfg")
         shell.reboot_node()
         time.sleep(10)
         shell = RemoteMachineShellConnection(server)
         output, error = shell.execute_command("ifconfig | grep inet6")
         if output == []:
             log.info("Cant enable IPv6")
             log.info(
                 "Output message is {0} and error message is {1}".format(
                     output, error))
         elif output != []:
             log.info("IPv6 Successfully Enabled for {0}".format(server.ip))
         output, error = shell.execute_command("iptables -F")
         shell.disconnect()
Example #45
 def test_permissions(self):
     shell = RemoteMachineShellConnection(self.master)
     info = shell.extract_remote_info()
     if info.type.lower() == 'windows':
         self.log.info('Test is designed for linux only')
         return
     shell.execute_command('chmod 000 %s' % LINUX_CB_PATH)
     self.sleep(10, 'wait for couchbase stopping')
     shell.execute_command('chmod 755 %s' % LINUX_CB_PATH)
     self.sleep(10, 'wait for couchbase start')
     try:
         rest = RestConnection(self.master)
         self.assertTrue(
             RestHelper(rest).is_ns_server_running(timeout_in_seconds=60),
             'NS server is not up')
     except Exception, ex:
         self.log.error('Couchbase is not running')
         shell.execute_command('reboot')
         self.sleep(60, 'wait for reboot of VM')
         rest = RestConnection(self.master)
         self.assertTrue(
             RestHelper(rest).is_ns_server_running(timeout_in_seconds=60),
             'NS server is not up')
         raise ex
Example #46
class CBASErrorValidator(CBASBaseTest):

    def setUp(self):
        super(CBASErrorValidator, self).setUp()

        self.log.info("Read input param : error id")
        self.error_id = self.input.param('error_id', None)
        self.error_response = CBASError(self.error_id).get_error()
        self.log.info("Test to validate error response :\ %s" % self.error_response)

        self.log.info("Create connection")
        self.cbas_util.createConn(self.cb_bucket_name)

        self.log.info("Establish remote connection to CBAS node")
        self.shell = RemoteMachineShellConnection(self.cbas_node)
        self.shell_kv = RemoteMachineShellConnection(self.cluster.master)
        self.cbas_url = "http://{0}:{1}/analytics/service".format(self.cbas_node.ip, 8095)

    def create_dataset_connect_link(self):
        self.log.info("Create dataset on the CBAS")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name, self.cbas_dataset_name)
        
        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

    def validate_error_response(self, status, errors, expected_error, expected_error_code):
        if errors is None:
            return False
        return self.cbas_util.validate_error_in_response(status, errors, expected_error, expected_error_code)

    """
    test_error_response,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds
    """
    def test_error_response(self):

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Execute queries and validate error response")
        error_response_mismatch = []
        count = 0

        for error_object in (x for x in CBASError.errors if "run_in_loop" in x):
            count += 1
            self.log.info("-------------------------------------------- Running --------------------------------------------------------------------------------")
            self.log.info(error_object)

            if "param" in error_object:
                status, _, errors, cbas_result, _ = self.cbas_util.execute_parameter_statement_on_cbas_util(error_object["query"], parameters=error_object["param"])

            else:
                time_out = error_object["time_out"] if "time_out" in error_object else 120
                time_unit = error_object["time_unit"] if "time_unit" in error_object else "s"
                status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(error_object["query"], analytics_timeout=time_out, time_out_unit=time_unit)

            self.log.info(status)
            self.log.info(errors)


            if not self.validate_error_response(status, errors, error_object["msg"], error_object["code"]):
                error_response_mismatch.append([error_object['id']])

            self.log.info("-------------------------------------------- Completed ------------------------------------------------------------------------------")

        self.log.info("Run summary")
        self.log.info("Total:%d Passed:%d Failed:%d" % (count, count - len(error_response_mismatch), len(error_response_mismatch)))
        if len(error_response_mismatch):
            self.log.info(error_response_mismatch)
            self.fail("Failing test error msg/code mismatch.")

    """
    test_error_response_index_name_exist,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=index_already_exist
    """
    def test_error_response_index_name_exist(self):

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()
        
        self.log.info("Disconnect Local link")
        self.assertTrue(self.cbas_util.disconnect_from_bucket(), msg="Failed to disconnect connected bucket")
        
        self.log.info("Create a secondary index")
        self.assertTrue(self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"]), msg="Failed to create secondary index")

        self.log.info("Execute query and validate error response")
        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])
    
    """
    test_error_response_index_on_meta_fields,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=index_on_meta_id
    test_error_response_index_on_meta_fields,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=index_on_meta_cas
    test_error_response_index_on_meta_fields,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=composite_index_on_meta_and_document_field
    """
    def test_error_response_index_on_meta_fields(self):

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()
        
        self.log.info("Disconnect Local link")
        self.assertTrue(self.cbas_util.disconnect_from_bucket(), msg="Failed to disconnect connected bucket")
        
        self.log.info("Create a secondary index")
        self.assertTrue(self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"]), msg="Failed to create secondary index")

        self.log.info("Execute query and validate error response")
        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])
    
    """
    test_error_response_user_permission,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=user_permission
    """
    def test_error_response_user_permission(self):
        
        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()
        
        self.log.info("Create a user with analytics reader role")
        rbac_util = RbacUtils(self.cluster.master)
        rbac_util._create_user_and_grant_role("reader_admin", "analytics_reader")

        self.log.info("Execute query and validate error response")
        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"], username="******", password="******")
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])
    
    """
    test_error_response_user_unauthorized,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=user_unauthorized
    """
    def test_error_response_user_unauthorized(self):
        
        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()
        
        self.log.info("Create remote connection and execute cbas query using curl")
        output, _ = self.shell.execute_command("curl -X POST {0} -u {1}:{2}".format(self.cbas_url, "Administrator", "pass"))

        self.log.info("Execute query and validate error response")
        self.assertTrue(self.error_response["msg"] in str(output), msg="Error message mismatch")
        self.assertTrue(str(self.error_response["code"]) in str(output), msg="Error code mismatch")
    
    """
    test_error_response_connect_link_failed,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=connect_link_fail
    """
    def test_error_response_connect_link_failed(self):

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Delete KV bucket")
        self.assertTrue(self.bucket_util.delete_bucket(
            self.cluster.master, self.bucket_util.buckets[0].name),
            "Bucket deletion failed")

        self.log.info("Execute query and validate error response")
        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])

    """
    test_error_response_drop_dataverse,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=dataverse_drop_link_connected
    """
    def test_error_response_drop_dataverse(self):

        self.log.info("Create dataverse")
        status, metrics, _, cbas_result, _ = self.cbas_util.execute_statement_on_cbas_util("create dataverse custom")
        self.assertEquals(status, "success", msg="Create dataverse query failed")

        self.log.info("Use dataverse")
        status, metrics, _, cbas_result, _ = self.cbas_util.execute_statement_on_cbas_util("use custom")
        self.assertEquals(status, "success", msg="Use dataverse query failed")

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Execute query and validate error response")
        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])
    
    """
    test_analytics_service_tmp_unavailable,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=service_unavailable
    """
    def test_analytics_service_tmp_unavailable(self):

        self.log.info("Add CBAS nodes")
        self.cluster_util.add_node(self.servers[1], services=["cbas"], rebalance=False)
        self.cluster_util.add_node(self.cluster.cbas_nodes[0], services=["cbas"], rebalance=True)

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Kill Java process")
        self.shell.execute_command("pkill java")

        self.log.info("Wait until we get into the state analytics service is unavailable")
        service_unavailable = False
        cluster_recovery_time = time.time()
        while time.time() < cluster_recovery_time + 120:
            output, error = self.shell.execute_command("curl -X POST {0} -u {1}:{2} -d 'statement={3}'".format(self.cbas_url, "Administrator", "password", self.error_response["query"]))
            self.log.info(output)
            self.log.info(error)
            if self.error_response["msg"][0] in str(output):
                self.log.info("Hit service unavailable condition")
                service_unavailable = True
                break

        self.log.info("Validate error response")
        self.assertTrue(service_unavailable, msg="Failed to get into the state analytics service is unavailable")
        self.assertTrue(self.error_response["msg"] in str(output), msg="Error message mismatch")
        self.assertTrue(str(self.error_response["code"]) in str(output), msg="Error code mismatch")
    
    """
    test_error_response_rebalance_in_progress,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=rebalance_in_progress,items=10000
    """
    def test_error_response_rebalance_in_progress(self):

        self.log.info("Load documents in KV bucket")
        self.perform_doc_ops_in_all_cb_buckets("create", 0, self.num_items, batch_size=5000)

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Assert document count")
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items), msg="Count mismatch on CBAS")

        self.log.info("Rebalance in a cbas node")
        self.cluster_util.add_node(self.cluster.cbas_nodes[0], wait_for_rebalance_completion=False)

        self.log.info("Execute query and validate error response")
        start_time = time.time()
        while time.time() < start_time + 120:
            status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
            if errors is not None:
               break 
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])

    """
    test_error_response_using_curl,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=job_requirement
    """    
    def test_error_response_using_curl(self):
        
        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()
        
        self.log.info("Execute query using CURL")
        output, _ = self.shell.execute_command("curl -X POST {0} -u {1}:{2} -d 'statement={3}'".format(self.cbas_url, "Administrator", "password", self.error_response["query"]))
            
        self.assertTrue(self.error_response["msg"] in str(output), msg="Error message mismatch")
        self.assertTrue(str(self.error_response["code"]) in str(output), msg="Error code mismatch")

    """
    test_error_response_memcached_bucket,default_bucket=False,cb_bucket_name=default,error_id=memcached_bucket
    """
    def test_error_response_memcached_bucket(self):

        self.log.info("create memcached bucket")
        self.shell_kv.execute_command(
            "curl 'http://{0}:8091/pools/default/buckets' --data 'name={1}&bucketType=memcached&ramQuotaMB=100' -u Administrator:password".format(self.cluster.master.ip, self.cb_bucket_name))
        self.log.info("Execute query and validate error response")
        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])

    """
    test_error_response_index_not_found,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=index_not_found
    """
    def test_error_response_index_not_found(self):

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Disconnect Local link")
        self.assertTrue(self.cbas_util.disconnect_link(), msg="Failed to disconnect connected bucket")

        self.log.info("Execute query and validate error response")
        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])

    """
    test_error_response_max_writable_dataset_exceeded,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=max_writable_datasets
    """
    def test_error_response_max_writable_dataset_exceeded(self):

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Disconnect Local link")
        self.assertTrue(self.cbas_util.disconnect_from_bucket(), msg="Failed to disconnect Local link")

        self.log.info("Create 8 more datasets on CBAS bucket")
        for i in range(1, 9):
            self.assertTrue(self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name, self.cbas_dataset_name + str(i)),
                            msg="Create dataset %s failed" % self.cbas_dataset_name + str(i))

        self.log.info("Connect back Local link and verify error response for max dataset exceeded")
        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])

    """
    test_error_response_for_bucket_uuid_change,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=bucket_uuid_change
    """
    def test_error_response_for_bucket_uuid_change(self):

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Disconnect link")
        self.cbas_util.disconnect_link()

        self.log.info("Delete KV bucket")
        self.assertTrue(self.bucket_util.delete_bucket(
            self.cluster.master, self.bucket_util.buckets[0].name),
            "Bucket deletion failed")

        self.log.info("Recreate KV bucket")
        self.bucket_util.create_default_bucket(storage=self.bucket_storage)

        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])

    """
    test_error_response_no_statement,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=no_statement
    """
    def test_error_response_no_statement(self):

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Execute query on CBAS")
        output, _ = self.shell.execute_command("curl -X POST {0} -u {1}:{2} ".format(self.cbas_url, "Administrator", "password"))

        self.assertTrue(self.error_response["msg"] in str(output), msg="Error message mismatch")
        self.assertTrue(str(self.error_response["code"]) in str(output), msg="Error code mismatch")

    """
    test_error_response_type_mismatch_object,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=type_mismatch_for_object
    """
    def test_error_response_type_mismatch_object(self):
        self.log.info("Create a reference to SDK client")
        client = SDKClient(hosts=[self.cluster.master.ip], bucket=self.cb_bucket_name, password=self.cluster.master.rest_password)

        self.log.info("Insert documents in KV bucket")
        documents = ['{"address":{"city":"NY"}}']
        client.insert_json_documents("id-", documents)

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])

    """
    test_error_response_for_kv_bucket_delete,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,error_id=kv_bucket_does_not_exist
    """
    def test_error_response_for_kv_bucket_delete(self):

        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()

        self.log.info("Disconnect link")
        self.cbas_util.disconnect_link()

        self.log.info("Delete KV bucket")
        self.assertTrue(self.bucket_util.delete_bucket(
            self.cluster.master, self.bucket_util.buckets[0].name),
            "Bucket deletion failed")

        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(self.error_response["query"])
        self.validate_error_response(status, errors, self.error_response["msg"], self.error_response["code"])

    def tearDown(self):
        super(CBASErrorValidator, self).tearDown()
Example #47
    def run_failover_operations(self, chosen, failover_reason):
        """ Method to run fail over operations used in the test scenario based on failover reason """
        # Perform operations related to failover
        graceful_count = 0
        graceful_failover = True
        failed_over = True
        for node in chosen:
            unreachable = False
            if failover_reason == 'stop_server':
                unreachable = True
                self.stop_server(node)
                self.log.info("10 seconds delay to wait for membase-server to shutdown")
                # wait for 5 minutes until node is down
                self.assertTrue(RestHelper(self.rest).wait_for_node_status(node, "unhealthy", self.wait_timeout * 10),
                                    msg="node status is not unhealthy even after waiting for 5 minutes")
            elif failover_reason == "firewall":
                unreachable = True
                self.filter_list.append (node.ip)
                server = [srv for srv in self.servers if node.ip == srv.ip][0]
                RemoteUtilHelper.enable_firewall(server, bidirectional=self.bidirectional)
                status = RestHelper(self.rest).wait_for_node_status(node, "unhealthy", self.wait_timeout * 10)
                if status:
                    self.log.info("node {0}:{1} is 'unhealthy' as expected".format(node.ip, node.port))
                else:
                    # verify iptables on the node if something wrong
                    for server in self.servers:
                        if server.ip == node.ip:
                            shell = RemoteMachineShellConnection(server)
                            info = shell.extract_remote_info()
                            if info.type.lower() == "windows":
                                o, r = shell.execute_command("netsh advfirewall show allprofiles")
                                shell.log_command_output(o, r)
                            else:
                                o, r = shell.execute_command("/sbin/iptables --list")
                                shell.log_command_output(o, r)
                            shell.disconnect()
                    self.rest.print_UI_logs()
                    api = self.rest.baseUrl + 'nodeStatuses'
                    status, content, header = self.rest._http_request(api)
                    json_parsed = json.loads(content)
                    self.log.info("nodeStatuses: {0}".format(json_parsed))
                    self.fail("node status is not unhealthy even after waiting for 5 minutes")
            # verify the failover type
            if self.check_verify_failover_type:
                graceful_count, graceful_failover = self.verify_failover_type(node, graceful_count, self.num_replicas, unreachable)
            # define precondition check for failover
            success_failed_over = self.rest.fail_over(node.id, graceful=(self.graceful and graceful_failover))
            if self.graceful and graceful_failover:
                if self.stopGracefulFailover or self.killNodes or self.stopNodes or self.firewallOnNodes:
                    self.victim_node_operations(node)
                    # Start Graceful Again
                    self.log.info(" Start Graceful Failover Again !")
                    self.sleep(120)
                    success_failed_over = self.rest.fail_over(node.id, graceful=(self.graceful and graceful_failover))
                    self.sleep(180)
                    msg = "graceful failover failed for nodes {0}".format(node.id)
                    self.log.info("chosen: {0} get_failover_count: {1}".format(len(chosen),
                                                                               self.get_failover_count()))
                    self.assertEqual(len(chosen), self.get_failover_count(), msg=msg)
                else:
                    msg = "rebalance failed while removing failover nodes {0}".format(node.id)
                    self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
            failed_over = failed_over and success_failed_over

        # Check for negative cases
        if self.graceful and (failover_reason in ['stop_server', 'firewall']):
            if failed_over:
                # MB-10479
                self.rest.print_UI_logs()
            self.assertFalse(failed_over, "Graceful Failover was started for unhealthy node!!!")
            return
        elif self.gracefulFailoverFail and not failed_over:
            """ Check if the fail_over fails as expected """
            self.assertFalse(failed_over, """ Graceful failover should fail due to not enough replicas """)
            return

        # Check if failover happened as expected or re-try one more time
        if not failed_over:
            self.log.info("unable to failover the node the first time. try again in  60 seconds..")
            # try again in 75 seconds
            self.sleep(75)
            failed_over = self.rest.fail_over(node.id, graceful=(self.graceful and graceful_failover))
        if self.graceful and (failover_reason not in ['stop_server', 'firewall']):
            reached = RestHelper(self.rest).rebalance_reached()
            self.assertTrue(reached, "rebalance failed for Graceful Failover, stuck or did not complete")

        # Verify Active and Replica Bucket Count
        if self.num_replicas > 0:
            nodes = self.filter_servers(self.servers, chosen)
            self.vb_distribution_analysis(servers=nodes, buckets=self.buckets, std=20.0 , total_vbuckets=self.total_vbuckets, type="failover", graceful=(self.graceful and graceful_failover))
Example #48
 def run(self):
     remote = RemoteMachineShellConnection(self.server)
     server_type = 'membase'
     if remote.is_couchbase_installed():
         server_type = 'couchbase'
     stamp = time.strftime("%d_%m_%Y_%H_%M")
     try:
         info = remote.extract_remote_info()
         if info.type.lower() != 'windows':
             core_files = []
             print("looking for crashes on {0} ... ".format(info.ip))
             print("erl_crash files under /opt/{0}/var/lib/{0}/".format(server_type))
             core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
             print("core* files under /opt/{0}/var/lib/{0}/".format(server_type))
             core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "core"))
             print("core* files under /tmp/")
             core_files.extend(remote.file_starts_with("/tmp/", "core"))
             print("breakpad *dmp files under /opt/{0}/var/lib/{0}/".format(server_type))
             core_files.extend(remote.file_ends_with("/opt/{0}/var/lib/{0}/".format(server_type), ".dmp"))
             if core_files:
                 print("found crashes on {0}: {1}".format(info.ip, core_files))
             else:
                 print("crashes not found on {0}".format(info.ip))
             i = 0
             for core_file in core_files:
                 if core_file.find('erl_crash.dump') != -1:
                     #let's just copy that file back
                     erl_crash_file_name = "erlang-{0}-{1}.log".format(self.server.ip, i)
                     remote_path, file_name = os.path.dirname(core_file), os.path.basename(core_file)
                     if remote.get_file(remote_path, file_name, os.path.join(self.path, erl_crash_file_name)):
                         print('downloaded core file : {0}'.format(core_file))
                         i += 1
                 elif core_file.find('.dmp') != -1:
                     breakpad_crash_file_name = "breakpad-{0}-{1}.dmp".format(self.server.ip, i)
                     remote_path, file_name = os.path.dirname(core_file), os.path.basename(core_file)
                     if remote.get_file(remote_path, file_name, os.path.join(self.path, breakpad_crash_file_name)):
                         print('downloaded breakpad .dmp file : {0}'.format(core_file))
                         i += 1
                 else:
                     command = "/opt/{0}/bin/tools/cbanalyze-core".format(server_type)
                     core_file_name = "core-{0}-{1}.log".format(self.server.ip, i)
                     core_log_output = "/tmp/{0}".format(core_file_name)
                     output, _ = remote.execute_command('{0} {1} -f {2}'.format(command, core_file, core_log_output))
                     print(output)
                     remote_path, file_name = os.path.dirname(core_log_output), os.path.basename(core_log_output)
                     if remote.get_file(remote_path, file_name, os.path.join(self.path, core_file_name)):
                         print('downloaded core backtrace : {0}'.format(core_log_output))
                         i += 1
             if i > 0:
                 command = "mkdir -p /tmp/backup_crash/{0};" \
                           "mv -f /tmp/core* /tmp/backup_crash/{0};" \
                           "mv -f /opt/{1}/var/lib/{1}/erl_crash.dump* /tmp/backup_crash/{0}; " \
                           "mv -f /opt/{1}/var/lib/{1}/*.dmp /tmp/backup_crash/{0};" \
                           "mv -f /opt/{1}/var/lib/{1}/crash/*.dmp /tmp/backup_crash/{0};".\
                     format(stamp, server_type)
                 print("put all crashes on {0} in backup folder: /tmp/backup_crash/{1}".format(self.server.ip, stamp))
                 remote.execute_command(command)
                 output, error = remote.execute_command("ls -la /tmp/backup_crash/{0}".format(stamp))
                 for o in output:
                     print(o)
                 remote.disconnect()
                 return True
             if remote:
                 remote.disconnect()
             return False
     except Exception as ex:
         print(ex)
         return False
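
The crash hunt above looks for three kinds of artifacts: erl_crash dumps, core files and breakpad .dmp files. A simplified local sketch of the same discovery step using os.walk (search locations and name patterns are taken from the code above; it searches all patterns in both locations rather than per-directory):

import os


def find_crash_artifacts(lib_dir="/opt/couchbase/var/lib/couchbase", tmp_dir="/tmp"):
    """Return paths of erl_crash*, core* and *.dmp files in the usual spots."""
    matches = []
    for base in (lib_dir, tmp_dir):
        for root, _dirs, files in os.walk(base):
            for name in files:
                if name.startswith(("erl_crash", "core")) or name.endswith(".dmp"):
                    matches.append(os.path.join(root, name))
    return matches
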
Example #49
class unidirectional(XDCRNewBaseTest):
    def setUp(self):
        super(unidirectional, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()

    def tearDown(self):
        super(unidirectional, self).tearDown()

    def suite_tearDown(self):
        pass

    def suite_setUp(self):
        pass

    """Testing Unidirectional load( Loading only at source) Verifying whether XDCR replication is successful on
    subsequent destination clusters.Create/Update/Delete operations are performed based on doc-ops specified by the user. """

    def load_with_ops(self):
        self.setup_xdcr_and_load()
        self.perform_update_delete()
        self.verify_results()

    """Testing Unidirectional load( Loading only at source) Verifying whether XDCR replication is successful on
    subsequent destination clusters. Create/Update/Delete are performed in parallel- doc-ops specified by the user. """

    def load_with_async_ops(self):
        self.setup_xdcr_and_load()
        self.async_perform_update_delete()
        self.verify_results()

    def load_with_async_ops_diff_data_size(self):
        # Load 52 items of value size 1 (one per letter, lower and upper case)
        self.src_cluster.load_all_buckets(52, value_size=1)
        # Load 5 items of value size 1MB
        self.src_cluster.load_all_buckets(5, value_size=1000000)
        # Load 1 item of value size 10MB
        # (hitting memory issues with 20MB values on VMs)
        self.src_cluster.load_all_buckets(1, value_size=10000000)

        self.verify_results()

    """Testing Unidirectional load( Loading only at source). Failover node at Source/Destination while
    Create/Update/Delete are performed after based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_ops_with_warmup(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node())
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node())

        self.sleep(self._wait_timeout)
        self.perform_update_delete()
        self.sleep(self._wait_timeout // 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_ops_with_warmup_master(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))

        self.sleep(self._wait_timeout)
        self.perform_update_delete()
        self.sleep(self._wait_timeout // 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_async_ops_with_warmup(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            "Test case does not apply for Ephemeral buckets"
            return
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node())
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node())

        self.sleep(self._wait_timeout)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout // 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_async_ops_with_warmup_master(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            "Test case does not apply for Ephemeral buckets"
            return
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))

        self.sleep(self._wait_timeout)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout // 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.sleep(300)

        self.verify_results()

    def load_with_failover(self):
        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes()

        self.sleep(self._wait_timeout // 6)
        self.perform_update_delete()

        self.verify_results()

    def load_with_failover_then_add_back(self):

        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.src_cluster.add_back_node()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.dest_cluster.add_back_node()

        self.perform_update_delete()

        self.verify_results()

    """Testing Unidirectional load( Loading only at source). Failover node at Source/Destination while
    Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_failover_master(self):
        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_master()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_master()

        self.sleep(self._wait_timeout // 6)
        self.perform_update_delete()

        self.sleep(300)

        self.verify_results()

    """Testing Unidirectional load( Loading only at source). Failover node at Source/Destination while
    Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_async_failover(self):
        self.setup_xdcr_and_load()

        tasks = []
        if "C1" in self._failover:
            tasks.append(self.src_cluster.async_failover())
        if "C2" in self._failover:
            tasks.append(self.dest_cluster.async_failover())

        self.perform_update_delete()
        self.sleep(self._wait_timeout // 4)

        for task in tasks:
            task.result()

        if "C1" in self._failover:
            self.src_cluster.rebalance_failover_nodes()
        if "C2" in self._failover:
            self.dest_cluster.rebalance_failover_nodes()

        self.verify_results()

    """Replication with compaction ddocs and view queries on both clusters.

        This test begins by loading a given number of items on the source cluster.
        It creates num_views development/production views with default
        map functions (_is_dev_ddoc=True by default) on both clusters.
        Then we disable compaction for the ddoc on the src cluster. Until the ddoc
        on the src cluster reaches the expected fragmentation we update docs and
        run queries against all views. Once fragmentation_value is reached we start
        compaction. When compaction has completed we perform a full
        verification: wait for the disk queues to drain
        and then verify that there has been no data loss on any cluster."""
    def replication_with_ddoc_compaction(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return

        self.setup_xdcr_and_load()

        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        fragmentation_value = self._input.param("fragmentation_value", 80)
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)

        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]

        query = {"full_set": "true", "stale": "false"}

        tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)

        self.src_cluster.disable_compaction()
        fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)

        # generate load until fragmentation reached
        while fragmentation_monitor.state != "FINISHED":
            # update docs to create fragmentation
            self.src_cluster.update_delete_data(OPS.UPDATE)
            for view in views:
                # run queries to create indexes
                self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
        fragmentation_monitor.result()

        compaction_task = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default')

        self.assertTrue(compaction_task.result())

        self.verify_results()

    """Replication with disabled/enabled ddoc compaction on source cluster.

        This test begins by loading a given number of items on the source cluster.
        Then we disable or enable compaction on both clusters (set via params).
        Then we mutate and delete data on the source cluster 3 times.
        After deletion we recreate the deleted items. When the data has been changed 3 times
        we perform a full verification: wait for the disk queues to drain
        and then verify that there has been no data loss on all clusters."""
    def replication_with_disabled_ddoc_compaction(self):
        self.setup_xdcr_and_load()

        if "C1" in self._disable_compaction:
            self.src_cluster.disable_compaction()
        if "C2" in self._disable_compaction:
            self.dest_cluster.disable_compaction()

        # perform doc's ops 3 times to increase rev number
        for _ in range(3):
            self.async_perform_update_delete()
            # restore(re-creating) deleted items
            if 'C1' in self._del_clusters:
                c1_kv_gen = self.src_cluster.get_kv_gen()
                gen_delete = copy.deepcopy(c1_kv_gen[OPS.DELETE])
                self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_delete)
                self.sleep(5)

        self.sleep(600)
        self.verify_results()

    def replication_while_rebooting_a_non_master_destination_node(self):
        self.setup_xdcr_and_load()
        self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
        self.perform_update_delete()
        self.sleep(self._wait_timeout // 2)
        rebooted_node = self.dest_cluster.reboot_one_node(self)
        NodeHelper.wait_node_restarted(rebooted_node, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)

        self.verify_results()

    def replication_with_firewall_enabled(self):
        self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
        self.setup_xdcr_and_load()
        self.perform_update_delete()

        NodeHelper.enable_firewall(self.dest_master)
        self.sleep(30)
        NodeHelper.disable_firewall(self.dest_master)
        self.verify_results()

    """Testing Unidirectional append ( Loading only at source) Verifying whether XDCR replication is successful on
    subsequent destination clusters. """

    def test_append(self):
        self.setup_xdcr_and_load()
        self.verify_results()
        loop_count = self._input.param("loop_count", 20)
        for i in range(loop_count):
            self.log.info("Append iteration # %s" % i)
            gen_append = BlobGenerator('loadOne', 'loadOne', self._value_size, end=self._num_items)
            self.src_cluster.load_all_buckets_from_generator(gen_append, ops=OPS.APPEND, batch_size=1)
            self.sleep(self._wait_timeout)
        self.verify_results()

    '''
    This method runs the cbcollectinfo tool after setting up uni-XDCR and checks
    whether the archive generated by cbcollectinfo contains the xdcr log file.
    '''
    def collectinfotest_for_xdcr(self):
        self.load_with_ops()
        self.node_down = self._input.param("node_down", False)
        self.log_filename = self._input.param("file_name", "collectInfo")
        self.shell = RemoteMachineShellConnection(self.src_master)
        self.shell.execute_cbcollect_info("%s.zip" % (self.log_filename))
        from clitest import collectinfotest
        # HACK added self.buckets data member.
        self.buckets = self.src_cluster.get_buckets()
        collectinfotest.CollectinfoTests.verify_results(
            self, self.log_filename
        )
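
collectinfotest then checks the generated archive for the xdcr log. A stand-alone sketch of that check, assuming a cbcollect_info zip has been copied locally (the file name below is illustrative):

import fnmatch
import zipfile

def zip_contains(zip_path, pattern):
    # True if any member of the collected archive matches the glob pattern.
    with zipfile.ZipFile(zip_path) as archive:
        return any(fnmatch.fnmatch(name, pattern) for name in archive.namelist())

# print(zip_contains("collectInfo.zip", "*goxdcr.log*"))
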

    """ Verify the fix for MB-9548"""
    def verify_replications_deleted_after_bucket_deletion(self):
        self.setup_xdcr_and_load()
        self.verify_results()
        rest_conn = RestConnection(self.src_master)
        replications = rest_conn.get_replications()
        self.assertTrue(replications, "Number of replications should not be 0")
        self.src_cluster.delete_all_buckets()
        self.sleep(60)
        replications = rest_conn.get_replications()
        self.log.info("Replications : %s" % replications)
        self.assertTrue(not replications, "Rest returns replication list even after source bucket is deleted ")

    """ Verify fix for MB-9862"""
    def test_verify_memcache_connections(self):
        allowed_memcached_conn = self._input.param("allowed_connections", 100)
        max_ops_per_second = self._input.param("max_ops_per_second", 2500)
        min_item_size = self._input.param("min_item_size", 128)
        num_docs = self._input.param("num_docs", 30000)
        # start load, max_ops_per_second is the combined limit for all buckets
        mcsodaLoad = LoadWithMcsoda(self.src_master, num_docs, prefix='')
        mcsodaLoad.cfg["max-ops"] = 0
        mcsodaLoad.cfg["max-ops-per-sec"] = max_ops_per_second
        mcsodaLoad.cfg["exit-after-creates"] = 1
        mcsodaLoad.cfg["min-value-size"] = min_item_size
        mcsodaLoad.cfg["json"] = 0
        mcsodaLoad.cfg["batch"] = 100
        loadDataThread = Thread(target=mcsodaLoad.load_data,
                                  name='mcloader_default')
        loadDataThread.daemon = True
        loadDataThread.start()

        src_remote_shell = RemoteMachineShellConnection(self.src_master)
        machine_type = src_remote_shell.extract_remote_info().type.lower()
        while (loadDataThread.isAlive() and machine_type == 'linux'):
            command = "netstat -lpnta | grep 11210 | grep TIME_WAIT | wc -l"
            output, _ = src_remote_shell.execute_command(command)
            if int(output[0]) > allowed_memcached_conn:
                # stop load
                mcsodaLoad.load_stop()
                loadDataThread.join()
                self.fail("Memcached connections {0} are increased above {1} \
                            on Source node".format(
                                                   allowed_memcached_conn,
                                                   int(output[0])))
            self.sleep(5)

        # stop load
        mcsodaLoad.load_stop()
        loadDataThread.join()
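
The connection check above pipes netstat through grep and wc on the remote shell. Done locally in Python, the same TIME_WAIT count looks roughly like this (assumes netstat is available on the host):

import subprocess

def count_time_wait(port=11210):
    # Count sockets on the given port that are currently in TIME_WAIT.
    out = subprocess.run(["netstat", "-ant"], capture_output=True, text=True).stdout
    needle = ":%d" % port
    return sum(1 for line in out.splitlines()
               if needle in line and "TIME_WAIT" in line)

# if count_time_wait() > allowed_memcached_conn: stop the load and fail the test
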

    # Test to verify MB-10116
    def verify_ssl_private_key_not_present_in_logs(self):
        zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
        try:
            self.shell = RemoteMachineShellConnection(self.src_master)
            self.load_with_ops()
            self.shell.execute_cbcollect_info(zip_file)
            if self.shell.extract_remote_info().type.lower() != "windows":
                command = "unzip %s" % (zip_file)
                output, error = self.shell.execute_command(command)
                self.shell.log_command_output(output, error)
                if len(error) > 0:
                    raise Exception("unable to unzip the files. Check unzip command output for help")
                cmd = 'grep -R "BEGIN RSA PRIVATE KEY" cbcollect_info*/'
                output, _ = self.shell.execute_command(cmd)
            else:
                cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'BEGIN RSA PRIVATE KEY'".format(
                                                    self.src_master.ip,
                                                    self.src_master.rest_username,
                                                    self.src_master.rest_password)
                output, _ = self.shell.execute_command(cmd)
            self.assertTrue(not output, "XDCR SSL private key was found in diag logs -> %s" % output)
        finally:
            self.shell.delete_files(zip_file)
            self.shell.delete_files("cbcollect_info*")

    # Buckets States
    def delete_recreate_dest_buckets(self):
        self.setup_xdcr_and_load()

        # Remove destination buckets
        self.dest_cluster.delete_all_buckets()

        # Code for re-create_buckets
        self.create_buckets_on_cluster("C2")

        self._resetup_replication_for_recreate_buckets("C2")

        self.async_perform_update_delete()
        self.verify_results()

    def flush_dest_buckets(self):
        self.setup_xdcr_and_load()

        # flush destination buckets
        self.dest_cluster.flush_buckets()

        self.async_perform_update_delete()
        self.verify_results()

    # Nodes Crashing Scenarios
    def __kill_processes(self, crashed_nodes=[]):
        for node in crashed_nodes:
            try:
                NodeHelper.kill_erlang(node)
            except:
                self.log.info('Could not kill erlang process on node, continuing..')

    def __start_cb_server(self, node):
        shell = RemoteMachineShellConnection(node)
        shell.start_couchbase()
        shell.disconnect()

    def test_node_crash_master(self):
        self.setup_xdcr_and_load()

        crashed_nodes = []
        crash = self._input.param("crash", "").split('-')
        if "C1" in crash:
            crashed_nodes.append(self.src_master)
        if "C2" in crash:
            crashed_nodes.append(self.dest_master)

        self.__kill_processes(crashed_nodes)

        for crashed_node in crashed_nodes:
            self.__start_cb_server(crashed_node)

        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.sleep(self._wait_timeout)
        else:
            NodeHelper.wait_warmup_completed(crashed_nodes)

        self.async_perform_update_delete()
        self.verify_results()

    # Disaster at site.
    # 1. Crash Source Cluster, sleep n seconds.
    # 2. Crash Dest Cluster.
    # 3. Wait for Source Cluster to warmup. Load more data and perform mutations on Src.
    # 4. Wait for Dest to warmup.
    # 5. Verify data.
    def test_node_crash_cluster(self):
        self.setup_xdcr_and_load()

        crashed_nodes = []
        crash = self._input.param("crash", "").split('-')
        if "C1" in crash:
            crashed_nodes += self.src_cluster.get_nodes()
            self.__kill_processes(crashed_nodes)
            self.sleep(30)
        if "C2" in crash:
            crashed_nodes += self.dest_cluster.get_nodes()
            self.__kill_processes(crashed_nodes)

        for crashed_node in crashed_nodes:
            self.__start_cb_server(crashed_node)

        bucket_type = self._input.param("bucket_type", "membase")

        if "C1" in crash:
            if bucket_type == "ephemeral":
                self.sleep(self._wait_timeout)
            else:
                NodeHelper.wait_warmup_completed(self.src_cluster.get_nodes())
            gen_create = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self._num_items)
            self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()

        if "C2" in crash:
            if bucket_type == "ephemeral":
                self.sleep(self._wait_timeout)
            else:
                NodeHelper.wait_warmup_completed(self.dest_cluster.get_nodes())

        self.verify_results()

    """ Test if replication restarts 60s after idle xdcr following dest bucket flush """
    def test_idle_xdcr_dest_flush(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        bucket = self.dest_cluster.get_bucket_by_name(BUCKET_NAME.DEFAULT)
        self.dest_cluster.flush_buckets([bucket])

        self.sleep(self._wait_timeout)

        self.verify_results()

    """ Test if replication restarts 60s after idle xdcr following dest bucket recreate """
    def test_idle_xdcr_dest_recreate(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        bucket = self.dest_cluster.get_bucket_by_name(BUCKET_NAME.DEFAULT)
        self.dest_cluster.delete_bucket(BUCKET_NAME.DEFAULT)
        bucket_params=self._create_bucket_params(size=bucket.bucket_size)
        self.dest_cluster.create_default_bucket(bucket_params)
        self.sleep(self._wait_timeout)

        self.verify_results()

    """ Test if replication restarts 60s after idle xdcr following dest failover """
    def test_idle_xdcr_dest_failover(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        self.dest_cluster.failover_and_rebalance_nodes()

        self.sleep(self._wait_timeout)

        self.verify_results()

    def _disable_compression(self):
        shell = RemoteMachineShellConnection(self.src_master)
        for remote_cluster in self.src_cluster.get_remote_clusters():
            for repl in remote_cluster.get_replications():
                src_bucket_name = repl.get_src_bucket().name
                if src_bucket_name in str(repl):
                    repl_id = repl.get_repl_id()
                    repl_id = str(repl_id).replace('/', '%2F')
                    base_url = "http://" + self.src_master.ip + \
                               ":8091/settings/replications/" + repl_id
                    command = "curl -X POST -u Administrator:password " + base_url + \
                              " -d compressionType=" + "None"
                    output, error = shell.execute_command(command)
                    shell.log_command_output(output, error)
        shell.disconnect()
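
_disable_compression shells out to curl against the /settings/replications/<repl_id> endpoint shown above. The same POST expressed with the requests library, under the assumption that the credentials and replication id below match the cluster under test:

import requests

def set_compression_type(host, repl_id, value="None",
                         user="Administrator", password="password"):
    # POST the same replication setting the curl command above drives.
    url = "http://%s:8091/settings/replications/%s" % (host, repl_id.replace("/", "%2F"))
    response = requests.post(url, auth=(user, password), data={"compressionType": value})
    response.raise_for_status()
    return response

# set_compression_type("10.0.0.1", "uuid/default/default")  # hypothetical host and repl id
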

    def test_optimistic_replication(self):
        """Tests with 2 buckets with customized optimisic replication thresholds
           one greater than value_size, other smaller
        """
        from .xdcrnewbasetests import REPL_PARAM
        self.setup_xdcr()
        # To ensure docs size = value_size on target
        self._disable_compression()
        self.load_data_topology()
        self._wait_for_replication_to_catchup()
        for remote_cluster in self.src_cluster.get_remote_clusters():
            for replication in remote_cluster.get_replications():
                src_bucket_name = replication.get_src_bucket().name
                opt_repl_threshold = replication.get_xdcr_setting(REPL_PARAM.OPTIMISTIC_THRESHOLD)
                docs_opt_replicated_stat = 'replications/%s/docs_opt_repd' %replication.get_repl_id()
                opt_replicated = RestConnection(self.src_master).fetch_bucket_xdcr_stats(
                                        src_bucket_name
                                        )['op']['samples'][docs_opt_replicated_stat][-1]
                self.log.info("Bucket: %s, value size: %s, optimistic threshold: %s"
                              " number of mutations optimistically replicated: %s"
                                %(src_bucket_name,
                                  self._value_size,
                                  opt_repl_threshold,
                                  opt_replicated
                                ))
                if self._value_size <= opt_repl_threshold:
                    if opt_replicated == self._num_items:
                        self.log.info("SUCCESS: All keys in bucket %s were optimistically"
                                      " replicated"
                                      %(replication.get_src_bucket().name))
                    else:
                        self.fail("Value size: %s, optimistic threshold: %s,"
                                  " number of docs optimistically replicated: %s"
                          %(self._value_size, opt_repl_threshold, opt_replicated))
                else:
                    if opt_replicated == 0:
                        self.log.info("SUCCESS: No key in bucket %s was optimistically"
                                      " replicated"

                                      %(replication.get_src_bucket().name))
                    else:
                        self.fail("Partial optimistic replication detected!")

    def test_disk_full(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        self.sleep(self._wait_timeout)

        zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
        try:
            for node in [self.src_master, self.dest_master]:
                self.shell = RemoteMachineShellConnection(node)
                self.shell.execute_cbcollect_info(zip_file)
                if self.shell.extract_remote_info().type.lower() != "windows":
                    command = "unzip %s" % (zip_file)
                    output, error = self.shell.execute_command(command)
                    self.shell.log_command_output(output, error)
                    if len(error) > 0:
                        raise Exception("unable to unzip the files. Check unzip command output for help")
                    cmd = 'grep -R "Approaching full disk warning." cbcollect_info*/'
                    output, _ = self.shell.execute_command(cmd)
                else:
                    cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format(
                                                        self.src_master.ip,
                                                        self.src_master.rest_username,
                                                        self.src_master.rest_password)
                    output, _ = self.shell.execute_command(cmd)
                self.assertNotEqual(len(output), 0, "Full disk warning not generated as expected in %s" % node.ip)
                self.log.info("Full disk warning generated as expected in %s" % node.ip)

                self.shell.delete_files(zip_file)
                self.shell.delete_files("cbcollect_info*")
        except Exception as e:
            self.log.info(e)

    def test_retry_connections_on_errors_before_restart(self):
        """
        CBQE-3373: Do not restart pipeline as soon as connection errors are
        detected, backoff and retry 5 times before trying to restart pipeline.
        """
        passed = False
        # start data load after setting up xdcr
        load_tasks = self.setup_xdcr_async_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        # block port 11210 on target to simulate a connection error
        shell = RemoteMachineShellConnection(self.dest_master)
        out, err = shell.execute_command("/sbin/iptables -A INPUT -p tcp --dport"
                                         " 11210 -j DROP")
        shell.log_command_output(out, err)
        out, err = shell.execute_command("/sbin/iptables -L")
        shell.log_command_output(out, err)

        # complete loading
        for task in load_tasks:
            task.result()

        # wait for goxdcr to detect i/o timeout and try repairing
        self.sleep(self._wait_timeout*5)

        # unblock port 11210 so replication can continue
        out, err = shell.execute_command("/sbin/iptables -D INPUT -p tcp --dport"
                                         " 11210 -j DROP")
        shell.log_command_output(out, err)
        out, err = shell.execute_command("/sbin/iptables -L")
        shell.log_command_output(out, err)
        shell.disconnect()

        # check logs for traces of retry attempts
        for node in self.src_cluster.get_nodes():
            _, count1 = NodeHelper.check_goxdcr_log(
                            node,
                            "Failed to repair connections to target cluster",
                            goxdcr_log)
            _, count2 = NodeHelper.check_goxdcr_log(
                            node,
                            "Failed to set up connections to target cluster",
                            goxdcr_log)
            count = count1 + count2
            if count > 0:
                self.log.info('SUCCESS: We tried to repair connections before'
                              ' restarting pipeline')
                passed = True

        if not passed:
            self.fail("No attempts were made to repair connections on %s before"
                      " restarting pipeline" % self.src_cluster.get_nodes())
        self.verify_results()
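
The test adds the iptables DROP rule, sleeps, and removes it by hand; if an assertion fired in between, the rule would be left behind. A small context-manager sketch around the same iptables commands (assumes root on the target node) makes the cleanup automatic:

import subprocess
from contextlib import contextmanager

@contextmanager
def block_port(port=11210):
    # Temporarily drop inbound TCP traffic on the port, then always remove the rule.
    rule = ["-p", "tcp", "--dport", str(port), "-j", "DROP"]
    subprocess.check_call(["/sbin/iptables", "-A", "INPUT"] + rule)
    try:
        yield
    finally:
        subprocess.check_call(["/sbin/iptables", "-D", "INPUT"] + rule)

# with block_port(11210):
#     ...  # let goxdcr hit i/o timeouts; the rule is removed even if an assertion fires
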

    def test_verify_mb19802_1(self):
        load_tasks = self.setup_xdcr_async_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        conn = RemoteMachineShellConnection(self.dest_cluster.get_master_node())
        conn.stop_couchbase()

        for task in load_tasks:
            task.result()

        conn.start_couchbase()
        self.sleep(300)

        for node in self.src_cluster.get_nodes():
            _, count = NodeHelper.check_goxdcr_log(
                            node,
                            "batchGetMeta received fatal error and had to abort",
                            goxdcr_log)
            self.assertEqual(count, 0, "batchGetMeta error message found in " + str(node.ip))
            self.log.info("batchGetMeta error message not found in " + str(node.ip))

        self.verify_results()

    def test_verify_mb19802_2(self):
        load_tasks = self.setup_xdcr_async_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        self.dest_cluster.failover_and_rebalance_master()

        for task in load_tasks:
            task.result()

        for node in self.src_cluster.get_nodes():
            _, count = NodeHelper.check_goxdcr_log(
                            node,
                            "batchGetMeta received fatal error and had to abort",
                            goxdcr_log)
            self.assertEqual(count, 0, "batchGetMeta timed out error message found in " + str(node.ip))
            self.log.info("batchGetMeta error message not found in " + str(node.ip))

        self.sleep(300)
        self.verify_results()

    def test_verify_mb19697(self):
        self.setup_xdcr_and_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        self.src_cluster.pause_all_replications()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=100000)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()
        self._wait_for_replication_to_catchup()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=100000)
        load_tasks = self.src_cluster.async_load_all_buckets_from_generator(gen)

        self.src_cluster.rebalance_out()

        for task in load_tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.src_cluster.rebalance_in()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=100000)
        load_tasks = self.src_cluster.async_load_all_buckets_from_generator(gen)

        self.src_cluster.failover_and_rebalance_master()

        for task in load_tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        for node in self.src_cluster.get_nodes():
            _, count = NodeHelper.check_goxdcr_log(
                            node,
                            "counter .+ goes backward, maybe due to the pipeline is restarted",
                            goxdcr_log)
            self.assertEqual(count, 0, "counter goes backward, maybe due to the pipeline is restarted "
                                        "error message found in " + str(node.ip))
            self.log.info("counter goes backward, maybe due to the pipeline is restarted "
                                        "error message not found in " + str(node.ip))

        self.sleep(300)
        self.verify_results()
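
NodeHelper.check_goxdcr_log greps the rotated goxdcr.log* files for a pattern. A local stand-in for that count, assuming the log files have been copied into the working directory:

import glob
import re

def count_log_matches(pattern, path_glob="goxdcr.log*"):
    # Count lines across the rotated log files that match the regex.
    regex = re.compile(pattern)
    total = 0
    for path in glob.glob(path_glob):
        with open(path, "r", errors="ignore") as handle:
            total += sum(1 for line in handle if regex.search(line))
    return total

# assert count_log_matches(r"counter .+ goes backward") == 0
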

    def test_verify_mb20463(self):
        src_version = NodeHelper.get_cb_version(self.src_cluster.get_master_node())
        if float(src_version[:3]) != 4.5:
            self.log.info("Source cluster has to be at 4.5 for this test")
            return

        servs = self._input.servers[2:4]
        params = {}
        params['num_nodes'] = len(servs)
        params['product'] = 'cb'
        params['version'] = '4.1.2-6088'
        params['vbuckets'] = [1024]
        self.log.info("will install {0} on {1}".format('4.1.2-6088', [s.ip for s in servs]))
        InstallerJob().parallel_install(servs, params)

        if params['product'] in ["couchbase", "couchbase-server", "cb"]:
            success = True
            for server in servs:
                success &= RemoteMachineShellConnection(server).is_couchbase_installed()
                if not success:
                    self.fail("some nodes were not installed successfully on target cluster!")

        self.log.info("4.1.2 installed successfully on target cluster")

        conn = RestConnection(self.dest_cluster.get_master_node())
        conn.add_node(user=self._input.servers[3].rest_username, password=self._input.servers[3].rest_password,
                      remoteIp=self._input.servers[3].ip)
        self.sleep(30)
        conn.rebalance(otpNodes=[node.id for node in conn.node_statuses()])
        self.sleep(30)
        conn.create_bucket(bucket='default', ramQuotaMB=512)

        tasks = self.setup_xdcr_async_load()

        self.sleep(30)

        NodeHelper.enable_firewall(self.dest_master)
        self.sleep(30)
        NodeHelper.disable_firewall(self.dest_master)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()

    def test_rollback(self):
        bucket = self.src_cluster.get_buckets()[0]
        nodes = self.src_cluster.get_nodes()

        # Stop Persistence on Node A & Node B
        for node in nodes:
            mem_client = MemcachedClientHelper.direct_client(node, bucket)
            mem_client.stop_persistence()

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'
        self.setup_xdcr()

        self.src_cluster.pause_all_replications()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()

        # Perform mutations on the bucket
        self.async_perform_update_delete()

        rest1 = RestConnection(self.src_cluster.get_master_node())
        rest2 = RestConnection(self.dest_cluster.get_master_node())

        # Fetch count of docs in src and dest cluster
        _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]

        self.log.info("Before rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))

        # Kill memcached on Node A so that Node B becomes master
        shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
        shell.kill_memcached()

        # Start persistence on Node B
        mem_client = MemcachedClientHelper.direct_client(nodes[1], bucket)
        mem_client.start_persistence()

        # Failover Node B
        failover_task = self.src_cluster.async_failover()
        failover_task.result()

        # Wait for Failover & rollback to complete
        self.sleep(60)

        # Fetch count of docs in src and dest cluster
        _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]

        self.log.info("After rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))

        self.assertTrue(self.src_cluster.wait_for_outbound_mutations(),
                        "Mutations in source cluster not replicated to target after rollback")
        self.log.info("Mutations in source cluster replicated to target after rollback")

        _, count = NodeHelper.check_goxdcr_log(
                        nodes[0],
                        "Received rollback from DCP stream",
                        goxdcr_log, timeout=60)
        self.assertGreater(count, 0, "rollback did not happen as expected")
        self.log.info("rollback happened as expected")

    def test_verify_mb19181(self):
        load_tasks = self.setup_xdcr_async_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                     + '/goxdcr.log*'

        self.dest_cluster.failover_and_rebalance_master()

        for task in load_tasks:
            task.result()

        for node in self.src_cluster.get_nodes():
            _, count = NodeHelper.check_goxdcr_log(
                node,
                "Can't move update state from",
                goxdcr_log)
            self.assertEqual(count, 0, "Can't move update state from - error message found in " + str(node.ip))
            self.log.info("Can't move update state from - error message not found in " + str(node.ip))

        self.verify_results()

    def test_verify_mb21369(self):
        repeat = self._input.param("repeat", 5)
        load_tasks = self.setup_xdcr_async_load()

        conn = RemoteMachineShellConnection(self.src_cluster.get_master_node())
        output, error = conn.execute_command("netstat -an | grep " + self.src_cluster.get_master_node().ip
                                             + ":11210 | wc -l")
        conn.log_command_output(output, error)
        before = output[0]
        self.log.info("No. of memcached connections before: {0}".format(output[0]))

        for i in range(0, repeat):
            self.src_cluster.pause_all_replications()
            self.sleep(30)
            self.src_cluster.resume_all_replications()
            self.sleep(self._wait_timeout)
            output, error = conn.execute_command("netstat -an | grep " + self.src_cluster.get_master_node().ip
                                                 + ":11210 | wc -l")
            conn.log_command_output(output, error)
            self.log.info("No. of memcached connections in iteration {0}:  {1}".format(i+1, output[0]))
            if int(output[0]) - int(before) > 5:
                self.fail("Number of memcached connections changed beyond allowed limit")

        for task in load_tasks:
            task.result()

        self.log.info("No. of memcached connections did not increase with pausing and resuming replication multiple times")

    def test_maxttl_setting(self):
        maxttl = int(self._input.param("maxttl", None))
        self.setup_xdcr_and_load()
        self.merge_all_buckets()
        self._wait_for_replication_to_catchup()
        self.sleep(maxttl, "waiting for docs to expire per maxttl properly")
        for bucket in self.src_cluster.get_buckets():
            items = RestConnection(self.src_master).get_active_key_count(bucket)
            self.log.info("Docs in source bucket is {0} after maxttl has elapsed".format(items))
            if items != 0:
                self.fail("Docs in source bucket is not 0 after maxttl has elapsed")
        self._wait_for_replication_to_catchup()
예제 #50
0
    def test_ephemeral_bucket_stats(self):
        shell = RemoteMachineShellConnection(self.master)
        rest = RestConnection(self.servers[0])

        generate_load = BlobGenerator(EphemeralBucketsOOM.KEY_ROOT,
                                      'param2',
                                      self.value_size,
                                      start=0,
                                      end=self.num_items)
        self._load_all_ephemeral_buckets_until_no_more_memory(self.servers[0],
                                                              generate_load,
                                                              "create",
                                                              0,
                                                              self.num_items,
                                                              percentage=0.85)

        self.log.info('Memory almost full. Getting stats...')
        output, error = shell.execute_command(
            "/opt/couchbase/bin/cbstats localhost:11210 -b default"
            " all -u Administrator -p password | grep ephemeral")
        if self.input.param('eviction_policy', 'noEviction') == 'noEviction':
            self.assertEquals([
                ' ep_bucket_type:                                        ephemeral',
                ' ep_dcp_ephemeral_backfill_type:                        buffered',
                ' ep_ephemeral_full_policy:                              fail_new_data',
                ' ep_ephemeral_metadata_purge_age:                       259200',
                ' ep_ephemeral_metadata_purge_interval:                  60'
            ], output)
        else:
            self.assertEquals([
                ' ep_bucket_type:                                        ephemeral',
                ' ep_dcp_ephemeral_backfill_type:                        buffered',
                ' ep_ephemeral_full_policy:                              auto_delete',
                ' ep_ephemeral_metadata_purge_age:                       259200',
                ' ep_ephemeral_metadata_purge_interval:                  60'
            ], output)

        output, error = shell.execute_command(
            "/opt/couchbase/bin/cbstats localhost:11210 -b default "
            "vbucket-details -u Administrator -p password "
            "| grep seqlist_deleted_count")
        self.assertEquals(' vb_0:seqlist_deleted_count:              0',
                          output[0])

        item_count = rest.get_bucket(self.buckets[0]).stats.itemCount
        self.log.info('rest.get_bucket(self.buckets[0]).stats.itemCount: %s' %
                      item_count)
        output, error = shell.execute_command(
            "/opt/couchbase/bin/cbstats localhost:11210 -b default all"
            " -u Administrator -p password | grep curr_items")
        self.log.info(output)
        self.assertEquals(
            ' curr_items:                                            %s' %
            item_count, output[0])

        self.log.info(
            'The number of items when almost reached OOM is {0}'.format(
                item_count))

        mc_client = MemcachedClientHelper.direct_client(
            self.servers[0], self.buckets[0])

        for i in xrange(200):
            key = random.randint(0, 1200)
            mc_client.get(EphemeralBucketsOOM.KEY_ROOT + str(key))

        # load some more, this should trigger some deletes
        # add ~50% of new items
        for i in range(item_count, int(item_count * 1.5)):
            try:
                mc_client.set(EphemeralBucketsOOM.KEY_ROOT + str(i), 0, 0,
                              'a' * self.value_size)
            except:
                if self.input.param('eviction_policy',
                                    'noEviction') == 'noEviction':
                    break
                else:
                    raise
        self.sleep(10)
        item_count = rest.get_bucket(self.buckets[0]).stats.itemCount
        self.log.info("items count after we tried to add +50 per : %s" %
                      item_count)
        output, error = shell.execute_command(
            "/opt/couchbase/bin/cbstats localhost:11210 -b default all"
            " -u Administrator -p password | grep curr_items")
        self.assertEquals(
            ' curr_items:                                            %s' %
            item_count, output[0])

        output, error = shell.execute_command(
            "/opt/couchbase/bin/cbstats localhost:11210 -b default "
            "vbucket-details -u Administrator -p password "
            "| grep seqlist_deleted_count")
        self.log.info(output)
        if self.input.param('eviction_policy', 'noEviction') == 'noEviction':
            self.assertEquals(' vb_0:seqlist_deleted_count:              0',
                              output[0], 'have deleted items!')
        else:
            self.assertTrue(
                int(output[0].replace(
                    ' vb_0:seqlist_deleted_count:              ', '')) > 0,
                'no deleted items!')
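
The assertions above compare entire whitespace-padded cbstats lines, which breaks as soon as the padding changes. A sketch of parsing the same output into a dict first (assumes the captured output is a list of lines, as in the test):

def parse_cbstats(lines):
    # Turn cbstats " name:   value" lines into a {name: value} dict.
    # Splits on the last colon, which holds for the padded stats checked above.
    stats = {}
    for line in lines:
        if ":" not in line:
            continue
        name, _, value = line.rpartition(":")
        stats[name.strip()] = value.strip()
    return stats

# stats = parse_cbstats(output)
# stats.get("vb_0:seqlist_deleted_count") would then simply be "0" in the noEviction case
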
예제 #51
0
class EphemeralBackupRestoreTest(EvictionBase):
    def setUp(self):
        super(EvictionBase, self).setUp()
        self.only_store_hash = False
        self.shell = RemoteMachineShellConnection(self.master)

    def tearDown(self):
        super(EvictionBase, self).tearDown()

    def _load_all_buckets(self):
        generate_load = BlobGenerator(EphemeralBucketsOOM.KEY_ROOT,
                                      'param2',
                                      self.value_size,
                                      start=0,
                                      end=self.num_items)
        self._load_all_ephemeral_buckets_until_no_more_memory(self.servers[0],
                                                              generate_load,
                                                              "create",
                                                              0,
                                                              self.num_items,
                                                              percentage=0.80)

    # https://issues.couchbase.com/browse/MB-23992
    def test_backup_restore(self):
        self._load_all_buckets()
        self.shell.execute_command("rm -rf /tmp/backups")
        output, error = self.shell.execute_command(
            "/opt/couchbase/bin/cbbackupmgr config "
            "--archive /tmp/backups --repo example")
        self.log.info(output)
        self.assertEquals(
            'Backup repository `example` created successfully in archive `/tmp/backups`',
            output[0])
        output, error = self.shell.execute_command(
            "/opt/couchbase/bin/cbbackupmgr backup --archive /tmp/backups --repo example "
            "--cluster couchbase://127.0.0.1 --username Administrator --password password"
        )
        self.log.info(output)
        self.assertEquals('Backup successfully completed', output[1])
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        imp_rest = RestConnection(self.master)
        info = imp_rest.get_nodes_self()
        if info.memoryQuota and int(info.memoryQuota) > 0:
            self.quota = info.memoryQuota
        bucket_params = self._create_bucket_params(
            server=self.master,
            size=250,
            bucket_type='ephemeral',
            replicas=self.num_replicas,
            enable_replica_index=self.enable_replica_index,
            eviction_policy=self.eviction_policy)
        self.cluster.create_default_bucket(bucket_params)
        output, error = self.shell.execute_command('ls /tmp/backups/example')
        output, error = self.shell.execute_command(
            "/opt/couchbase/bin/cbbackupmgr restore --archive /tmp/backups"
            " --repo example --cluster couchbase://127.0.0.1 "
            "--username Administrator --password password --start %s" %
            output[0])
        self.log.info(output)
        self.assertEquals('Restore completed successfully', output[1])
        self._verify_all_buckets(self.master)
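
The backup and restore above drive cbbackupmgr through the remote shell. Run locally, the same three invocations look roughly like this (a sketch assuming cbbackupmgr at the default /opt/couchbase/bin path and the test's default credentials):

import subprocess

CBM = "/opt/couchbase/bin/cbbackupmgr"
ARCHIVE, REPO = "/tmp/backups", "example"
CLUSTER = ["--cluster", "couchbase://127.0.0.1",
           "--username", "Administrator", "--password", "password"]

def run(args):
    # Run one cbbackupmgr invocation and return its stdout lines.
    result = subprocess.run([CBM] + args, capture_output=True, text=True, check=True)
    return result.stdout.splitlines()

run(["config", "--archive", ARCHIVE, "--repo", REPO])
run(["backup", "--archive", ARCHIVE, "--repo", REPO] + CLUSTER)
# the test additionally passes --start <backup dir> to restore one specific backup
run(["restore", "--archive", ARCHIVE, "--repo", REPO] + CLUSTER)
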
 def getHostName(self, host):
     shell = RemoteMachineShellConnection(host)
     try:
         return shell.execute_command("hostname")[0][0]
     finally:
         # always close the remote shell, whether the command succeeded or not
         shell.disconnect()
예제 #53
0
 def run(self):
     remote_client = RemoteMachineShellConnection(self.server)
     output, error = remote_client.execute_command(self.command)
     print(self.server.ip)
     print("\n".join(output))
     print("\n".join(error))
예제 #54
0
    def _test_backup_and_restore_from_to_different_buckets(self):
        bucket_before_backup = "bucket_before_backup"
        bucket_after_backup = "bucket_after_backup"
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            name=bucket_before_backup,
                                            port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(
            self.master, bucket_before_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            name=bucket_before_backup,
            ram_load_ratio=20,
            value_size_distribution=distribution,
            write_only=True,
            moxi=True,
            number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master,
                                                      bucket_before_backup,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master,
                                                      bucket_before_backup,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket_before_backup, node,
                                              self.remote_tmp_folder)
            shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master,
                                                      bucket_before_backup,
                                                      self)
        BucketOperationHelper.create_bucket(serverInfo=self.master,
                                            name=bucket_after_backup,
                                            port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(
            self.master, bucket_after_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder,
                                               moxi_port=11212)
            time.sleep(10)

        ready = RebalanceHelper.wait_for_stats_on_all(self.master,
                                                      bucket_after_backup,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master,
                                                      bucket_after_backup,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        self.assertTrue(
            BucketOperationHelper.verify_data(self.master,
                                              inserted_keys,
                                              False,
                                              False,
                                              11212,
                                              debug=False,
                                              bucket=bucket_after_backup),
            "Missing keys")
    def test_cbServerOps(self):
        ops = self.input.param("ops", None)
        auditIns = audit(host=self.master)

        #Capture timestamp from first event for filename
        firstEventTime = self.getTimeStampForFile(auditIns)

        shell = RemoteMachineShellConnection(self.master)

        #Kill memcached to check for file roll over and new audit.log
        if (ops == "kill"):
            result = shell.kill_memcached()
            self.sleep(10)

        #Stop CB Server to check for file roll over and new audit.log
        if (ops == 'shutdown'):
            try:
                result = shell.stop_couchbase()
                self.sleep(120, 'Waiting for server to shutdown')
            finally:
                result = shell.start_couchbase()

        #Check for audit.log and for roll over file
        self.sleep(120, 'Waiting for server to start after shutdown')
        rest = RestConnection(self.master)
        #Create an Event for Bucket Creation
        #expectedResults = self.createBucketAudit(self.master, "TestBucketKillShutdown")
        status, content = rest.validateLogin("Administrator", "password", True, getContent=True)
        self.sleep(30)
        result = shell.file_exists(auditIns.pathLogFile, audit.AUDITLOGFILENAME)
        self.assertTrue(result, "Audit.log is not created when memcached server is killed or stopped")
        hostname = shell.execute_command("hostname")

        archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
        self.log.info ("Archive File expected is {0}".format(auditIns.pathLogFile + archiveFile))
        result = shell.file_exists(auditIns.pathLogFile, archiveFile)
        self.assertTrue(result, "Archive Audit.log is not created when memcached server is killed or stopped")

        #archiveFile = auditIns.currentLogFile + "/" + archiveFile

        if (ops == 'shutdown'):
            expectedResult = {"source":"internal", "user":"******", "id":4097, "name":"shutting down audit daemon", "description":"The audit daemon is being shutdown"}
            data = auditIns.returnEvent(4097, archiveFile)
            flag = True
            for items in data:
                if (items == 'timestamp'):
                    tempFlag = auditIns.validateTimeStamp(data['timestamp'])
                    if (tempFlag is False):
                        flag = False
                else:
                    if (isinstance(data[items], dict)):
                        for seclevel in data[items]:
                            tempValue = expectedResult[seclevel]
                            if data[items][seclevel] == tempValue:
                                self.log.info ('Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(tempValue, data[items][seclevel], seclevel))
                            else:
                                self.log.info ('Mis-Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(tempValue, data[items][seclevel], seclevel))
                                flag = False
                    else:
                        if (data[items] == expectedResult[items]):
                            self.log.info ('Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(expectedResult[items.encode('utf-8')], data[items.encode('utf-8')], items))
                        else:
                            self.log.info ('Mis - Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(expectedResult[items.encode('utf-8')], data[items.encode('utf-8')], items))
                            flag = False
            self.assertTrue(flag, "Shutdown event is not printed")

        expectedResults = {"auditd_enabled":auditIns.getAuditConfigElement('auditd_enabled'),
                           "descriptors_path":self.changePathWindows(auditIns.getAuditConfigElement('descriptors_path')),
                           "log_path":self.changePathWindows(auditIns.getAuditLogPath().strip()[:-2]),
                           'source':'internal', 'user':'******',
                           "rotate_interval":auditIns.getAuditConfigElement('rotate_interval'),
                           "version":1, 'hostname':self.getHostName(self.master)}
        self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
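
The nested loop that validates the shutdown event above can be factored into one small comparison helper. A sketch, assuming the event is a possibly nested dict and that timestamps are validated separately as in the test (event_matches is hypothetical, not a framework call):

def event_matches(actual, expected, skip=("timestamp",)):
    # Compare an audit event dict against expected values, descending into nested dicts.
    for key, expected_value in expected.items():
        if key in skip:
            continue
        actual_value = actual.get(key)
        if isinstance(expected_value, dict) and isinstance(actual_value, dict):
            if not event_matches(actual_value, expected_value, skip):
                return False
        elif actual_value != expected_value:
            return False
    return True

# flag = event_matches(data, expectedResult)
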
예제 #56
0
    def test_backup_upgrade_restore_default(self):
        if len(self.servers) < 2:
            self.log.error("At least 2 servers required for this test ..")
            return
        original_set = copy.copy(self.servers)
        worker = self.servers[len(self.servers) - 1]
        self.servers = self.servers[:len(self.servers) - 1]
        shell = RemoteMachineShellConnection(self.master)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        fin = o[0]
        shell.disconnect()
        initial_version = self.input.param("initial_version", fin)
        final_version = self.input.param("final_version", fin)
        if initial_version == final_version:
            self.log.error("Same initial and final versions ..")
            return
        if not final_version.startswith('2.0'):
            self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
            return
        builds, changes = BuildQuery().get_all_builds()
        product = 'couchbase-server-enterprise'
        #CASE where the worker isn't a 2.0+
        worker_flag = 0
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        temp = o[0]
        if not temp.startswith('2.0'):
            worker_flag = 1
        if worker_flag == 1:
            self.log.info(
                "Loading version {0} on worker.. ".format(final_version))
            remote = RemoteMachineShellConnection(worker)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_build(builds, product,
                                                  info.deliverable_type,
                                                  info.architecture_type,
                                                  final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            remote.disconnect()

        remote_tmp = "{1}/{0}".format("backup", "/root")
        perm_comm = "mkdir -p {0}".format(remote_tmp)
        if not initial_version == fin:
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info(
                    "Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product,
                                                      info.deliverable_type,
                                                      info.architecture_type,
                                                      initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

        self.common_setUp()
        bucket = "default"
        if len(self.servers) > 1:
            self.add_nodes_and_rebalance()
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            name=bucket,
            ram_load_ratio=0.5,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            delete_ratio=0.1,
            number_of_threads=2)
        if len(self.servers) > 1:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(
                rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command(perm_comm)
        shell.log_command_output(o, r)
        shell.disconnect()

        #Backup
        #BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command(
            "/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
                self.master.ip, self.master.port, remote_tmp))
        shell.disconnect()
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        time.sleep(30)

        #Upgrade
        for server in self.servers:
            self.log.info(
                "Upgrading to current version {0}".format(final_version))
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            new_build = BuildQuery().find_build(builds, product,
                                                info.deliverable_type,
                                                info.architecture_type,
                                                final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(new_build)
            remote.install_server(new_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(server.rest_username, server.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
        time.sleep(30)

        #Restore
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(server, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        #BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command(
            "/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
                self.master.ip, self.master.port, remote_tmp, bucket))
        shell.disconnect()
        time.sleep(60)
        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(
            inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
        time.sleep(30)
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        rest = RestConnection(self.master)
        helper = RestHelper(rest)
        nodes = rest.node_statuses()
        master_id = rest.get_nodes_self().id
        if len(self.servers) > 1:
            removed = helper.remove_nodes(
                knownNodes=[node.id for node in nodes],
                ejectedNodes=[
                    node.id for node in nodes if node.id != master_id
                ],
                wait_for_rebalance=True)

        shell = RemoteMachineShellConnection(worker)
        shell.remove_directory(remote_tmp)
        shell.disconnect()

        self.servers = copy.copy(original_set)
        if initial_version == fin:
            builds, changes = BuildQuery().get_all_builds()
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info(
                    "Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product,
                                                      info.deliverable_type,
                                                      info.architecture_type,
                                                      initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()
예제 #57
0
    def test_use_index_during_warmup(self):
        index_node = self.get_nodes_from_services_map(service_type="index",
                                                      get_all_nodes=False)
        rest = RestConnection(index_node)
        # Change indexer snapshot for a recovery point
        doc = {"indexer.settings.persisted_snapshot.moi.interval": 60000}
        rest.set_index_settings(doc)

        create_index_query = "CREATE INDEX idx ON default(age)"
        create_index_query2 = "CREATE INDEX idx1 ON default(age)"
        create_index_query3 = "CREATE INDEX idx2 ON default(age)"
        create_index_query4 = "CREATE INDEX idx3 ON default(age)"
        create_index_query5 = "CREATE INDEX idx4 ON default(age)"
        try:
            self.n1ql_helper.run_cbq_query(query=create_index_query,
                                           server=self.n1ql_node)
            self.n1ql_helper.run_cbq_query(query=create_index_query2,
                                           server=self.n1ql_node)
            self.n1ql_helper.run_cbq_query(query=create_index_query3,
                                           server=self.n1ql_node)
            self.n1ql_helper.run_cbq_query(query=create_index_query4,
                                           server=self.n1ql_node)
            self.n1ql_helper.run_cbq_query(query=create_index_query5,
                                           server=self.n1ql_node)
        except Exception as ex:
            self.log.info(str(ex))
            self.fail("index creation failed with error : {0}".format(str(ex)))

        self.wait_until_indexes_online()

        rest.set_service_memoryQuota(service='indexMemoryQuota',
                                     memoryQuota=256)

        master_rest = RestConnection(self.master)

        self.shell.execute_cbworkloadgen(master_rest.username,
                                         master_rest.password, 700000, 100,
                                         "default", 1024, '-j')

        index_stats = rest.get_indexer_stats()
        self.log.info(index_stats["indexer_state"])
        self.assertTrue(index_stats["indexer_state"].lower() != 'warmup')

        # Sleep for 60 seconds to allow a snapshot to be created
        self.sleep(60)

        t1 = Thread(target=self.monitor_index_stats,
                    name="monitor_index_stats",
                    args=([index_node, 60]))

        t1.start()

        shell = RemoteMachineShellConnection(index_node)
        output1, error1 = shell.execute_command("killall -9 indexer")

        t1.join()

        use_index_query = "select * from default where age > 30"

        # Results are not guaranteed to be accurate, so the query running successfully is all we can check
        try:
            results = self.n1ql_helper.run_cbq_query(query=use_index_query,
                                                     server=self.n1ql_node)
        except Exception as ex:
            self.log.info(str(ex))
            self.fail(
                "query should run correctly, an index is available for use")
Example #58
 def test_uninstall_install_server(self):
     """
        This test will test uninstall and install again couchbase server.
        There was an issue that couchbase server does not start in debian
        if we uninstall with only -r flag, not follow by --purge flag
     """
     self.package_type = self.input.param("package_type", "deb")
     if len(self.servers) < 2:
         self.log.info("This test needs 2 or more server to run.")
         return
     debian_systemd = ["ubuntu 16.04", "ubuntu 18.04"]
     if self.os_version not in debian_systemd:
         self.log.info("This test only test in/uninstall server with systemd debian server.")
         return
     shell = RemoteMachineShellConnection(self.servers[1])
     try:
         self.log.info("** Start test debian uninstall with only -r flag **")
         shell.execute_command("dpkg -r couchbase-server")
         shell.execute_command("rm -rf /opt/couchbase")
         self.sleep(10)
         shell.execute_command("dpkg -i /tmp/couchbase-server-en*")
         output, error = shell.execute_command("systemctl list-unit-files |  grep couchbase")
         if output:
             self.log.info("output from list-unit-files |  grep couchbase: \n{0}"
                                                                 .format(output))
             if "masked" in output[0]:
                 self.fail("couchbase server is masked in systemd server => {0}"
                                                             .format(output[0]))
     finally:
         cmd1 = "dpkg -r couchbase-server; dpkg --purge couchbase-server"
         shell.execute_command(cmd1)
         shell.execute_command("rm -rf /opt/couchbase")
         self.sleep(10)
         shell.execute_command("dpkg -i /tmp/couchbase-server-*")
         output, error = shell.execute_command("systemctl list-unit-files |  grep couchbase")
         self.log.info("output from list-unit-files |  grep couchbase: \n{0}"
                                                             .format(output))
         shell.disconnect()
Example #59
 def configure_gomaxprocs(self):
     max_proc = self.input.param("gomaxprocs", None)
     cmd = "export GOMAXPROCS=%s" % max_proc
     for server in self.servers:
         # Connect to each server in the loop (not just the master) and clean up the connection.
         shell_connection = RemoteMachineShellConnection(server)
         shell_connection.execute_command(cmd)
         shell_connection.disconnect()
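Note that exporting GOMAXPROCS in a one-off remote shell does not persist for already running server processes; a hedged alternative (an illustration only, not part of the original test; the environment file path and service name are assumptions) would persist the variable and restart the service:

 def configure_gomaxprocs_persistent(self):
     max_proc = self.input.param("gomaxprocs", None)
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         # Assumption: a systemd-managed couchbase-server picks up /etc/environment on restart.
         shell.execute_command("echo 'GOMAXPROCS=%s' >> /etc/environment" % max_proc)
         shell.execute_command("systemctl restart couchbase-server")
         shell.disconnect()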
Example #60
class QueryTests(BaseTestCase):
    def setUp(self):
        if not self._testMethodName == 'suite_setUp':
            self.skip_buckets_handle = True
        super(QueryTests, self).setUp()
        self.version = self.input.param("cbq_version", "sherlock")
        if self.input.tuq_client and "client" in self.input.tuq_client:
            self.shell = RemoteMachineShellConnection(
                self.input.tuq_client["client"])
        else:
            self.shell = RemoteMachineShellConnection(self.master)
        if not self._testMethodName == 'suite_setUp' and self.input.param(
                "cbq_version", "sherlock") != 'sherlock':
            self._start_command_line_query(self.master)
        self.use_rest = self.input.param("use_rest", True)
        self.max_verify = self.input.param("max_verify", None)
        self.buckets = RestConnection(self.master).get_buckets()
        self.docs_per_day = self.input.param("doc-per-day", 49)
        self.item_flag = self.input.param("item_flag", 4042322160)
        self.n1ql_port = self.input.param("n1ql_port", 8093)
        self.analytics = self.input.param("analytics", False)
        self.dataset = self.input.param("dataset", "default")
        self.primary_indx_type = self.input.param("primary_indx_type", 'GSI')
        self.index_type = self.input.param("index_type", 'GSI')
        self.primary_indx_drop = self.input.param("primary_indx_drop", False)
        self.monitoring = self.input.param("monitoring", False)
        self.isprepared = False
        self.named_prepare = self.input.param("named_prepare", None)
        self.skip_primary_index = self.input.param("skip_primary_index", False)
        self.scan_consistency = self.input.param("scan_consistency",
                                                 'REQUEST_PLUS')
        shell = RemoteMachineShellConnection(self.master)
        # Avoid shadowing the builtin 'type'; close the temporary connection when done.
        distribution = shell.extract_remote_info().distribution_type
        shell.disconnect()
        self.path = testconstants.LINUX_COUCHBASE_BIN_PATH
        if distribution.lower() == 'windows':
            self.path = testconstants.WIN_COUCHBASE_BIN_PATH
        elif distribution.lower() == "mac":
            self.path = testconstants.MAC_COUCHBASE_BIN_PATH
        self.threadFailure = False
        if self.primary_indx_type.lower() == "gsi":
            self.gsi_type = self.input.param("gsi_type", 'plasma')
        else:
            self.gsi_type = None
        if self.input.param("reload_data", False):
            if self.analytics:
                self.cluster.rebalance([self.master, self.cbas_node], [],
                                       [self.cbas_node],
                                       services=['cbas'])
            for bucket in self.buckets:
                self.cluster.bucket_flush(self.master,
                                          bucket=bucket,
                                          timeout=self.wait_timeout * 5)
            # Adding sleep after flushing buckets (see CBQE-5838)
            self.sleep(210)
            self.gens_load = self.generate_docs(self.docs_per_day)
            self.load(self.gens_load, flag=self.item_flag)
            if self.analytics:
                self.cluster.rebalance([self.master, self.cbas_node],
                                       [self.cbas_node], [],
                                       services=['cbas'])
        self.gens_load = self.generate_docs(self.docs_per_day)
        if self.input.param("gomaxprocs", None):
            self.configure_gomaxprocs()
        self.gen_results = TuqGenerators(
            self.log, self.generate_full_docs_list(self.gens_load))
        if not self.analytics:
            self.create_primary_index_for_3_0_and_greater()
        if self.analytics:
            self.setup_analytics()
            self.sleep(30, 'wait for analytics setup')

    def suite_setUp(self):
        try:
            self.load(self.gens_load, flag=self.item_flag)
            if not self.input.param("skip_build_tuq", True):
                self._build_tuq(self.master)
            self.skip_buckets_handle = True
            if (self.analytics):
                self.cluster.rebalance([self.master, self.cbas_node],
                                       [self.cbas_node], [],
                                       services=['cbas'])
                self.setup_analytics()
                self.sleep(30, 'wait for analytics setup')
        except:
            self.log.error('SUITE SETUP FAILED')
            self.tearDown()

    def tearDown(self):
        if self._testMethodName == 'suite_tearDown':
            self.skip_buckets_handle = False
        if self.analytics:
            bucket_username = "******"
            bucket_password = "******"
            data = 'use Default ;'
            for bucket in self.buckets:
                data += 'disconnect bucket {0} if connected;'.format(
                    bucket.name)
                data += 'drop dataset {0} if exists;'.format(bucket.name +
                                                             "_shadow")
                data += 'drop bucket {0} if exists;'.format(bucket.name)
            filename = "file.txt"
            f = open(filename, 'w')
            f.write(data)
            f.close()
            url = 'http://{0}:8095/analytics/service'.format(self.cbas_node.ip)
            cmd = 'curl -s --data pretty=true --data-urlencode "*****@*****.**" ' + url + " -u " + bucket_username + ":" + bucket_password
            os.system(cmd)
            os.remove(filename)
        super(QueryTests, self).tearDown()

    def suite_tearDown(self):
        if not self.input.param("skip_build_tuq", False):
            if hasattr(self, 'shell'):
                self.shell.execute_command("killall /tmp/tuq/cbq-engine")
                self.shell.execute_command("killall tuqtng")
                self.shell.disconnect()

##############################################################################################
#
#  Setup Helpers
##############################################################################################

    def setup_analytics(self):
        data = 'use Default;'
        bucket_username = "******"
        bucket_password = "******"
        for bucket in self.buckets:
            data += 'create bucket {0} with {{"bucket":"{0}","nodes":"{1}"}} ;'.format(
                bucket.name, self.master.ip)
            data += 'create shadow dataset {1} on {0}; '.format(
                bucket.name, bucket.name + "_shadow")
            data += 'connect bucket {0} with {{"username":"******","password":"******"}};'.format(
                bucket.name, bucket_username, bucket_password)
        filename = "file.txt"
        f = open(filename, 'w')
        f.write(data)
        f.close()
        url = 'http://{0}:8095/analytics/service'.format(self.cbas_node.ip)
        cmd = 'curl -s --data pretty=true --data-urlencode "*****@*****.**" ' + url + " -u " + bucket_username + ":" + bucket_password
        os.system(cmd)
        os.remove(filename)
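
The curl payload above is masked in this listing; a minimal sketch of posting the same Analytics statements with the requests library instead (an assumption, not the original helper; the endpoint matches the url built above) could look like this:

    def post_analytics_statement(self, statement, username, password):
        # Hypothetical helper: POST a statement to the Analytics service
        # endpoint instead of shelling out to curl.
        import requests
        url = 'http://{0}:8095/analytics/service'.format(self.cbas_node.ip)
        resp = requests.post(url,
                             data={'pretty': 'true', 'statement': statement},
                             auth=(username, password))
        return resp.json()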

    def run_active_requests(self, e, t):
        while not e.isSet():
            logging.debug('wait_for_event_timeout starting')
            event_is_set = e.wait(t)
            logging.debug('event set: %s', event_is_set)
            if event_is_set:
                result = self.run_cbq_query(
                    "select * from system:active_requests")
                self.assertTrue(result['metrics']['resultCount'] == 1)
                requestId = result['requestID']
                result = self.run_cbq_query(
                    'delete from system:active_requests where requestId  =  "%s"'
                    % requestId)
                time.sleep(20)
                result = self.run_cbq_query(
                    'select * from system:active_requests  where requestId  =  "%s"'
                    % requestId)
                self.assertTrue(result['metrics']['resultCount'] == 0)
                result = self.run_cbq_query(
                    "select * from system:completed_requests")
                requestId = result['requestID']
                result = self.run_cbq_query(
                    'delete from system:completed_requests where requestId  =  "%s"'
                    % requestId)
                time.sleep(10)
                result = self.run_cbq_query(
                    'select * from system:completed_requests where requestId  =  "%s"'
                    % requestId)
                self.assertTrue(result['metrics']['resultCount'] == 0)
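
run_active_requests is driven by a threading.Event and a wait timeout; a minimal usage sketch (assumes `import threading` and the `Thread` import used elsewhere in this suite; the surrounding long-running query is an assumption):

        e = threading.Event()
        monitor = Thread(target=self.run_active_requests, args=(e, 2))
        monitor.start()
        # ... kick off a long-running query in another thread ...
        e.set()      # wake the monitor: it checks active/completed requests once, then exits
        monitor.join()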

##############################################################################################
#
#   COMMON FUNCTIONS
##############################################################################################

    def run_query_from_template(self, query_template):
        self.query = self.gen_results.generate_query(query_template)
        expected_result = self.gen_results.generate_expected_result()
        actual_result = self.run_cbq_query()
        return actual_result, expected_result

    def run_query_with_subquery_select_from_template(self, query_template):
        subquery_template = re.sub(r'.*\$subquery\(', '', query_template)
        subquery_template = subquery_template[:subquery_template.rfind(')')]
        keys_num = int(
            re.sub(r'.*KEYS \$', '', subquery_template).replace('KEYS $', ''))
        subquery_full_list = self.generate_full_docs_list(
            gens_load=self.gens_load, keys=self._get_keys(keys_num))
        subquery_template = re.sub(r'USE KEYS.*', '', subquery_template)
        sub_results = TuqGenerators(self.log, subquery_full_list)
        self.query = sub_results.generate_query(subquery_template)
        expected_sub = sub_results.generate_expected_result()
        alias = re.sub(r',.*', '',
                       re.sub(r'.*\$subquery\(.*\)', '', query_template))
        alias = re.sub(r'.*as', '', re.sub(r'FROM.*', '', alias)).strip()
        if not alias:
            alias = '$1'
        for item in self.gen_results.full_set:
            item[alias] = expected_sub[0]
        query_template = re.sub(r',.*\$subquery\(.*\).*%s' % alias,
                                ',%s' % alias, query_template)
        self.query = self.gen_results.generate_query(query_template)
        expected_result = self.gen_results.generate_expected_result()
        actual_result = self.run_cbq_query()
        return actual_result, expected_result

    def run_query_with_subquery_from_template(self, query_template):
        subquery_template = re.sub(r'.*\$subquery\(', '', query_template)
        subquery_template = subquery_template[:subquery_template.rfind(')')]
        subquery_full_list = self.generate_full_docs_list(
            gens_load=self.gens_load)
        sub_results = TuqGenerators(self.log, subquery_full_list)
        self.query = sub_results.generate_query(subquery_template)
        expected_sub = sub_results.generate_expected_result()
        alias = re.sub(r',.*', '',
                       re.sub(r'.*\$subquery\(.*\)', '', query_template))
        alias = re.sub(r'.*as ', '', alias).strip()
        self.gen_results = TuqGenerators(self.log, expected_sub)
        query_template = re.sub(r'\$subquery\(.*\).*%s' % alias, ' %s' % alias,
                                query_template)
        self.query = self.gen_results.generate_query(query_template)
        expected_result = self.gen_results.generate_expected_result()
        actual_result = self.run_cbq_query()
        return actual_result, expected_result

    def negative_common_body(self, queries_errors=None):
        # Avoid a mutable default argument; None behaves the same in the check below.
        if not queries_errors:
            self.fail("No queries to run!")
        for bucket in self.buckets:
            for query_template, error in queries_errors.items():
                try:
                    query = self.gen_results.generate_query(query_template)
                    actual_result = self.run_cbq_query(
                        query.format(bucket.name))
                except CBQError as ex:
                    self.log.error(ex)
                    self.assertTrue(
                        str(ex).find(error) != -1,
                        "Error is incorrect.Actual %s.\n Expected: %s.\n" %
                        (str(ex).split(':')[-1], error))
                else:
                    self.fail("There were no errors. Error expected: %s" %
                              error)

    def run_cbq_query(self, query=None, min_output_size=10, server=None):
        if query is None:
            query = self.query
        if server is None:
            server = self.master
            if server.ip == "127.0.0.1":
                self.n1ql_port = server.n1ql_port
        else:
            if server.ip == "127.0.0.1":
                self.n1ql_port = server.n1ql_port
            if self.input.tuq_client and "client" in self.input.tuq_client:
                server = self.tuq_client
        query_params = {}
        cred_params = {'creds': []}
        rest = RestConnection(server)
        username = rest.username
        password = rest.password
        cred_params['creds'].append({'user': username, 'pass': password})
        query_params.update(cred_params)
        if self.use_rest:
            query_params.update({'scan_consistency': self.scan_consistency})
            self.log.info('RUN QUERY %s' % query)

            if self.analytics:
                query = query + ";"
                for bucket in self.buckets:
                    query = query.replace(bucket.name, bucket.name + "_shadow")
                result = RestConnection(
                    self.cbas_node).execute_statement_on_cbas(
                        query, "immediate")
                result = json.loads(result)

            else:
                result = rest.query_tool(query,
                                         self.n1ql_port,
                                         query_params=query_params)

        else:
            if self.version == "git_repo":
                output = self.shell.execute_commands_inside("$GOPATH/src/github.com/couchbase/query/" +\
                                                            "shell/cbq/cbq ", "", "", "", "", "", "")
            else:
                os = self.shell.extract_remote_info().type.lower()
                if not (self.isprepared):
                    query = query.replace('"', '\\"')
                    query = query.replace('`', '\\`')

                cmd = "%s/cbq  -engine=http://%s:%s/ -q -u %s -p %s" % (
                    self.path, server.ip, server.port, username, password)

                output = self.shell.execute_commands_inside(
                    cmd, query, "", "", "", "", "")
                if not (output[0] == '{'):
                    output1 = '{' + str(output)
                else:
                    output1 = output
                result = json.loads(output1)
        if isinstance(result, str) or 'errors' in result:
            raise CBQError(result, server.ip)
        self.log.info("TOTAL ELAPSED TIME: %s" %
                      result["metrics"]["elapsedTime"])
        return result
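
A typical call from a test body, assuming the default bucket used throughout this suite:

        # result['results'] is a list of row dicts; result['metrics'] carries counts and timings.
        result = self.run_cbq_query("SELECT COUNT(*) AS cnt FROM default")
        self.assertTrue(result['metrics']['resultCount'] == 1)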

    def build_url(self, version):
        info = self.shell.extract_remote_info()
        type = info.distribution_type.lower()
        if type in ["ubuntu", "centos", "red hat"]:
            url = "https://s3.amazonaws.com/packages.couchbase.com/releases/couchbase-query/dp1/"
            url += "couchbase-query_%s_%s_linux.tar.gz" % (
                version, info.architecture_type)
        #TODO for windows
        return url

    def _build_tuq(self, server):
        if self.version == "git_repo":
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                goroot = testconstants.LINUX_GOROOT
                gopath = testconstants.LINUX_GOPATH
            else:
                goroot = testconstants.WINDOWS_GOROOT
                gopath = testconstants.WINDOWS_GOPATH
            if self.input.tuq_client and "gopath" in self.input.tuq_client:
                gopath = self.input.tuq_client["gopath"]
            if self.input.tuq_client and "goroot" in self.input.tuq_client:
                goroot = self.input.tuq_client["goroot"]
            cmd = "rm -rf {0}/src/github.com".format(gopath)
            self.shell.execute_command(cmd)
            cmd= 'export GOROOT={0} && export GOPATH={1} &&'.format(goroot, gopath) +\
                ' export PATH=$PATH:$GOROOT/bin && ' +\
                'go get github.com/couchbaselabs/tuqtng;' +\
                'cd $GOPATH/src/github.com/couchbaselabs/tuqtng; ' +\
                'go get -d -v ./...; cd .'
            self.shell.execute_command(cmd)
            cmd = 'export GOROOT={0} && export GOPATH={1} &&'.format(goroot, gopath) +\
                ' export PATH=$PATH:$GOROOT/bin && ' +\
                'cd $GOPATH/src/github.com/couchbaselabs/tuqtng; go build; cd .'
            self.shell.execute_command(cmd)
            cmd = 'export GOROOT={0} && export GOPATH={1} &&'.format(goroot, gopath) +\
                ' export PATH=$PATH:$GOROOT/bin && ' +\
                'cd $GOPATH/src/github.com/couchbaselabs/tuqtng/tuq_client; go build; cd .'
            self.shell.execute_command(cmd)
        else:
            cbq_url = self.build_url(self.version)
            #TODO for windows
            cmd = "cd /tmp; mkdir tuq;cd tuq; wget {0} -O tuq.tar.gz;".format(
                cbq_url)
            cmd += "tar -xvf tuq.tar.gz;rm -rf tuq.tar.gz"
            self.shell.execute_command(cmd)

    def _start_command_line_query(self, server):
        if self.version == "git_repo":
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                gopath = testconstants.LINUX_GOPATH
            else:
                gopath = testconstants.WINDOWS_GOPATH
            if self.input.tuq_client and "gopath" in self.input.tuq_client:
                gopath = self.input.tuq_client["gopath"]
            if os == 'windows':
                cmd = "cd %s/src/github.com/couchbase/query/server/main; " % (gopath) +\
                "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            else:
                cmd = "cd %s/src/github.com/couchbase/query//server/main; " % (gopath) +\
                "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %(
                                                                server.ip, server.port)
            self.shell.execute_command(cmd)
        elif self.version == "sherlock":
            if self.services_init.find('n1ql') != -1:
                return
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                couchbase_path = testconstants.LINUX_COUCHBASE_BIN_PATH
            else:
                couchbase_path = testconstants.WIN_COUCHBASE_BIN_PATH
            if self.input.tuq_client and "sherlock_path" in self.input.tuq_client:
                couchbase_path = "%s/bin" % self.input.tuq_client[
                    "sherlock_path"]
            if os == 'windows':
                cmd = "cd %s; " % (couchbase_path) +\
                "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            else:
                cmd = "cd %s; " % (couchbase_path) +\
                "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %(
                                                                server.ip, server.port)
                n1ql_port = self.input.param("n1ql_port", None)
                if server.ip == "127.0.0.1" and server.n1ql_port:
                    n1ql_port = server.n1ql_port
                if n1ql_port:
                    cmd = "cd %s; " % (couchbase_path) +\
                './cbq-engine -datastore http://%s:%s/ -http=":%s">n1ql.log 2>&1 &' %(
                                                                server.ip, server.port, n1ql_port)
            self.shell.execute_command(cmd)
        else:
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                cmd = "cd /tmp/tuq;./cbq-engine -couchbase http://%s:%s/ >/dev/null 2>&1 &" % (
                    server.ip, server.port)
            else:
                cmd = "cd /cygdrive/c/tuq;./cbq-engine.exe -couchbase http://%s:%s/ >/dev/null 2>&1 &" % (
                    server.ip, server.port)
            self.shell.execute_command(cmd)

    def _parse_query_output(self, output):
        if output.find("cbq>") == 0:
            output = output[output.find("cbq>") + 4:].strip()
        if output.find("tuq_client>") == 0:
            output = output[output.find("tuq_client>") + 11:].strip()
        if output.find("cbq>") != -1:
            output = output[:output.find("cbq>")].strip()
        if output.find("tuq_client>") != -1:
            output = output[:output.find("tuq_client>")].strip()
        return json.loads(output)
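
A small illustration of what _parse_query_output strips (the raw string below is made up):

        raw = 'cbq> {"results": [], "metrics": {"resultCount": 0}}\ncbq>'
        parsed = self._parse_query_output(raw)
        # -> {'results': [], 'metrics': {'resultCount': 0}}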

    def generate_docs(self, num_items, start=0):
        try:
            return getattr(self, 'generate_docs_' + self.dataset)(num_items,
                                                                  start)
        except:
            self.fail("There is no dataset %s, please enter a valid one" %
                      self.dataset)

    def generate_docs_default(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee(docs_per_day, start)

    def generate_docs_sabre(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_sabre(docs_per_day, start)

    def generate_docs_employee(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee_data(
            docs_per_day=docs_per_day, start=start)

    def generate_docs_simple(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee_simple_data(
            docs_per_day=docs_per_day, start=start)

    def generate_docs_sales(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee_sales_data(
            docs_per_day=docs_per_day, start=start)

    def generate_docs_bigdata(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_bigdata(end=(1000 * docs_per_day),
                                                    start=start,
                                                    value_size=self.value_size)

    def _verify_results(self,
                        actual_result,
                        expected_result,
                        missing_count=1,
                        extra_count=1):
        if len(actual_result) != len(expected_result):
            missing, extra = self.check_missing_and_extra(
                actual_result, expected_result)
            self.log.error("Missing items: %s.\n Extra items: %s" %
                           (missing[:missing_count], extra[:extra_count]))
            self.fail(
                "Results are incorrect.Actual num %s. Expected num: %s.\n" %
                (len(actual_result), len(expected_result)))
        if self.max_verify is not None:
            actual_result = actual_result[:self.max_verify]
            expected_result = expected_result[:self.max_verify]

        msg = "Results are incorrect.\n Actual first and last 100:  %s.\n ... \n %s" +\
        "Expected first and last 100: %s.\n  ... \n %s"
        self.assertTrue(
            actual_result == expected_result,
            msg % (actual_result[:100], actual_result[-100:],
                   expected_result[:100], expected_result[-100:]))

    def check_missing_and_extra(self, actual, expected):
        missing = []
        extra = []
        for item in actual:
            if item not in expected:
                extra.append(item)
        for item in expected:
            if item not in actual:
                missing.append(item)
        return missing, extra

    def sort_nested_list(self, result):
        actual_result = []
        for item in result:
            curr_item = {}
            for key, value in item.items():
                if isinstance(value, list) or isinstance(value, set):
                    curr_item[key] = sorted(value)
                else:
                    curr_item[key] = value
            actual_result.append(curr_item)
        return actual_result

    def configure_gomaxprocs(self):
        max_proc = self.input.param("gomaxprocs", None)
        cmd = "export GOMAXPROCS=%s" % max_proc
        for server in self.servers:
            # Connect to each server in the loop (not just the master) and clean up the connection.
            shell_connection = RemoteMachineShellConnection(server)
            shell_connection.execute_command(cmd)
            shell_connection.disconnect()

    def create_primary_index_for_3_0_and_greater(self):
        self.log.info("CREATE PRIMARY INDEX using %s" % self.primary_indx_type)
        rest = RestConnection(self.master)
        versions = rest.get_nodes_versions()
        if versions[0].startswith("4") or versions[0].startswith(
                "3") or versions[0].startswith("5"):
            for bucket in self.buckets:
                if self.primary_indx_drop:
                    self.log.info(
                        "Dropping primary index for %s using %s ..." %
                        (bucket.name, self.primary_indx_type))
                    self.query = "DROP PRIMARY INDEX ON %s USING %s" % (
                        bucket.name, self.primary_indx_type)
                    #self.run_cbq_query()
                    self.sleep(3, 'Sleep for some time after index drop')
                self.query = 'select * from system:indexes where name="#primary" and keyspace_id = "%s"' % bucket.name
                res = self.run_cbq_query()
                self.sleep(10)
                if self.monitoring:
                    self.query = "delete from system:completed_requests"
                    self.run_cbq_query()
                if not self.skip_primary_index:
                    if (res['metrics']['resultCount'] == 0):
                        self.query = "CREATE PRIMARY INDEX ON %s USING %s" % (
                            bucket.name, self.primary_indx_type)
                        self.log.info("Creating primary index for %s ..." %
                                      bucket.name)
                        try:
                            self.run_cbq_query()
                            self.primary_index_created = True
                            if self.primary_indx_type.lower() == 'gsi':
                                self._wait_for_index_online(bucket, '#primary')
                        except Exception as ex:
                            self.log.info(str(ex))

    def _wait_for_index_online(self, bucket, index_name, timeout=6000):
        end_time = time.time() + timeout
        while time.time() < end_time:
            query = "SELECT * FROM system:indexes where name='%s'" % index_name
            res = self.run_cbq_query(query)
            for item in res['results']:
                if 'keyspace_id' not in item['indexes']:
                    self.log.error(item)
                    continue
                if item['indexes']['keyspace_id'] == bucket.name:
                    if item['indexes']['state'] == "online":
                        return
            self.sleep(
                5, 'index is pending or not in the list. sleeping... (%s)' %
                [item['indexes'] for item in res['results']])
        raise Exception('index %s is not online. last response is %s' %
                        (index_name, res))

    def _get_keys(self, key_num):
        keys = []
        for gen in self.gens_load:
            gen_copy = copy.deepcopy(gen)
            for i in range(gen_copy.end):
                key, _ = next(gen_copy)
                keys.append(key)
                if len(keys) == key_num:
                    return keys
        return keys