Example #1
 def test_node_reboot(self):
     wait_timeout = 120
     timeout = self.timeout / 2
     status = self.rest.update_autoreprovision_settings(True, 1)
     if not status:
         self.fail('failed to change autoreprovision_settings!')
     self.sleep(5)
     shell = RemoteMachineShellConnection(self.server_fail)
     if shell.extract_remote_info().type.lower() == 'windows':
         o, r = shell.execute_command("shutdown -r -f -t 0")
     elif shell.extract_remote_info().type.lower() == 'linux':
         o, r = shell.execute_command("reboot")
     shell.log_command_output(o, r)
     if shell.extract_remote_info().type.lower() == 'windows':
         time.sleep(wait_timeout * 5)
     else:
         time.sleep(wait_timeout)
     # disable firewall on the node
     shell = RemoteMachineShellConnection(self.server_fail)
     shell.disable_firewall()
     AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 0,
                                                         timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                         self)
     helper = RestHelper(self.rest)
     self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
     self.assertFalse(helper.is_cluster_rebalanced(), "cluster is balanced")
     self.rest.rebalance(otpNodes=[node.id for node in self.rest.node_statuses()], ejectedNodes=[])
     self.assertTrue(self.rest.monitorRebalance())
     buckets = self.rest.get_buckets()
     for bucket in buckets:
         self.verify_loaded_data(self.master, bucket.name, self.loaded_items[bucket.name])
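Note: the OS check followed by a platform-specific reboot command recurs throughout the examples below. For reference only, the pattern could be factored into a small helper such as the sketch below; it is not part of testrunner and assumes only the RemoteMachineShellConnection methods already used above (extract_remote_info, execute_command, log_command_output, disconnect) plus an assumed testrunner import path.

import time

from remote.remote_util import RemoteMachineShellConnection  # assumed import path


def reboot_server(server, wait_seconds=120):
    # Issue an OS-appropriate reboot, then give the node time to come back.
    shell = RemoteMachineShellConnection(server)
    try:
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        else:
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
    finally:
        shell.disconnect()
    # Crude wait; the examples below follow this with firewall resets and
    # warmup checks before using the node again.
    time.sleep(wait_seconds)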
Example #2
    def _reboot_cluster(self, data_set):
        try:
            for server in self.servers[0:self.helper.num_nodes_reboot]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    time.sleep(120)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warming-up server ..".format(server.ip))
            time.sleep(100)
Example #3
    def replication_while_rebooting_a_non_master_destination_node(self):
        self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
        self._load_all_buckets(self.dest_master, self.gen_create2, "create", 0)
        self._async_update_delete_data()
        time.sleep(self._timeout)

        i = len(self.dest_nodes) - 1
        shell = RemoteMachineShellConnection(self.dest_nodes[i])
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        i = len(self.src_nodes) - 1
        shell = RemoteMachineShellConnection(self.src_nodes[i])
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        time.sleep(self._timeout * 2)

        self.merge_buckets(self.src_master, self.dest_master, bidirection=True)

        self.verify_results(verify_src=True)
Example #4
 def setUp(self):
     super(EnterpriseBackupMergeTest, self).setUp()
     for server in [self.backupset.backup_host,
                    self.backupset.restore_cluster_host]:
         conn = RemoteMachineShellConnection(server)
         conn.extract_remote_info()
         conn.terminate_processes(conn.info, ["cbbackupmgr"])
Example #5
    def replication_while_rebooting_a_non_master_destination_node(self):
        self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
        self._load_all_buckets(self.dest_master, self.gen_create2, "create", 0)
        self._async_update_delete_data()
        self.sleep(self._timeout)

        reboot_node_dest = self.dest_nodes[len(self.dest_nodes) - 1]
        shell = RemoteMachineShellConnection(reboot_node_dest)
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        reboot_node_src = self.src_nodes[len(self.src_nodes) - 1]
        shell = RemoteMachineShellConnection(reboot_node_src)
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)

        self.sleep(360)
        ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
        ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
        self.merge_buckets(self.src_master, self.dest_master, bidirection=True)
        self.verify_results(verify_src=True)
Example #6
    def _execute_boot_op(self, server):
        try:
            shell = RemoteMachineShellConnection(server)
            if self.boot_op == "warmup":
                shell.set_environment_variable(None, None)
                shell.disconnect()
            elif self.boot_op == "reboot":
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    time.sleep(self.wait_timeout * 2)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warmed-up server .. ".format(server.ip))
Example #7
    def test_large_file_version(self):
        rest = RestConnection(self.master)
        remote_client = RemoteMachineShellConnection(self.master)
        remote_client.extract_remote_info()

        self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
        self.disable_compaction()
        self._monitor_DB_fragmentation()

        # rename here

        remote_client.stop_couchbase()
        time.sleep(5)
        remote_client.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .1 .65535 *.1")
        remote_client.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .2 .65535 *.2")
        remote_client.start_couchbase()

        for i in range(5):
            self.log.info("starting a compaction iteration")
            compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)

            compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name, timeout_in_seconds=self.wait_timeout)
            res = compaction_task.result(self.wait_timeout)


        if compact_run:
            self.log.info("auto compaction ran successfully")
        else:
            self.fail("auto compaction did not run")

        remote_client.disconnect()
Example #8
 def getAuditConfigPathInitial(self):
     shell = RemoteMachineShellConnection(self.host)
     os_type = shell.extract_remote_info().distribution_type
     dist_ver = (shell.extract_remote_info().distribution_version).rstrip()
     log.info ("OS type is {0}".format(os_type))
     if os_type == 'windows':
         auditconfigpath = audit.WINCONFIFFILEPATH
         self.currentLogFile = audit.WINLOGFILEPATH
     elif os_type == 'Mac':
         if ('10.12' == dist_ver):
             auditconfigpath = "/Users/admin/Library/Application Support/Couchbase/var/lib/couchbase/config/"
             self.currentLogFile = "/Users/admin/Library/Application Support/Couchbase/var/lib/couchbase/logs"
         else:
             auditconfigpath = audit.MACCONFIGFILEPATH
             self.currentLogFile = audit.MACLOGFILEPATH
     else:
         if self.nonroot:
             auditconfigpath = "/home/%s%s" % (self.host.ssh_username,
                                               audit.LINCONFIGFILEPATH)
             self.currentLogFile = "/home/%s%s" % (self.host.ssh_username,
                                                   audit.LINLOGFILEPATH)
         else:
             auditconfigpath = audit.LINCONFIGFILEPATH
             self.currentLogFile = audit.LINLOGFILEPATH
     return auditconfigpath
Example #9
 def _execute_boot_op(self, server):
     try:
         shell = RemoteMachineShellConnection(server)
         if self.boot_op == "warmup":
             shell.set_environment_variable(None, None)
         elif self.boot_op == "reboot":
             if shell.extract_remote_info().type.lower() == 'windows':
                 o, r = shell.execute_command("shutdown -r -f -t 0")
             elif shell.extract_remote_info().type.lower() == 'linux':
                 o, r = shell.execute_command("reboot")
             shell.log_command_output(o, r)
         self.log.info("Node {0} is being stopped".format(server.ip))
     finally:
         shell.disconnect()
Example #10
 def _reboot_node(self, node):
     self.log.info("Rebooting node '{0}'....".format(node.ip))
     shell = RemoteMachineShellConnection(node)
     if shell.extract_remote_info().type.lower() == 'windows':
         o, r = shell.execute_command("shutdown -r -f -t 0")
     elif shell.extract_remote_info().type.lower() == 'linux':
         o, r = shell.execute_command("reboot")
     shell.log_command_output(o, r)
     # wait for restart and warmup on the node
     self.sleep(self.wait_timeout * 5)
     # disable firewall on this node
     self.stop_firewall_on_node(node)
     # wait till node is ready after warmup
     ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self, wait_if_warmup=True)
Example #11
 def install(self, params):
     #        log = logger.new_logger("Installer")
     build = self.build_url(params)
     remote_client = RemoteMachineShellConnection(params["server"])
     info = remote_client.extract_remote_info()
     type = info.type.lower()
     server = params["server"]
     if "vbuckets" in params:
         vbuckets = int(params["vbuckets"][0])
     else:
         vbuckets = None
     if type == "windows":
         build = self.build_url(params)
         remote_client.download_binary_in_win(build.url, params["product"], params["version"])
         remote_client.membase_install_win(build, params["version"])
     else:
         downloaded = remote_client.download_build(build)
         if not downloaded:
             log.error(downloaded, "unable to download binaries : {0}".format(build.url))
         path = server.data_path or "/tmp"
         remote_client.membase_install(build, path=path, vbuckets=vbuckets)
         ready = RestHelper(RestConnection(params["server"])).is_ns_server_running(60)
         if not ready:
             log.error("membase-server did not start...")
         log.info("wait 5 seconds for membase server to start")
         time.sleep(5)
Example #12
 def multiple_connections_using_memcachetest (self):
     """ server side moxi is removed in spock as in MB-16661 """
     if self.cb_version[:5] in COUCHBASE_FROM_SPOCK:
         self.log.info("From spock, server side moxi is removed."
                       " More information could be found in MB-16661 ")
         return
     shell = RemoteMachineShellConnection(self.master)
     os_type = shell.extract_remote_info()
     if os_type.type != 'Linux':
         return
     mcsoda_items = self.input.param('mcsoda_items', 1000000)
     memcachetest_items = self.input.param('memcachetest_items', 100000)
     moxi_port = self.input.param('moxi_port', 51500)
     self._stop_moxi(self.master, moxi_port)
     self._stop_mcsoda_localy(moxi_port)
     try:
         self._run_moxi(self.master, moxi_port, self.master.ip, "default")
         self._run_mcsoda_localy(self.master.ip, moxi_port, "default",
                                                 mcsoda_items=mcsoda_items)
         self.sleep(30)
         sd = MemcachetestRunner(self.master, num_items=memcachetest_items, \
                                  extra_params="-W 16 -t 16 -c 0 -M 2")  # MB-8083
         status = sd.start_memcachetest()
         if not status:
             self.fail("see logs above!")
     finally:
         self._stop_mcsoda_localy(moxi_port)
         if 'sd' in locals():
             sd.stop_memcachetest()
Example #13
 def parallel_install(self, servers, params):
     uninstall_threads = []
     install_threads = []
     initializer_threads = []
     queue = Queue.Queue()
     success = True
     for server in servers:
         _params = copy.deepcopy(params)
         _params["server"] = server
         u_t = Thread(target=installer_factory(params).uninstall,
                    name="uninstaller-thread-{0}".format(server.ip),
                    args=(_params,))
         i_t = Thread(target=installer_factory(params).install,
                    name="installer-thread-{0}".format(server.ip),
                    args=(_params, queue))
         init_t = Thread(target=installer_factory(params).initialize,
                    name="initializer-thread-{0}".format(server.ip),
                    args=(_params,))
         uninstall_threads.append(u_t)
         install_threads.append(i_t)
         initializer_threads.append(init_t)
     for t in uninstall_threads:
         t.start()
     for t in uninstall_threads:
         t.join()
         print "thread {0} finished".format(t.name)
     if "product" in params and params["product"] in ["couchbase", "couchbase-server", "cb"]:
         success = True
         for server in servers:
             success &= not RemoteMachineShellConnection(server).is_couchbase_installed()
         if not success:
             print "Server:{0}.Couchbase is still installed after uninstall".format(server)
             return success
     for t in install_threads:
         t.start()
     for t in install_threads:
         t.join()
         print "thread {0} finished".format(t.name)
     while not queue.empty():
         success &= queue.get()
     if not success:
         print "installation failed. initializer threads were skipped"
         return success
     for t in initializer_threads:
         t.start()
     for t in initializer_threads:
         t.join()
         print "thread {0} finished".format(t.name)
     """ remove any capture files left after install windows """
     remote_client = RemoteMachineShellConnection(servers[0])
     type = remote_client.extract_remote_info().distribution_type
     remote_client.disconnect()
     if type.lower() == 'windows':
         for server in servers:
             shell = RemoteMachineShellConnection(server)
             shell.execute_command("rm -f /cygdrive/c/automation/*_172.23*")
             shell.execute_command("rm -f /cygdrive/c/automation/*_10.17*")
             os.system("rm -f resources/windows/automation/*_172.23*")
             os.system("rm -f resources/windows/automation/*_10.17*")
     return success
Example #14
    def test_cbDiskConf(self):
        ops = self.input.param('ops', None)
        source = 'ns_server'
        user = self.master.rest_username
        rest = RestConnection(self.master)
        shell = RemoteMachineShellConnection(self.master)
        os_type = shell.extract_remote_info().distribution_type
        if (os_type == 'Windows'):
            currentPath = "c:/Program Files/Couchbase/Server/var/lib/couchbase/data"
            newPath = "C:/tmp"
        else:
            currentPath = '/opt/couchbase/var/lib/couchbase/data'
            newPath = "/tmp"

        if (ops == 'indexPath'):
            try:
                expectedResults = {'node': 'ns_1@' + self.master.ip, 'source':source,
                                'user':user, 'ip':self.ipAddress, 'port':1234,
                                'index_path':newPath, 'db_path':currentPath,
                                'cbas_dirs':currentPath}

                rest.set_data_path(index_path=newPath)
                self.checkConfig(self.eventID, self.master, expectedResults)
            finally:
                rest.set_data_path(index_path=currentPath)
Example #15
    def replication_while_rebooting_a_non_master_destination_node(self):
        self.set_xdcr_param("xdcrFailureRestartInterval", 1)

        self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
        self._async_modify_data()
        self.sleep(self._timeout)

        i = len(self.dest_nodes) - 1
        shell = RemoteMachineShellConnection(self.dest_nodes[i])
        type = shell.extract_remote_info().type.lower()
        if type == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif type == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        shell.disconnect()
        self.sleep(60, "after rebooting node")
        num = 0
        while num < 10:
            try:
                shell = RemoteMachineShellConnection(self.dest_nodes[i])
            except BaseException, e:
                self.log.warn("node {0} is unreachable".format(self.dest_nodes[i].ip))
                self.sleep(60, "still can't connect to node")
                num += 1
            else:
                break
Example #16
 def test_upgrade_negative(self):
     op = self.input.param("op", None)
     error = self.input.param("error", '')
     remote = RemoteMachineShellConnection(self.master)
     if op is None:
         self.fail("operation should be specified")
     if op == "higher_version":
         tmp = self.initial_version
         self.initial_version = self.upgrade_versions[0]
         self.upgrade_versions = [tmp, ]
     info = None
     if op == "wrong_arch":
         info = remote.extract_remote_info()
         info.architecture_type = ('x86_64', 'x86')[info.architecture_type == 'x86']
     self._install([self.master])
     self.operations([self.master])
     try:
         if op == "close_port":
             RemoteUtilHelper.enable_firewall(self.master)
         for upgrade_version in self.upgrade_versions:
             self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                    format(upgrade_version))
             output, error = self._upgrade(upgrade_version, self.master, info=info)
             if str(output).find(error) != -1 or str(error).find(error) != -1:
                 raise Exception(error)
     except Exception, ex:
         self.log.info("Exception %s appeared as expected" % ex)
         self.log.info("Check that old version is working fine")
         self.verification([self.master])
Example #17
 def changePathWindows(self, path):
     shell = RemoteMachineShellConnection(self.master)
     os_type = shell.extract_remote_info().distribution_type
     self.log.info ("OS type is {0}".format(os_type))
     if os_type == 'windows':
         path = path.replace("/", "\\")
     return path
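A hypothetical call site for the helper above (the path is illustrative only): on a Windows target the forward slashes are rewritten to backslashes, otherwise the path is returned unchanged.

 data_path = self.changePathWindows("/opt/couchbase/var/lib/couchbase/data")
 # Windows: "\opt\couchbase\var\lib\couchbase\data"; other platforms: unchanged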
Example #18
 def verify_for_recovery_type(self, chosen = [], serverMap = {}, buckets = [], recoveryTypeMap = {}, fileMap = {}, deltaRecoveryBuckets = []):
     """ Verify recovery type is delta or full """
     summary = ""
     logic = True
     for server in self.chosen:
         shell = RemoteMachineShellConnection(serverMap[server.ip])
         os_type = shell.extract_remote_info()
         if os_type.type.lower() == 'windows':
             return
         for bucket in buckets:
             path = fileMap[server.ip][bucket.name]
             exists = shell.file_exists(path,"check.txt")
             if deltaRecoveryBuckets != None:
                 if recoveryTypeMap[server.ip] == "delta" and (bucket.name in deltaRecoveryBuckets) and not exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip,bucket.name)
                 elif recoveryTypeMap[server.ip] == "delta" and (bucket.name not in deltaRecoveryBuckets) and exists:
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Full, Actual Delta".format(server.ip,bucket.name)
                     logic = False
             else:
                 if recoveryTypeMap[server.ip] == "delta"  and not exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip,bucket.name)
                 elif recoveryTypeMap[server.ip] == "full" and exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1}  :: Expected Full, Actual Delta".format(server.ip,bucket.name)
         shell.disconnect()
     self.assertTrue(logic, summary)
Example #19
 def mutate_and_check_error404(self, n=1):
     # get vb0 active source node
     active_src_node = self.get_active_vb0_node(self.src_master)
     shell = RemoteMachineShellConnection(active_src_node)
     os_type = shell.extract_remote_info().distribution_type
     if os_type.lower() == 'windows':
         trace_log = "C:/Program Files/Couchbase/Server/var/lib/couchbase/logs/xdcr_trace.log"
     else:
         trace_log = "/opt/couchbase/var/lib/couchbase/logs/xdcr_trace.*"
     num_404_errors_before_load, error = shell.execute_command("grep \"error,404\" {} | wc -l"
                                                                  .format(trace_log))
     num_get_remote_bkt_failed_before_load, error = shell.execute_command("grep \"get_remote_bucket_failed\" \"{}\" | wc -l"
                                                                  .format(trace_log))
     self.log.info("404 errors: {}, get_remote_bucket_failed errors : {}".
                   format(num_404_errors_before_load, num_get_remote_bkt_failed_before_load))
     self.sleep(60)
     self.log.info("################ New mutation:{} ##################".format(self.key_counter+1))
     self.load_one_mutation_into_source_vb0(active_src_node)
     self.sleep(5)
     num_404_errors_after_load, error = shell.execute_command("grep \"error,404\" {} | wc -l"
                                                                  .format(trace_log))
     num_get_remote_bkt_failed_after_load, error = shell.execute_command("grep \"get_remote_bucket_failed\" \"{}\" | wc -l"
                                                                  .format(trace_log))
     self.log.info("404 errors: {}, get_remote_bucket_failed errors : {}".
                   format(num_404_errors_after_load, num_get_remote_bkt_failed_after_load))
     shell.disconnect()
     if (int(num_404_errors_after_load[0]) > int(num_404_errors_before_load[0])) or \
        (int(num_get_remote_bkt_failed_after_load[0]) > int(num_get_remote_bkt_failed_before_load[0])):
         self.log.info("Checkpointing error-404 verified after dest failover/rebalance out")
         return True
     else:
         self.log.info("404 errors on source node before last load : {}, after last node: {}".
                       format(int(num_404_errors_after_load[0]), int(num_404_errors_before_load[0])))
         self.log.error("Checkpoint 404 error NOT recorded at source following dest failover or rebalance!")
Example #20
def _get_build(master, version, is_amazon=False):
    log = logger.Logger.get_logger()
    remote = RemoteMachineShellConnection(master)
    info = remote.extract_remote_info()
    remote.disconnect()
    builds, changes = BuildQuery().get_all_builds()
    log.info("finding build {0} for machine {1}".format(version, master))
    result = re.search('r', version)
    product = 'membase-server-enterprise'
    if re.search('1.8',version):
        product = 'couchbase-server-enterprise'

    if result is None:
        appropriate_build = BuildQuery().find_membase_release_build(product,
                                                                    info.deliverable_type,
                                                                    info.architecture_type,
                                                                    version.strip(),
                                                                    is_amazon=is_amazon)
    else:
        appropriate_build = BuildQuery().find_membase_build(builds, product,
                                                            info.deliverable_type,
                                                            info.architecture_type,
                                                            version.strip(),
                                                            is_amazon=is_amazon)
    return appropriate_build
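Many of these examples branch on attributes of the object returned by extract_remote_info(). A minimal sketch (a hypothetical helper, not part of testrunner) that gathers the attributes the surrounding examples rely on:

def describe_remote(server):
    # Hypothetical helper; the attribute names below are the ones the
    # surrounding examples read from extract_remote_info().
    shell = RemoteMachineShellConnection(server)
    try:
        info = shell.extract_remote_info()
        return {
            "os": info.type.lower(),                 # 'linux' or 'windows'
            "package": info.deliverable_type,        # e.g. 'rpm' or 'deb'
            "arch": info.architecture_type,          # e.g. 'x86_64' or 'x86'
            "distribution": info.distribution_type,  # e.g. 'windows', 'Mac'
            "version": info.distribution_version,    # e.g. contains 'centos 7'
        }
    finally:
        shell.disconnect()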
Example #21
    def __init__(self,
                 eventID=None,
                 host=None,
                 method='REST'):

        rest = RestConnection(host)
        if (rest.is_enterprise_edition()):
            log.info ("Enterprise Edition, Audit is part of the test")
        else:
            raise Exception(" Install is not an enterprise edition, Audit requires enterprise edition.")
        self.method = method
        self.host = host
        self.nonroot = False
        shell = RemoteMachineShellConnection(self.host)
        self.info = shell.extract_remote_info()
        if self.info.distribution_type.lower() in LINUX_DISTRIBUTION_NAME and \
                                                    host.ssh_username != "root":
            self.nonroot = True
        shell.disconnect()
        self.pathDescriptor = self.getAuditConfigElement("descriptors_path") + "/"
        self.pathLogFile = self.getAuditLogPath()
        self.defaultFields = ['id', 'name', 'description']
        if (eventID is not None):
            self.eventID = eventID
            self.eventDef = self.returnEventsDef()
Example #22
File: install.py  Project: pm48/testrunner
    def install(self, params):
        remote_client = RemoteMachineShellConnection(params["server"])
        info = remote_client.extract_remote_info()
        os = info.type.lower()
        type = info.deliverable_type.lower()
        version = info.distribution_version.lower()
        sdk_url = "git+git://github.com/couchbase/[email protected]"
        if os == "linux":
            if type == "rpm":
                repo_file = "/etc/yum.repos.d/couchbase.repo"
                baseurl = ""
                if version.find("centos") != -1 and version.find("6.2") != -1:
                    baseurl = "http://packages.couchbase.com/rpm/6.2/x86-64"
                elif version.find("centos") != -1 and version.find("7") != -1:
                    baseurl = "http://packages.couchbase.com/rpm/7/x86_64"
                else:
                    log.info("os version {0} not supported".format(version))
                    exit(1)
                remote_client.execute_command("rm -rf {0}".format(repo_file))
                remote_client.execute_command("touch {0}".format(repo_file))
                remote_client.execute_command("echo [couchbase] >> {0}".format(repo_file))
                remote_client.execute_command("echo enabled=1 >> {0}".format(repo_file))
                remote_client.execute_command("echo name = Couchbase package repository >> {0}".format(repo_file))
                remote_client.execute_command("baseurl = {0} >> {1}".format(baseurl, repo_file))
                remote_client.execute_command(
                    "yum -y install libcouchbase2-libevent libcouchbase-devel libcouchbase2-bin"
                )
                remote_client.execute_command("yum -y install pip")
                remote_client.execute_command("pip uninstall couchbase")
                remote_client.execute_command("pip install {0}".format(sdk_url))
            elif type == "deb":
                repo_file = "/etc/sources.list.d/couchbase.list"
                entry = ""
                if version.find("ubuntu") != -1 and version.find("12.04") != -1:
                    entry = "http://packages.couchbase.com/ubuntu precise precise/main"
                elif version.find("ubuntu") != -1 and version.find("14.04") != -1:
                    entry = "http://packages.couchbase.com/ubuntu trusty trusty/main"
                elif version.find("debian") != -1 and version.find("7") != -1:
                    entry = "http://packages.couchbase.com/ubuntu wheezy wheezy/main"
                else:
                    log.info("os version {0} not supported".format(version))
                    exit(1)
                remote_client.execute_command("rm -rf {0}".format(repo_file))
                remote_client.execute_command("touch {0}".format(repo_file))
                remote_client.execute_command("deb {0} >> {1}".format(entry, repo_file))
                remote_client.execute_command(
                    "apt-get -y install libcouchbase2-libevent libcouchbase-devel libcouchbase2-bin"
                )
                remote_client.execute_command("apt-get -y install pip")
                remote_client.execute_command("pip uninstall couchbase")
                remote_client.execute_command("pip install {0}".format(sdk_url))
        if os == "mac":
            remote_client.execute_command("brew install libcouchbase; brew link libcouchbase")
            remote_client.execute_command("brew install pip; brew link pip")
            remote_client.execute_command("pip install {0}".format(sdk_url))
        if os == "windows":
            log.info("Currently not supported")

        remote_client.disconnect()
        return True
Example #23
 def _init_parameters(self):
     self.bucket_name = self.input.param("bucket_name", 'default')
     self.bucket_type = self.input.param("bucket_type", 'sasl')
     self.bucket_size = self.quota
     self.password = '******'
     self.server = self.master
     self.rest = RestConnection(self.server)
     self.node_version = self.rest.get_nodes_version()
     self.total_items_travel_sample = 31569
     if self.node_version[:5] in COUCHBASE_FROM_WATSON:
         self.total_items_travel_sample = 31591
     shell = RemoteMachineShellConnection(self.master)
     type = shell.extract_remote_info().distribution_type
     shell.disconnect()
     self.sample_path = LINUX_COUCHBASE_SAMPLE_PATH
     self.bin_path = LINUX_COUCHBASE_BIN_PATH
     if self.nonroot:
         self.sample_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_SAMPLE_PATH)
         self.bin_path = "/home/%s%s" % (self.master.ssh_username,
                                         LINUX_COUCHBASE_BIN_PATH)
     if type.lower() == 'windows':
         self.sample_path = WIN_COUCHBASE_SAMPLE_PATH
         self.bin_path = WIN_COUCHBASE_BIN_PATH
     elif type.lower() == "mac":
         self.sample_path = MAC_COUCHBASE_SAMPLE_PATH
         self.bin_path = MAC_COUCHBASE_BIN_PATH
Example #24
    def run_failover_operations(self, chosen, failover_reason):
        """ Method to run fail over operations used in the test scenario based on failover reason """
        # Perform operations related to failover
        for node in chosen:
            if failover_reason == 'stop_server':
                self.stop_server(node)
                self.log.info("10 seconds delay to wait for membase-server to shutdown")
                # wait for 5 minutes until node is down
                self.assertTrue(RestHelper(self.rest).wait_for_node_status(node, "unhealthy", 300),
                                    msg="node status is not unhealthy even after waiting for 5 minutes")
            elif failover_reason == "firewall":
                server = [srv for srv in self.servers if node.ip == srv.ip][0]
                RemoteUtilHelper.enable_firewall(server, bidirectional=self.bidirectional)
                status = RestHelper(self.rest).wait_for_node_status(node, "unhealthy", 300)
                if status:
                    self.log.info("node {0}:{1} is 'unhealthy' as expected".format(node.ip, node.port))
                else:
                    # verify iptables on the node if something wrong
                    for server in self.servers:
                        if server.ip == node.ip:
                            shell = RemoteMachineShellConnection(server)
                            info = shell.extract_remote_info()
                            if info.type.lower() == "windows":
                                o, r = shell.execute_command("netsh advfirewall show allprofiles")
                                shell.log_command_output(o, r)
                            else:
                                o, r = shell.execute_command("/sbin/iptables --list")
                                shell.log_command_output(o, r)
                            shell.disconnect()
                    self.rest.print_UI_logs()
                    api = self.rest.baseUrl + 'nodeStatuses'
                    status, content, header = self.rest._http_request(api)
                    json_parsed = json.loads(content)
                    self.log.info("nodeStatuses: {0}".format(json_parsed))
                    self.fail("node status is not unhealthy even after waiting for 5 minutes")

        # define precondition check for failover
        failed_over = self.rest.fail_over(node.id, graceful=self.graceful)

        # Check for negative cases
        if self.graceful and (failover_reason in ['stop_server', 'firewall']):
            if failed_over:
                # MB-10479
                self.rest.print_UI_logs()
            self.assertFalse(failed_over, "Graceful Falover was started for unhealthy node!!! ")
            return
        elif self.gracefulFailoverFail and failed_over:
            """ Check if the fail_over fails as expected """
            self.assertTrue(not failed_over,""" Graceful failover should fail due to not enough replicas """)
            return

        # Check if failover happened as expected or re-try one more time
        if not failed_over:
            self.log.info("unable to failover the node the first time. try again in  60 seconds..")
            # try again in 75 seconds
            self.sleep(75)
            failed_over = self.rest.fail_over(node.id, graceful=self.graceful)
        if self.graceful and (failover_reason not in ['stop_server', 'firewall']):
            reached = RestHelper(self.rest).rebalance_reached()
            self.assertTrue(reached, "rebalance failed for Graceful Failover, stuck or did not completed")
Example #25
    def setUp(self):
        super(RackzoneBaseTest, self).setUp()
        self.product = self.input.param("product", "cb")
        self.vbuckets = self.input.param("vbuckets", 128)
        self.version = self.input.param("version", "2.5.1-1082")
        self.type = self.input.param('type', 'enterprise')
        self.doc_ops = self.input.param("doc_ops", None)
        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")
        self.defaul_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"

        #define the data that will be used to test
        self.blob_generator = self.input.param("blob_generator", True)
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if not rest.is_enterprise_edition():
            raise Exception("This couchbase server is not Enterprise Edition.\
                  This RZA feature requires Enterprise Edition to work")
        if self.blob_generator:
            #gen_load data is used for upload before each test(1000 items by default)
            self.gen_load = BlobGenerator('test', 'test-', self.value_size, end=self.num_items)
            #gen_update is used for doing mutation for 1/2th of uploaded data
            self.gen_update = BlobGenerator('test', 'test-', self.value_size, end=(self.num_items / 2 - 1))
            #upload data before each test
            self._load_all_buckets(self.servers[0], self.gen_load, "create", 0)
        else:
            self._load_doc_data_all_buckets()
        shell = RemoteMachineShellConnection(self.master)
        type = shell.extract_remote_info().distribution_type
        shell.disconnect()
        if type.lower() == 'windows':
            self.is_linux = False
        else:
            self.is_linux = True
Example #26
 def _verify_replica_distribution_in_zones(self, nodes, commmand, saslPassword = "" ):
     shell = RemoteMachineShellConnection(self.servers[0])
     type = shell.extract_remote_info().distribution_type
     shell.disconnect()
     if type.lower() == 'linux':
         cbstat_command = "%scbstats" % (testconstants.LINUX_COUCHBASE_BIN_PATH)
     elif type.lower() == 'windows':
         cbstat_command = "%scbstats.exe" % (testconstants.WIN_COUCHBASE_BIN_PATH)
     elif type.lower() == 'mac':
         cbstat_command = "%scbstats" % (testconstants.MAC_COUCHBASE_BIN_PATH)
     command = "tap"
     saslPassword = ''
     for group in nodes:
         for node in nodes[group]:
             if not self.is_windows:
                 commands = "%s %s:11210 %s -b %s -p \"%s\" |grep :vb_filter: |  awk '{print $1}' \
                         | xargs | sed 's/eq_tapq:replication_ns_1@//g'  | sed 's/:vb_filter://g' \
                         " % (cbstat_command, node, command,"default", saslPassword)
             elif self.is_windows:
                 """ standalone gawk.exe should be copy to ../ICW/bin for command below to work.
                     Ask IT to do this if you don't know how """
                 commands = "%s %s:11210 %s -b %s -p \"%s\" | grep.exe :vb_filter: | gawk.exe '{print $1}' \
                            | sed.exe 's/eq_tapq:replication_ns_1@//g'  | sed.exe 's/:vb_filter://g' \
                            " % (cbstat_command, node, command,"default", saslPassword)
             output, error = shell.execute_command(commands)
             shell.log_command_output(output, error)
             output = output[0].split(" ")
             if node not in output:
                 self.log.info("{0}".format(nodes))
                 self.log.info("replica of node {0} are not in its zone {1}".format(node, group))
             else:
                 raise Exception("replica of node {0} are on its own zone {1}".format(node, group))
Example #27
    def multiple_connections_using_memcachetest (self):

        shell = RemoteMachineShellConnection(self.master)
        os_type = shell.extract_remote_info()
        if os_type.type != 'Linux':
            return
        mcsoda_items = self.input.param('mcsoda_items', 1000000)
        memcachetest_items = self.input.param('memcachetest_items', 100000)
        moxi_port = self.input.param('moxi_port', 51500)
        self._stop_moxi(self.master, moxi_port)
        self._stop_mcsoda_localy(moxi_port)
        try:
            self._run_moxi(self.master, moxi_port, self.master.ip, "default")
            self._run_mcsoda_localy(self.master.ip, moxi_port, "default",
                                                    mcsoda_items=mcsoda_items)
            self.sleep(30)
            sd = MemcachetestRunner(self.master, num_items=memcachetest_items, \
                                     extra_params="-W 16 -t 16 -c 0 -M 2")  # MB-8083
            status = sd.start_memcachetest()
            if not status:
                self.fail("see logs above!")
        finally:
            self._stop_mcsoda_localy(moxi_port)
            if 'sd' in locals():
                sd.stop_memcachetest()
Example #28
File: sg_base.py  Project: pm48/testrunner
 def setUp(self):
     super(GatewayBaseTest, self).setUp()
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.case_number = self.input.param("case_number", 0)
     self.version = self.input.param("version", "0.0.0-358")
     self.extra_param = self.input.param("extra_param", "")
     if isinstance(self.extra_param, str):
         self.extra_param=self.extra_param.replace("$", "=")  # '=' is a delimiter in conf file
     self.logsdir = self.input.param("logsdir", "/tmp/sync_gateway/logs")
     self.datadir = self.input.param("datadir", "/tmp/sync_gateway")
     self.configdir = self.input.param("configdir", "/tmp/sync_gateway")
     self.configfile = self.input.param("configfile", "sync_gateway.json")
     self.expected_error = self.input.param("expected_error", "")
     self.servers = self.input.servers
     self.master = self.servers[0]
     self.folder_prefix = ""
     self.installed_folder = '/opt/couchbase-sync-gateway/bin'
     shell = RemoteMachineShellConnection(self.master)
     type = shell.extract_remote_info().distribution_type
     shell.disconnect()
     if type.lower() == 'windows':
         self.folder_prefix = "/cygdrive/c"
         self.installed_folder = '/cygdrive/c/Program\ Files\ \(x86\)/Couchbase'
         self.logsdir = self.folder_prefix + self.configdir
         self.datadir = self.folder_prefix + self.configdir
         self.configdir = self.folder_prefix + self.configdir
Example #29
 def tearDown(self):
     super(EnterpriseBackupRestoreBase, self).tearDown()
     if not self.input.param("skip_cleanup", False):
         remote_client = RemoteMachineShellConnection(self.input.clusters[1][0])
         info = remote_client.extract_remote_info().type.lower()
         if info == 'linux' or info == 'mac':
             backup_directory = "/tmp/entbackup"
             validation_files_location = "/tmp/backuprestore"
         elif info == 'windows':
             backup_directory = testconstants.WIN_TMP_PATH + "entbackup"
             validation_files_location = testconstants.WIN_TMP_PATH + "backuprestore"
         else:
             raise Exception("OS not supported.")
         command = "rm -rf {0}".format(backup_directory)
         output, error = remote_client.execute_command(command)
         remote_client.log_command_output(output, error)
         if info == 'linux':
             command = "rm -rf /cbqe3043/entbackup".format(backup_directory)
             output, error = remote_client.execute_command(command)
             remote_client.log_command_output(output, error)
         if self.input.clusters:
             for key in self.input.clusters.keys():
                 servers = self.input.clusters[key]
                 self.backup_reset_clusters(servers)
         if os.path.exists(validation_files_location):
             shutil.rmtree(validation_files_location)
Example #30
    def install(self, params):
#        log = logger.new_logger("Installer")
        build = self.build_url(params)
        remote_client = RemoteMachineShellConnection(params["server"])
        info = remote_client.extract_remote_info()
        type = info.type.lower()
        server = params["server"]
        if "vbuckets" in params:
            vbuckets = int(params["vbuckets"][0])
        else:
            vbuckets = None
        if type == "windows":
            build = self.build_url(params)
            remote_client.download_binary_in_win(build.url, params["product"], params["version"])
            remote_client.membase_install_win(build, params["version"])
        else:
            downloaded = remote_client.download_build(build)
            if not downloaded:
                log.error('unable to download binaries : {0}'.format(build.url))
            #TODO: need separate methods in remote_util for couchbase and membase install
            path = server.data_path or '/tmp'
            remote_client.membase_install(build, path=path, vbuckets=vbuckets)
            log.info('wait 5 seconds for membase server to start')
            time.sleep(5)
        if "rest_vbuckets" in params:
            rest_vbuckets = int(params["rest_vbuckets"])
            ClusterOperationHelper.set_vbuckets(server, rest_vbuckets)
Example #31
    def test_history(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            type2 = shell.extract_remote_info().distribution_type
            queries = []
            queries2 = []
            queries3 = []
            queries5 = []
            queries6 = []
            for bucket in self.buckets:
                if type2.lower() == 'windows':
                    queries = ["\set histfile c:\\tmp\\history.txt;"]
                    queries2 = ["\Alias p c:\\tmp\\history2.txt;"]
                    queries3 = ["\set $a c:\\tmp\\history3.txt;"]
                    queries5 = ['\set $a "\\abcde";']
                    queries6 = ["\set $a '\\abcde';"]
                elif type2.lower() == "linux":
                    queries = ["\set histfile /tmp/history;"]
                    queries2 = ["\Alias p /tmp/history2;"]
                    queries3 = ["\set $a /tmp/history3;"]
                    queries5 = ['\set $a "/abcde";']
                    queries6 = ["\set $a /abcde;"]

                queries.extend([
                    '\ALIAS tempcommand create primary index on bucketname;',
                    '\\\\tempcommand;',
                    '\ALIAS tempcommand2 select * from bucketname limit 1;',
                    '\\\\tempcommand2;', '\ALIAS;', '\echo \\\\tempcommand;',
                    '\echo \\\\tempcommand2;', '\echo histfile;'
                ])
                o = self.execute_commands_inside('%s/cbq -quiet' % (self.path),
                                                 '', queries, '', '',
                                                 bucket.name, '')
                if type2.lower() == "linux":
                    self.assertTrue('/tmp/history' in o)

                queries2.extend([
                    "\set histfile \\\\p;", "\echo histfile;",
                    "\set histfile '\\\\p';", "\echo histfile;"
                ])
                o = self.execute_commands_inside('%s/cbq -quiet' % (self.path),
                                                 '', queries2, '', '',
                                                 bucket.name, '')

                if type2.lower() == "linux":
                    self.assertTrue('/tmp/history2' in o)
                    self.assertTrue('\\p' in o)

                queries3.extend(["\set histfile $a;", "\echo histfile;"])
                o = self.execute_commands_inside('%s/cbq -quiet' % (self.path),
                                                 '', queries3, '', '',
                                                 bucket.name, '')

                queries4 = [
                    "\push histfile newhistory.txt;", "\echo histfile;",
                    '\ALIAS tempcommand create primary index on bucketname;',
                    '\\\\tempcommand;',
                    '\ALIAS tempcommand2 select * from bucketname limit 1;',
                    '\\\\tempcommand2;', '\ALIAS;', '\echo \\\\tempcommand;',
                    '\echo \\\\tempcommand2;', '\echo histfile;'
                ]
                o = self.execute_commands_inside('%s/cbq -quiet' % (self.path),
                                                 '', queries4, '', '',
                                                 bucket.name, '')

                queries5.append("\echo $a;")
                o = self.execute_commands_inside('%s/cbq -quiet' % (self.path),
                                                 '', queries5, '', '',
                                                 bucket.name, '')

                queries6.append("\echo $a;")
                o = self.execute_commands_inside('%s/cbq -quiet' % (self.path),
                                                 '', queries6, '', '',
                                                 bucket.name, '')
Example #32
    def run_failover_operations(self, chosen, failover_reason):
        """ Method to run fail over operations used in the test scenario based on failover reason """
        # Perform operations related to failover
        graceful_count = 0
        graceful_failover = True
        failed_over = True
        for node in chosen:
            unreachable = False
            if failover_reason == 'stop_server':
                unreachable = True
                self.stop_server(node)
                self.log.info(
                    "10 seconds delay to wait for membase-server to shutdown")
                # wait for 5 minutes until node is down
                self.assertTrue(
                    RestHelper(self.rest).wait_for_node_status(
                        node, "unhealthy", self.wait_timeout * 10),
                    msg=
                    "node status is not unhealthy even after waiting for 5 minutes"
                )
            elif failover_reason == "firewall":
                unreachable = True
                self.filter_list.append(node.ip)
                server = [srv for srv in self.servers if node.ip == srv.ip][0]
                RemoteUtilHelper.enable_firewall(
                    server, bidirectional=self.bidirectional)
                status = RestHelper(self.rest).wait_for_node_status(
                    node, "unhealthy", self.wait_timeout * 10)
                if status:
                    self.log.info(
                        "node {0}:{1} is 'unhealthy' as expected".format(
                            node.ip, node.port))
                else:
                    # verify iptables on the node if something wrong
                    for server in self.servers:
                        if server.ip == node.ip:
                            shell = RemoteMachineShellConnection(server)
                            info = shell.extract_remote_info()
                            if info.type.lower() == "windows":
                                o, r = shell.execute_command(
                                    "netsh advfirewall show allprofiles")
                                shell.log_command_output(o, r)
                            else:
                                o, r = shell.execute_command(
                                    "/sbin/iptables --list")
                                shell.log_command_output(o, r)
                            shell.disconnect()
                    self.rest.print_UI_logs()
                    api = self.rest.baseUrl + 'nodeStatuses'
                    status, content, header = self.rest._http_request(api)
                    json_parsed = json.loads(content)
                    self.log.info("nodeStatuses: {0}".format(json_parsed))
                    self.fail(
                        "node status is not unhealthy even after waiting for 5 minutes"
                    )
            # verify the failover type
            if self.check_verify_failover_type:
                graceful_count, graceful_failover = self.verify_failover_type(
                    node, graceful_count, self.num_replicas, unreachable)
            # define precondition check for failover
            success_failed_over = self.rest.fail_over(
                node.id, graceful=(self.graceful and graceful_failover))
            if self.graceful and graceful_failover:
                if self.stopGracefulFailover or self.killNodes or self.stopNodes or self.firewallOnNodes:
                    self.victim_node_operations(node)
                    # Start Graceful Again
                    self.log.info(" Start Graceful Failover Again !")
                    self.sleep(60)
                    success_failed_over = self.rest.fail_over(
                        node.id,
                        graceful=(self.graceful and graceful_failover))
                    msg = "graceful failover failed for nodes {0}".format(
                        node.id)
                    self.assertTrue(
                        self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
                else:
                    msg = "rebalance failed while removing failover nodes {0}".format(
                        node.id)
                    self.assertTrue(
                        self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
            failed_over = failed_over and success_failed_over

        # Check for negative cases
        if self.graceful and (failover_reason in ['stop_server', 'firewall']):
            if failed_over:
                # MB-10479
                self.rest.print_UI_logs()
            self.assertFalse(
                failed_over,
                "Graceful Falover was started for unhealthy node!!! ")
            return
        elif self.gracefulFailoverFail and not failed_over:
            """ Check if the fail_over fails as expected """
            self.assertFalse(
                failed_over,
                """ Graceful failover should fail due to not enough replicas """
            )
            return

        # Check if failover happened as expected or re-try one more time
        if not failed_over:
            self.log.info(
                "unable to failover the node the first time. try again in  60 seconds.."
            )
            # try again in 75 seconds
            self.sleep(75)
            failed_over = self.rest.fail_over(node.id,
                                              graceful=(self.graceful
                                                        and graceful_failover))
        if self.graceful and (failover_reason
                              not in ['stop_server', 'firewall']):
            reached = RestHelper(self.rest).rebalance_reached()
            self.assertTrue(
                reached,
                "rebalance failed for Graceful Failover, stuck or did not completed"
            )

        # Verify Active and Replica Bucket Count
        if self.num_replicas > 0:
            nodes = self.filter_servers(self.servers, chosen)
            self.vb_distribution_analysis(servers=nodes,
                                          buckets=self.buckets,
                                          std=20.0,
                                          total_vbuckets=self.total_vbuckets,
                                          type="failover",
                                          graceful=(self.graceful
                                                    and graceful_failover))
Example #33
class auditCLITest(CliBaseTest):

    def setUp(self):
        super(auditCLITest, self).setUp()
        self.enableStatus = self.input.param("enableStatus", None)
        self.logPath = self.input.param("logPath", None)
        self.rotateInt = self.input.param("rotateInt", None)
        self.errorMsg = self.input.param("errorMsg", None)
        self.ldapUser = self.input.param('ldapUser', 'Administrator')
        self.ldapPass = self.input.param('ldapPass', 'password')
        self.source = self.input.param('source', None)
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        type = info.type.lower()
        if type == 'windows' and self.source == 'saslauthd':
            raise Exception(" Ldap Tests cannot run on windows");
        elif self.source == 'saslauthd':
                rest = RestConnection(self.master)
                self.setupLDAPSettings(rest)
                #rest.ldapUserRestOperation(True, [[self.ldapUser]], exclude=None)
                self.set_user_role(rest,self.ldapUser)

    def tearDown(self):
        super(auditCLITest, self).tearDown()

    def set_user_role(self,rest,username,user_role='admin'):
        payload = "name=" + username + "&roles=" + user_role
        status, content, header =  rest._set_user_roles(rest,user_name=username,payload=payload)

    def setupLDAPSettings (self,rest):
        api = rest.baseUrl + 'settings/saslauthdAuth'
        params = urllib.urlencode({"enabled":'true',"admins":[],"roAdmins":[]})
        status, content, header = rest._http_request(api, 'POST', params)
        return status, content, header

    def returnBool(self, boolString):
        if boolString in ('True', True, 'true'):
            return 1
        else:
            return 0

    def returnBoolVal(self, boolVal):
        if boolVal == 1:
            return 'true'
        else:
            return 'false'



    def test_enableDisableAudit(self):
        auditIns = audit(host=self.master)
        remote_client = RemoteMachineShellConnection(self.master)
        tempEnable = auditIns.getAuditStatus()
        try:
            cli_command = 'setting-audit'
            options = "--audit-enable=0"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                        options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            tempEnable = auditIns.getAuditStatus()
            self.assertFalse(tempEnable, "Issues enable/disable via CLI")
            options = "--audit-enable=1"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                        options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            tempEnable = auditIns.getAuditStatus()
            self.assertTrue(tempEnable, "Issues enable/disable via CLI")
        finally:
            auditIns.setAuditEnable(self.returnBoolVal(tempEnable))

    def test_setAuditParam(self):
        auditIns = audit(host=self.master)
        tempEnable = auditIns.getAuditStatus()
        tempLogPath = auditIns.getAuditLogPath()
        tempRotateInt = auditIns.getAuditRotateInterval()
        try:
            remote_client = RemoteMachineShellConnection(self.master)
            cli_command = "setting-audit"
            options = " --audit-enable={0}".format(self.enableStatus)
            options += " --audit-log-rotate-interval={0}".format(self.rotateInt)
            options += " --audit-log-path={0}".format(self.logPath)
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                        options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
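            # the CLI flag appears to take the rotate interval in minutes while
            # the audit API reports seconds, hence the *60 conversion below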
            temp_rotate_int = self.rotateInt*60
            tempFlag = self.validateSettings(self.enableStatus, self.logPath, temp_rotate_int)
            self.assertTrue(tempFlag)
        finally:
            auditIns.setAuditEnable(self.returnBoolVal(tempEnable))
            auditIns.setAuditLogPath(tempLogPath)
            auditIns.setAuditRotateInterval(tempRotateInt)


    def validateSettings(self, status, log_path, rotate_interval):
        auditIns = audit(host=self.master)
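        # getAuditLogPath() seems to return the path with a trailing separator;
        # strip it before comparing against the value passed on the CLI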
        tempLogPath = (auditIns.getAuditLogPath())[:-1]
        tempStatus = auditIns.getAuditStatus()
        tempRotateInt = auditIns.getAuditRotateInterval()
        flag = True

        if (status != self.returnBool(tempStatus)):
            self.log.info ("Mismatch with status - Expected - {0} -- Actual - {1}".format(status, tempStatus))
            flag = False

        if (log_path != tempLogPath):
            self.log.info ("Mismatch with log path - Expected - {0} -- Actual - {1}".format(log_path, tempLogPath))
            flag = False

        if (rotate_interval != tempRotateInt):
            self.log.info ("Mismatch with rotate interval - Expected - {0} -- Actual - {1}".format(rotate_interval, tempRotateInt))
            flag = False
        return flag
Example #34
0
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[],
                             do_new_rest=False):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing an online upgrade")
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        save_upgrade_config = False
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            # check to see if we are installing from latestbuilds or releases
            # note: for newer releases (1.8.0) even release versions can have the
            #  form 1.8.0r-55
            if re.search('r', initial_version):
                builds, changes = BuildQuery().get_all_builds()
                older_build = BuildQuery().find_membase_build(
                    builds,
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            else:
                older_build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            remote.stop_membase()
            remote.stop_couchbase()
            remote.download_build(older_build)
            # now install the older build
            remote.membase_install(older_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username,
                                   rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        if create_buckets:
            # create buckets and wait for them to be ready
            # TODO: the bucket port should also be configurable; pass it as a
            # parameter to this test later

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(
            master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(
                otpNodeIds))

        if initial_version == "1.7.0" or initial_version == "1.7.1":
            self._save_config(rest_settings, master)

        input_version = input.test_params['version']
        node_upgrade_path.append(input_version)
        current_version = initial_version
        previous_version = current_version
        # offline upgrade path (used when roll_upgrade is False)
        log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                                   node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                previous_version = current_version
                current_version = version
                if version != initial_version:
                    log.info("Upgrading to version {0}".format(version))
                    self._stop_membase_servers(servers)
                    if previous_version.startswith(
                            "1.7") and current_version.startswith("1.8"):
                        save_upgrade_config = True
                    # No need to save the upgrade config from 180 to 181
                    if previous_version.startswith(
                            "1.8.0") and current_version.startswith("1.8.1"):
                        save_upgrade_config = False
                    appropriate_build = _get_build(servers[0],
                                                   version,
                                                   is_amazon=is_amazon)
                    self.assertTrue(
                        appropriate_build.url,
                        msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        remote.download_build(appropriate_build)
                        remote.membase_upgrade(
                            appropriate_build,
                            save_upgrade_config=save_upgrade_config)
                        RestHelper(
                            RestConnection(server)).is_ns_server_running(
                                testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertTrue(pools_info['implementationVersion'],
                                        appropriate_build.product_version)

                        if start_upgraded_first:
                            log.info("Starting server {0} post upgrade".format(
                                server))
                            remote.start_membase()
                        else:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)
                    if version == "1.7.0" or version == "1.7.1":
                        self._save_config(rest_settings, master)

                    if create_buckets:
                        self.assertTrue(
                            BucketOperationHelper.wait_for_bucket_creation(
                                'default', RestConnection(master)),
                            msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
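                # treat the previous node as the acting master for this round
                # (wraps to the last server when server_index is 0)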
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(
                    master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip,
                                        node.port) == "{0}:{1}".format(
                                            server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes,
                                              ejectedNodes=toBeEjectedNodes)
                self.assertTrue(
                    removed,
                    msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.download_build(appropriate_build)
                # if initial version is 180
                # Don't uninstall the server
                if not initial_version.startswith('1.8.0'):
                    remote.membase_uninstall()
                    remote.couchbase_uninstall()
                    remote.membase_install(appropriate_build)
                else:
                    remote.membase_upgrade(appropriate_build)

                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                log.info(
                    "sleeping {0} seconds to wait for membase-server to start...".format(
                        TIMEOUT_SECS))
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster_port(rest_settings.rest_username,
                                       rest_settings.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                # re-add this node to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(
                    master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                # Issue rest call to the newly added node
                # MB-5108
                if do_new_rest:
                    master = server
                    rest = RestConnection(master)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(
                    rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(
                        master.ip))
                log.info(
                    'started rebalance operation on master node {0}'.format(
                        master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(
                    rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".
                    format(otpNodeIds))

            # TODO: how can we verify that the cluster init config is preserved?
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'default', RestConnection(master)),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], master,
                        bucket.name, self)
Example #35
0
class AltAddrBaseTest(BaseTestCase):
    vbucketId = 0

    def setUp(self):
        self.times_teardown_called = 1
        super(AltAddrBaseTest, self).setUp()
        self.r = random.Random()
        self.cluster = Cluster()
        self.clusters_dic = self.input.clusters
        self.client_os = self.input.param("client_os", "linux")
        self.alt_addr_with_xdcr = self.input.param("alt_addr_with_xdcr", False)
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
            elif len(self.clusters_dic) == 1:
                self.log.error(
                    "=== need 2 clusters to set up XDCR in the ini file ===")
            if self.alt_addr_with_xdcr:
                self.des_name = "des_cluster"
                self.delete_xdcr_reference(self.clusters_dic[0][0].ip,
                                           self.clusters_dic[1][0].ip)
                if self.skip_init_check_cbserver:
                    for key in self.clusters_dic.keys():
                        servers = self.clusters_dic[key]
                        try:
                            self.backup_reset_clusters(servers)
                        except:
                            self.log.error(
                                "was not able to cleanup cluster the first time"
                            )
                            self.backup_reset_clusters(servers)
        else:
            self.log.error("**** Cluster config is setup in ini file. ****")

        self.shell = RemoteMachineShellConnection(self.master)
        if not self.skip_init_check_cbserver:
            self.rest = RestConnection(self.master)
            self.cb_version = self.rest.get_nodes_version()

        self.key_gen = self.input.param("key-gen", True)
        self.secure_conn = self.input.param("secure-conn", False)
        self.no_cacert = self.input.param("no-cacert", False)
        self.no_ssl_verify = self.input.param("no-ssl-verify", False)
        self.verify_data = self.input.param("verify-data", False)
        self.debug_logs = self.input.param("debug-logs", False)
        self.should_fail = self.input.param("should-fail", False)
        self.add_hostname_node = self.input.param("add_hostname_node", False)
        self.add_hostname_node_at_src = self.input.param(
            "add_hostname_node_at_src", False)
        self.add_hostname_node_at_des = self.input.param(
            "add_hostname_node_at_des", False)
        self.num_hostname_add = self.input.param("num_hostname_add", 1)
        self.alt_addr_services_in = self.input.param("alt_addr_services_in",
                                                     "kv")
        self.alt_addr_rebalance_out = self.input.param(
            "alt_addr_rebalance_out", False)
        self.alt_addr_rebalance_in = self.input.param("alt_addr_rebalance_in",
                                                      False)
        self.alt_addr_rebalance_in_services = self.input.param(
            "alt_addr_rebalance_in_services", "kv")
        self.alt_addr_use_public_dns = self.input.param(
            "alt_addr_use_public_dns", False)
        self.alt_addr_kv_loader = self.input.param("alt_addr_kv_loader", False)
        self.alt_addr_n1ql_query = self.input.param("alt_addr_n1ql_query",
                                                    False)
        self.alt_addr_eventing_function = self.input.param(
            "alt_addr_eventing_function", False)
        self.alt_addr_fts_loader = self.input.param("alt_addr_fts_loader",
                                                    False)
        self.run_alt_addr_loader = self.input.param("run_alt_addr_loader",
                                                    False)
        self.all_alt_addr_set = False

        info = self.shell.extract_remote_info()
        self.os_version = info.distribution_version.lower()
        self.deliverable_type = info.deliverable_type.lower()
        type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.full_v = None
        self.short_v = None
        self.build_number = None
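        # ask ns_server (via the /diag/eval endpoint) for the install's bin directory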
        cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(
            self.master.ip, self.master.rest_username,
            self.master.rest_password)
        cmd += '-d "path_config:component_path(bin)."'
        # check_output returns bytes on Python 3; decode before the string checks below
        bin_path = check_output(cmd, shell=True).decode()
        if "bin" not in bin_path:
            self.fail("Check if cb server install on %s" % self.master.ip)
        else:
            self.cli_command_path = bin_path.replace('"', '') + "/"
        self.root_path = LINUX_ROOT_PATH
        self.tmp_path = "/tmp/"
        self.tmp_path_raw = "/tmp/"
        self.cmd_ext = ""
        self.src_file = ""
        self.des_file = ""
        self.log_path = LINUX_COUCHBASE_LOGS_PATH
        self.base_cb_path = LINUX_CB_PATH
        """ non root path """
        if self.nonroot:
            self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_LOGS_PATH)
            self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                                LINUX_CB_PATH)
            self.root_path = "/home/%s/" % self.master.ssh_username
        if type == 'windows':
            self.os = 'windows'
            self.cmd_ext = ".exe"
            self.root_path = WIN_ROOT_PATH
            self.tmp_path = WIN_TMP_PATH
            self.tmp_path_raw = WIN_TMP_PATH_RAW
            win_format = "C:/Program Files"
            cygwin_format = "/cygdrive/c/Program\ Files"
            if win_format in self.cli_command_path:
                self.cli_command_path = self.cli_command_path.replace(
                    win_format, cygwin_format)
            self.base_cb_path = WIN_CB_PATH
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
        self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(
            type)
        self.couchbase_usrname = "%s" % (
            self.input.membase_settings.rest_username)
        self.couchbase_password = "%s" % (
            self.input.membase_settings.rest_password)
        self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                        self.couchbase_password)
        self.path_type = self.input.param("path_type", None)
        if self.path_type is None:
            self.log.info("Test command with absolute path ")
        elif self.path_type == "local":
            self.log.info("Test command at %s dir " % self.cli_command_path)
            self.cli_command_path = "cd %s; ./" % self.cli_command_path
        self.cli_command = self.input.param("cli_command", None)

        self.start_with_cluster = self.input.param("start_with_cluster", True)
        if str(self.__class__).find(
                'couchbase_clitest.CouchbaseCliTest') == -1:
            if len(self.servers) > 1 and int(
                    self.nodes_init) == 1 and self.start_with_cluster:
                servers_in = [
                    self.servers[i + 1] for i in range(self.num_servers - 1)
                ]
                self.cluster.rebalance(self.servers[:1], servers_in, [])
        for bucket in self.buckets:
            testuser = [{
                'id': bucket.name,
                'name': bucket.name,
                'password': '******'
            }]
            rolelist = [{
                'id': bucket.name,
                'name': bucket.name,
                'roles': 'admin'
            }]
            self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)

    def tearDown(self):
        self.times_teardown_called += 1
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
                if self.dest_nodes and len(self.dest_nodes) > 1:
                    self.log.info(
                        "======== clean up destination cluster =======")
                    rest = RestConnection(self.dest_nodes[0])
                    rest.remove_all_remote_clusters()
                    rest.remove_all_replications()
                    BucketOperationHelper.delete_all_buckets_or_assert(
                        self.dest_nodes, self)
                    ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
            elif len(self.clusters_dic) == 1:
                self.log.error(
                    "=== need 2 clusters to set up XDCR in the ini file ===")
        else:
            self.log.info(
                "**** To run XDCR tests, cluster config must be set up in the ini file. ****"
            )
        super(AltAddrBaseTest, self).tearDown()

    def _list_compare(self, list1, list2):
        if len(list1) != len(list2):
            return False
        for elem1 in list1:
            found = False
            for elem2 in list2:
                if elem1 == elem2:
                    found = True
                    break
            if not found:
                return False
        return True

    def get_internal_IP(self, server):
        shell = RemoteMachineShellConnection(server)
        internal_IP = shell.get_ip_address()
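        # get_ip_address() may return several addresses; drop the loopback entry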
        internal_IP = [x for x in internal_IP if x != "127.0.0.1"]
        shell.disconnect()
        if internal_IP:
            return internal_IP[0]
        else:
            self.fail("Fail to get internal IP")

    def backup_reset_clusters(self, servers):
        BucketOperationHelper.delete_all_buckets_or_assert(servers, self)
        ClusterOperationHelper.cleanup_cluster(servers, master=servers[0])
        #ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, self)

    def get_external_IP(self, internal_IP):
        found = False
        external_IP = ""
        for server in self.servers:
            internalIP = self.get_internal_IP(server)
            if internal_IP == internalIP:
                found = True
                external_IP = server.ip
                break
        if not found:
            self.fail("Could not find server which matches internal IP")
        else:
            return external_IP

    def setup_xdcr_cluster(self):
        if not self.input.clusters[0] and not self.input.clusters[1]:
            self.fail("This test needs ini set with cluster config")
        self.log.info("Create source cluster")
        self.create_xdcr_cluster(self.input.clusters[0])
        self.log.info("Create destination cluster")
        self.create_xdcr_cluster(self.input.clusters[1])

    def create_xdcr_cluster(self, cluster_servers):
        num_hostname_add = 1
        add_host_name = False
        if self.add_hostname_node_at_src:
            add_host_name = True
        if self.add_hostname_node_at_des:
            add_host_name = True
        shell = RemoteMachineShellConnection(cluster_servers[0])
        services_in = self.alt_addr_services_in
        if "-" in services_in:
            set_services = services_in.split("-")
        else:
            set_services = services_in.split(",")

        i = 0  # index into set_services when one service is specified per node
        for server in cluster_servers[1:]:
            add_node_IP = self.get_internal_IP(server)
            node_services = "kv"
            if len(set_services) == 1:
                node_services = set_services[0]
            elif len(set_services) > 1:
                if len(set_services) == len(cluster_servers):
                    node_services = set_services[i]
                    i += 1
            if add_host_name and num_hostname_add <= self.num_hostname_add:
                add_node_IP = server.ip
                num_hostname_add += 1

            try:
                shell.alt_addr_add_node(main_server=cluster_servers[0],
                                        internal_IP=add_node_IP,
                                        server_add=server,
                                        services=node_services,
                                        cmd_ext=self.cmd_ext)
            except Exception as e:
                if e:
                    self.fail("Error: {0}".format(e))
        rest = RestConnection(cluster_servers[0])
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                       ejectedNodes=[])
        rest.monitorRebalance()

    def create_xdcr_reference(self, src_IP, des_IP):
        cmd = "curl -u Administrator:password "
        cmd += "http://{0}:8091/pools/default/remoteClusters ".format(src_IP)
        cmd += "-d username=Administrator -d password=password "
        cmd += "-d name={0} -d demandEncryption=0 ".format(self.des_name)
        cmd += "-d hostname={0}:8091 ".format(des_IP)

        mesg = "\n **** Create XDCR cluster remote reference from cluster {0} ".format(
            src_IP)
        mesg += "to cluster {0}".format(des_IP)
        self.log.info(mesg)
        print("command to run: {0}".format(cmd))
        try:
            output = check_output(cmd, shell=True, stderr=STDOUT)
        except CalledProcessError as e:
            if e.output:
                self.fail("\n Error: ".format(e.output))

    def delete_xdcr_reference(self, src_IP, des_IP):
        cmd = "curl -X DELETE -u Administrator:password "
        cmd += "http://{0}:8091/pools/default/remoteClusters/{1}".format(
            src_IP, self.des_name)
        print("command to run: {0}".format(cmd))
        try:
            output = check_output(cmd, shell=True, stderr=STDOUT)
        except CalledProcessError as e:
            if e.output:
                self.fail("Error: ".format(e.output))

    def create_xdcr_replication(self, src_IP, des_IP, bucket_name):
        cmd = "curl -X POST -u Administrator:password "
        cmd += "http://{0}:8091/controller/createReplication ".format(src_IP)
        cmd += "-d fromBucket={0} ".format(bucket_name)
        cmd += "-d toCluster={0} ".format(self.des_name)
        cmd += "-d toBucket={0} ".format(bucket_name)
        cmd += "-d replicationType=continuous -d enableCompression=1 "
        print("command to run: {0}".format(cmd))
        try:
            output = check_output(cmd, shell=True, stderr=STDOUT)
            return output
        except CalledProcessError as e:
            if e.output:
                self.fail("Error: ".format(e.output))

    def delete_xdcr_replication(self, src_IP, replication_id):
        replication_id = urllib.quote(replication_id, safe='')
        cmd = "curl -X DELETE -u Administrator:password "
        cmd += " http://{0}:8091/controller/cancelXDCR/{1} ".format(
            src_IP, replication_id)
        print("command to run: {0}".format(cmd))
        try:
            output = check_output(cmd, shell=True, stderr=STDOUT)
        except CalledProcessError as e:
            if e.output:
                self.fail("Error: ".format(e.output))

    def set_xdcr_checkpoint(self, src_IP, check_time):
        cmd = "curl  -u Administrator:password "
        cmd += "http://{0}:8091/settings/replications ".format(src_IP)
        cmd += "-d goMaxProcs=10 "
        cmd += "-d checkpointInterval={0} ".format(check_time)
        print("command to run: {0}".format(cmd))
        try:
            self.log.info(
                "Set xdcr checkpoint to {0} seconds".format(check_time))
            output = check_output(cmd, shell=True, stderr=STDOUT)
        except CalledProcessError as e:
            if e.output:
                self.fail("Error: ".format(e.output))

    def waitForItemCount(self, server, bucket_name, count, timeout=30):
        rest = RestConnection(server)
        for sec in range(timeout):
            items = int(
                rest.get_bucket_json(bucket_name)["basicStats"]["itemCount"])
            if items != count:
                time.sleep(1)
            else:
                return True
        log.info("Waiting for item count to be %d timed out", count)
        return False

    def _check_output(self, word_check, output):
        found = False
        if len(output) >= 1:
            if isinstance(word_check, list):
                for ele in word_check:
                    for x in output:
                        if ele.lower() in x.lower():
                            self.log.info(
                                "Found '{0} in CLI output".format(ele))
                            found = True
                            break
            elif isinstance(word_check, str):
                for x in output:
                    if word_check.lower() in x.lower():
                        self.log.info(
                            "Found '{0}' in CLI output".format(word_check))
                        found = True
                        break
            else:
                self.log.error("invalid {0}".format(word_check))
        return found
Example #36
0
class AnalyticsHelper():
    def __init__(self,
                 version=None,
                 master=None,
                 shell=None,
                 use_rest=None,
                 max_verify=0,
                 buckets=[],
                 item_flag=0,
                 analytics_port=8095,
                 n1ql_port=8093,
                 full_docs_list=[],
                 log=None,
                 input=None,
                 database=None):
        self.version = version
        self.shell = shell
        self.n1ql_port = n1ql_port
        self.max_verify = max_verify
        self.buckets = buckets
        self.item_flag = item_flag
        self.analytics_port = analytics_port
        self.input = input
        self.log = log
        self.use_rest = True
        self.full_docs_list = full_docs_list
        self.master = master
        self.database = database
        if self.full_docs_list and len(self.full_docs_list) > 0:
            self.gen_results = TuqGenerators(self.log, self.full_docs_list)

    def killall_tuq_process(self):
        self.shell.execute_command("killall cbq-engine")
        self.shell.execute_command("killall tuqtng")
        self.shell.execute_command("killall indexer")

    def run_query_from_template(self, query_template):
        self.query = self.gen_results.generate_query(query_template)
        expected_result = self.gen_results.generate_expected_result()
        actual_result = self.run_analytics_query()
        return actual_result, expected_result

    def run_analytics_query(self,
                            query=None,
                            min_output_size=10,
                            server=None,
                            query_params={},
                            is_prepared=False,
                            scan_consistency=None,
                            scan_vector=None,
                            verbose=False):
        if query is None:
            query = self.query
        if server is None:
            server = self.master
            if server.ip == "127.0.0.1":
                self.analytics_port = server.analytics_port
        else:
            if server.ip == "127.0.0.1":
                self.analytics_port = server.analytics_port
            if self.input.tuq_client and "client" in self.input.tuq_client:
                server = self.tuq_client
        if self.analytics_port == None or self.analytics_port == '':
            self.analytics_port = self.input.param("analytics_port", 8095)
            if not self.analytics_port:
                self.log.info(
                    " analytics_port is not defined, processing will not proceed further"
                )
                raise Exception(
                    "analytics_port is not defined, processing will not proceed further"
                )
        if self.use_rest:
            query_params = {}
            if scan_consistency:
                query_params['scan_consistency'] = scan_consistency
            if scan_vector:
                query_params['scan_vector'] = str(scan_vector).replace(
                    "'", '"')
            if verbose:
                self.log.info('RUN QUERY %s' % query)
            query = query + ";"
            if "USE INDEX" in query:
                query = query.replace("USE INDEX(`#primary` USING GSI)", " ")
            for bucket in self.buckets:
                query = query.replace(bucket.name + " ",
                                      bucket.name + "_shadow ")

            self.log.info(" CBAS QUERY :: {0}".format(query))
            result = RestConnection(server).analytics_tool(
                query,
                self.analytics_port,
                query_params=query_params,
                verbose=verbose)

        if isinstance(result, str) or 'errors' in result:
            error_result = str(result)
            length_display = len(error_result)
            if length_display > 500:
                error_result = error_result[:500]
            raise CBQError(error_result, server.ip)
        self.log.info("TOTAL ELAPSED TIME: %s" %
                      result["metrics"]["elapsedTime"])
        return result

    def _verify_results(self,
                        actual_result,
                        expected_result,
                        missing_count=1,
                        extra_count=1):
        self.log.info(" Analyzing Actual Result")
        actual_result = self._gen_dict(actual_result)
        self.log.info(" Analyzing Expected Result")
        expected_result = self._gen_dict(expected_result)
        if len(actual_result) != len(expected_result):
            raise Exception(
                "Results are incorrect.Actual num %s. Expected num: %s.\n" %
                (len(actual_result), len(expected_result)))
        msg = "The number of rows match but the results mismatch, please check"
        if actual_result != expected_result:
            raise Exception(msg)

    def _verify_results_rqg_new(self,
                                n1ql_result=[],
                                sql_result=[],
                                hints=["a1"]):
        new_n1ql_result = []
        for result in n1ql_result:
            if result != {}:
                for key in list(result.keys()):
                    if key.find('_shadow') != -1:
                        new_n1ql_result.append(result[key])
                    else:
                        new_n1ql_result.append(result)
                        break
        n1ql_result = new_n1ql_result
        if self._is_function_in_result(hints):
            return self._verify_results_rqg_for_function(
                n1ql_result, sql_result)
        check = self._check_sample(n1ql_result, hints)
        actual_result = n1ql_result
        if actual_result == [{}]:
            actual_result = []
        if check:
            actual_result = self._gen_dict(n1ql_result)
        actual_result = sorted(actual_result)
        expected_result = sorted(sql_result)
        if len(actual_result) != len(expected_result):
            extra_msg = self._get_failure_message(expected_result,
                                                  actual_result)
            raise Exception(
                "Results are incorrect.Actual num %s. Expected num: %s.:: %s \n"
                % (len(actual_result), len(expected_result), extra_msg))
        msg = "The number of rows match but the results mismatch, please check"
        if self._sort_data(actual_result) != self._sort_data(expected_result):
            extra_msg = self._get_failure_message(expected_result,
                                                  actual_result)
            raise Exception(msg + "\n " + extra_msg)

    def _verify_results_rqg(self, n1ql_result=[], sql_result=[], hints=["a1"]):
        new_n1ql_result = []
        for result in n1ql_result:
            if result != {}:
                new_n1ql_result.append(result)
        n1ql_result = new_n1ql_result
        if self._is_function_in_result(hints):
            return self._verify_results_rqg_for_function(
                n1ql_result, sql_result)
        check = self._check_sample(n1ql_result, hints)
        actual_result = n1ql_result
        if actual_result == [{}]:
            actual_result = []
        if check:
            actual_result = self._gen_dict(n1ql_result)
        actual_result = sorted(actual_result)
        expected_result = sorted(sql_result)
        if len(actual_result) != len(expected_result):
            extra_msg = self._get_failure_message(expected_result,
                                                  actual_result)
            raise Exception(
                "Results are incorrect.Actual num %s. Expected num: %s.:: %s \n"
                % (len(actual_result), len(expected_result), extra_msg))
        msg = "The number of rows match but the results mismatch, please check"
        if self._sort_data(actual_result) != self._sort_data(expected_result):
            extra_msg = self._get_failure_message(expected_result,
                                                  actual_result)
            raise Exception(msg + "\n " + extra_msg)

    def _sort_data(self, result):
        new_data = []
        for data in result:
            new_data.append(sorted(data))
        return new_data

    def _verify_results_crud_rqg(self,
                                 n1ql_result=[],
                                 sql_result=[],
                                 hints=["primary_key_id"]):
        new_n1ql_result = []
        for result in n1ql_result:
            if result != {}:
                new_n1ql_result.append(result)
        n1ql_result = new_n1ql_result
        if self._is_function_in_result(hints):
            return self._verify_results_rqg_for_function(
                n1ql_result, sql_result)
        check = self._check_sample(n1ql_result, hints)
        actual_result = n1ql_result
        if actual_result == [{}]:
            actual_result = []
        if check:
            actual_result = self._gen_dict(n1ql_result)
        actual_result = sorted(actual_result)
        expected_result = sorted(sql_result)
        if len(actual_result) != len(expected_result):
            extra_msg = self._get_failure_message(expected_result,
                                                  actual_result)
            raise Exception(
                "Results are incorrect.Actual num %s. Expected num: %s.:: %s \n"
                % (len(actual_result), len(expected_result), extra_msg))
        if not self._result_comparison_analysis(actual_result,
                                                expected_result):
            msg = "The number of rows match but the results mismatch, please check"
            extra_msg = self._get_failure_message(expected_result,
                                                  actual_result)
            raise Exception(msg + "\n " + extra_msg)

    def _get_failure_message(self, expected_result, actual_result):
        if expected_result == None:
            expected_result = []
        if actual_result == None:
            actual_result = []
        len_expected_result = len(expected_result)
        len_actual_result = len(actual_result)
        len_expected_result = min(5, len_expected_result)
        len_actual_result = min(5, len_actual_result)
        extra_msg = "mismatch in results :: expected :: {0}, actual :: {1} ".format(
            expected_result[0:len_expected_result],
            actual_result[0:len_actual_result])
        return extra_msg

    def _result_comparison_analysis(self, expected_result, actual_result):
        expected_map = {}
        actual_map = {}
        for data in expected_result:
            primary = None
            for key in list(data.keys()):
                keys = key
                if keys == "primary_key_id":
                    primary = keys
            expected_map[data[primary]] = data
        for data in actual_result:
            primary = None
            for key in list(data.keys()):
                keys = key
                if keys == "primary_key_id":
                    primary = keys
            actual_map[data[primary]] = data
        check = True
        for key in list(expected_map.keys()):
            if sorted(actual_map[key]) != sorted(expected_map[key]):
                check = False
        return check

    def _analyze_for_special_case_using_func(self, expected_result,
                                             actual_result):
        if expected_result == None:
            expected_result = []
        if actual_result == None:
            actual_result = []
        if len(expected_result) == 1:
            value = list(expected_result[0].values())[0]
            if value == None or value == 0:
                expected_result = []
        if len(actual_result) == 1:
            value = list(actual_result[0].values())[0]
            if value == None or value == 0:
                actual_result = []
        return expected_result, actual_result

    def _is_function_in_result(self, result):
        if result == "FUN":
            return True
        return False

    def _verify_results_rqg_for_function(self,
                                         n1ql_result=[],
                                         sql_result=[],
                                         hints=["a1"]):
        actual_count = -1
        expected_count = -1
        actual_result = n1ql_result
        sql_result, actual_result = self._analyze_for_special_case_using_func(
            sql_result, actual_result)
        if len(sql_result) != len(actual_result):
            msg = "the number of results do not match :: expected = {0}, actual = {1}".format(
                len(n1ql_result), len(sql_result))
            extra_msg = self._get_failure_message(sql_result, actual_result)
            raise Exception(msg + "\n" + extra_msg)
        n1ql_result = self._gen_dict_n1ql_func_result(n1ql_result)
        n1ql_result = sorted(n1ql_result)
        sql_result = self._gen_dict_n1ql_func_result(sql_result)
        sql_result = sorted(sql_result)
        if len(sql_result) == 0 and len(actual_result) == 0:
            return
        if sql_result != n1ql_result:
            max = 2
            if len(sql_result) < 5:
                max = len(sql_result)
            msg = "mismatch in results :: expected [0:{0}]:: {1}, actual [0:{0}]:: {2} ".format(
                max, sql_result[0:max], n1ql_result[0:max])
            raise Exception(msg)

    def _convert_to_number(self, val):
        if not isinstance(val, str):
            return val
        value = -1
        try:
            if val == '':
                return 0
            value = int(val.split("(")[1].split(")")[0])
        except Exception as ex:
            self.log.info(ex)
        finally:
            return value

    def analyze_failure(self, actual, expected):
        missing_keys = []
        different_values = []
        for key in list(expected.keys()):
            if key not in list(actual.keys()):
                missing_keys.append(key)
            if expected[key] != actual[key]:
                different_values.append(
                    "for key {0}, expected {1} \n actual {2}".format(
                        key, expected[key], actual[key]))
        self.log.info(missing_keys)
        if (len(different_values) > 0):
            self.log.info(" number of such cases {0}".format(
                len(different_values)))
            self.log.info(" example key {0}".format(different_values[0]))

    def check_missing_and_extra(self, actual, expected):
        missing = []
        extra = []
        for item in actual:
            if not (item in expected):
                extra.append(item)
        for item in expected:
            if not (item in actual):
                missing.append(item)
        return missing, extra

    def build_url(self, version):
        info = self.shell.extract_remote_info()
        type = info.distribution_type.lower()
        if type in ["ubuntu", "centos", "red hat"]:
            url = "https://s3.amazonaws.com/packages.couchbase.com/releases/couchbase-query/dp1/"
            url += "couchbase-query_%s_%s_linux.tar.gz" % (
                version, info.architecture_type)
        #TODO for windows
        return url

    def _restart_indexer(self):
        couchbase_path = "/opt/couchbase/var/lib/couchbase"
        cmd = "rm -f {0}/meta;rm -f /tmp/log_upr_client.sock".format(
            couchbase_path)
        self.shell.execute_command(cmd)

    def _start_command_line_query(self, server):
        self.shell = RemoteMachineShellConnection(server)
        self._set_env_variable(server)
        if self.version == "git_repo":
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                gopath = testconstants.LINUX_GOPATH
            else:
                gopath = testconstants.WINDOWS_GOPATH
            if self.input.tuq_client and "gopath" in self.input.tuq_client:
                gopath = self.input.tuq_client["gopath"]
            if os == 'windows':
                cmd = "cd %s/src/github.com/couchbase/query/server/main; " % (gopath) +\
                "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            else:
                cmd = "cd %s/src/github.com/couchbase/query//server/main; " % (gopath) +\
                "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %(
                                                                server.ip, server.port)
            self.shell.execute_command(cmd)
        elif self.version == "sherlock":
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                couchbase_path = testconstants.LINUX_COUCHBASE_BIN_PATH
            else:
                couchbase_path = testconstants.WIN_COUCHBASE_BIN_PATH
            if self.input.tuq_client and "sherlock_path" in self.input.tuq_client:
                couchbase_path = "%s/bin" % self.input.tuq_client[
                    "sherlock_path"]
                print("PATH TO SHERLOCK: %s" % couchbase_path)
            if os == 'windows':
                cmd = "cd %s; " % (couchbase_path) +\
                "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            else:
                cmd = "cd %s; " % (couchbase_path) +\
                "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %(
                                                                server.ip, server.port)
                n1ql_port = self.input.param("n1ql_port", None)
                if server.ip == "127.0.0.1" and server.n1ql_port:
                    n1ql_port = server.n1ql_port
                if n1ql_port:
                    cmd = "cd %s; " % (couchbase_path) +\
                './cbq-engine -datastore http://%s:%s/ -http=":%s">n1ql.log 2>&1 &' %(
                                                                server.ip, server.port, n1ql_port)
            self.shell.execute_command(cmd)
        else:
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                cmd = "cd /tmp/tuq;./cbq-engine -couchbase http://%s:%s/ >/dev/null 2>&1 &" % (
                    server.ip, server.port)
            else:
                cmd = "cd /cygdrive/c/tuq;./cbq-engine.exe -couchbase http://%s:%s/ >/dev/null 2>&1 &" % (
                    server.ip, server.port)
            self.shell.execute_command(cmd)

    def _parse_query_output(self, output):
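        # strip the interactive shell prompts (cbq> / tuq_client>) so that only
        # the JSON payload remains before parsing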
        if output.find("cbq>") == 0:
            output = output[output.find("cbq>") + 4:].strip()
        if output.find("tuq_client>") == 0:
            output = output[output.find("tuq_client>") + 11:].strip()
        if output.find("cbq>") != -1:
            output = output[:output.find("cbq>")].strip()
        if output.find("tuq_client>") != -1:
            output = output[:output.find("tuq_client>")].strip()
        return json.loads(output)

    def sort_nested_list(self, result):
        actual_result = []
        for item in result:
            curr_item = {}
            for key, value in item.items():
                if isinstance(value, list) or isinstance(value, set):
                    curr_item[key] = sorted(value)
                else:
                    curr_item[key] = value
            actual_result.append(curr_item)
        return actual_result

    def configure_gomaxprocs(self):
        max_proc = self.input.param("gomaxprocs", None)
        cmd = "export GOMAXPROCS=%s" % max_proc
        for server in self.servers:
            shell_connection = RemoteMachineShellConnection(server)
            shell_connection.execute_command(cmd)

    def drop_primary_index(self, using_gsi=True, server=None):
        if server == None:
            server = self.master
        self.log.info("CHECK FOR PRIMARY INDEXES")
        for bucket in self.buckets:
            self.query = "DROP PRIMARY INDEX ON {0}".format(bucket.name)
            if using_gsi:
                self.query += " USING GSI"
            if not using_gsi:
                self.query += " USING VIEW "
            self.log.info(self.query)
            try:
                check = self._is_index_in_list(bucket.name,
                                               "#primary",
                                               server=server)
                if check:
                    self.run_analytics_query(server=server)
            except Exception as ex:
                self.log.error('ERROR during index drop %s' % str(ex))

    def create_primary_index(self, using_gsi=True, server=None):
        if server == None:
            server = self.master
        for bucket in self.buckets:
            self.query = "CREATE PRIMARY INDEX ON %s " % (bucket.name)
            if using_gsi:
                self.query += " USING GSI"
                # if gsi_type == "memdb":
                #     self.query += " WITH {'index_type': 'memdb'}"
            if not using_gsi:
                self.query += " USING VIEW "
            try:
                check = self._is_index_in_list(bucket.name,
                                               "#primary",
                                               server=server)
                if not check:
                    self.run_analytics_query(server=server)
                    check = self.is_index_online_and_in_list(bucket.name,
                                                             "#primary",
                                                             server=server)
                    if not check:
                        raise Exception(
                            " Timed-out Exception while building primary index for bucket {0} !!!"
                            .format(bucket.name))
                else:
                    raise Exception(
                        " Primary Index Already present, This looks like a bug !!!"
                    )
            except Exception as ex:
                self.log.error('ERROR during index creation %s' % str(ex))
                raise ex

    def verify_index_with_explain(self,
                                  actual_result,
                                  index_name,
                                  check_covering_index=False):
        check = True
        if check_covering_index:
            if "covering" in str(actual_result):
                check = True
            else:
                check = False
        if index_name in str(actual_result):
            return True and check
        return False

    def run_query_and_verify_result(self,
                                    server=None,
                                    query=None,
                                    timeout=120.0,
                                    max_try=1,
                                    expected_result=None,
                                    scan_consistency=None,
                                    scan_vector=None,
                                    verify_results=True):
        check = False
        init_time = time.time()
        try_count = 0
        while not check:
            next_time = time.time()
            try:
                actual_result = self.run_analytics_query(
                    query=query,
                    server=server,
                    scan_consistency=scan_consistency,
                    scan_vector=scan_vector)
                if verify_results:
                    self._verify_results(actual_result['results'],
                                         expected_result)
                else:
                    return "ran query with success and validated results", True
                check = True
            except Exception as ex:
                if (next_time - init_time > timeout or try_count >= max_try):
                    return ex, False
            finally:
                try_count += 1
        return "ran query with success and validated results", check

    def run_cbq_query(self,
                      query=None,
                      min_output_size=10,
                      server=None,
                      query_params={},
                      is_prepared=False,
                      scan_consistency=None,
                      scan_vector=None,
                      verbose=True):
        if query is None:
            query = self.query
        if server is None:
            server = self.master
            if server.ip == "127.0.0.1":
                self.n1ql_port = server.n1ql_port
        else:
            if server.ip == "127.0.0.1":
                self.n1ql_port = server.n1ql_port
            if self.input.tuq_client and "client" in self.input.tuq_client:
                server = self.tuq_client
        if self.n1ql_port is None or self.n1ql_port == '':
            self.n1ql_port = self.input.param("n1ql_port", 90)
            if not self.n1ql_port:
                self.log.info(
                    " n1ql_port is not defined, processing will not proceed further"
                )
                raise Exception(
                    "n1ql_port is not defined, processing will not proceed further"
                )
        if self.use_rest:
            query_params = {}
            if scan_consistency:
                query_params['scan_consistency'] = scan_consistency
            if scan_vector:
                query_params['scan_vector'] = str(scan_vector).replace(
                    "'", '"')
            if verbose:
                self.log.info('RUN QUERY %s' % query)
            result = RestConnection(server).query_tool(
                query,
                self.n1ql_port,
                query_params=query_params,
                is_prepared=is_prepared,
                verbose=verbose)
        else:
            # if self.version == "git_repo":
            #     output = self.shell.execute_commands_inside("$GOPATH/src/github.com/couchbaselabs/tuqtng/" +\
            #                                                 "tuq_client/tuq_client " +\
            #                                                 "-engine=http://%s:8093/" % server.ip,
            #                                            subcommands=[query,],
            #                                            min_output_size=20,
            #                                            end_msg='tuq_client>')
            # else:
            #os = self.shell.extract_remote_info().type.lower()
            shell = RemoteMachineShellConnection(server)
            #query = query.replace('"', '\\"')
            #query = query.replace('`', '\\`')
            #if os == "linux":
            cmd = "%s/cbq  -engine=http://%s:8093/" % (
                testconstants.LINUX_COUCHBASE_BIN_PATH, server.ip)
            output = shell.execute_commands_inside(cmd, query, "", "", "", "",
                                                   "")
            print("-" * 128)
            print(output)
            # parse the raw cbq shell output with the dedicated parser; it is not
            # guaranteed to be plain JSON, so don't rely on json.loads() here
            result = self._parse_query_output(output)
            print(result)
        if isinstance(result, str) or 'errors' in result:
            error_result = str(result)
            length_display = len(error_result)
            if length_display > 500:
                error_result = error_result[:500]
            raise CBQError(error_result, server.ip)
        self.log.info("TOTAL ELAPSED TIME: %s" %
                      result["metrics"]["elapsedTime"])
        return result
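
    # Hedged usage sketch (not part of the original helper class): over the REST
    # path above, scan_consistency / scan_vector are forwarded as query parameters,
    # so a "read your own writes" style call (assuming the cluster accepts
    # request_plus) looks like this. The statement is an illustrative assumption.
    def _example_request_plus_count(self, server=None):
        return self.run_cbq_query(query="SELECT COUNT(*) AS cnt FROM default",
                                  server=server,
                                  scan_consistency="request_plus")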

    # def is_index_online_and_in_list(self, bucket, index_name, server=None, timeout=600.0):
    #     check = self._is_index_in_list(bucket, index_name, server = server)
    #     init_time = time.time()
    #     while not check:
    #         time.sleep(1)
    #         check = self._is_index_in_list(bucket, index_name, server = server)
    #         next_time = time.time()
    #         if check or (next_time - init_time > timeout):
    #             return check
    #     return check
    #
    # def is_index_ready_and_in_list(self, bucket, index_name, server=None, timeout=600.0):
    #     query = "SELECT * FROM system:indexes where name = \'{0}\'".format(index_name)
    #     if server == None:
    #         server = self.master
    #     init_time = time.time()
    #     check = False
    #     while not check:
    #         res = self.run_analytics_query(query=query, server=server)
    #         for item in res['results']:
    #             if 'keyspace_id' not in item['indexes']:
    #                 check = False
    #             elif item['indexes']['keyspace_id'] == str(bucket) \
    #                     and item['indexes']['name'] == index_name \
    #                     and item['indexes']['state'] == "online":
    #                 check = True
    #         time.sleep(1)
    #         next_time = time.time()
    #         check = check or (next_time - init_time > timeout)
    #     return check

    # def is_index_online_and_in_list_bulk(self, bucket, index_names = [], server = None, index_state = "online", timeout = 600.0):
    #     check, index_names = self._is_index_in_list_bulk(bucket, index_names, server = server, index_state = index_state)
    #     init_time = time.time()
    #     while not check:
    #         check, index_names = self._is_index_in_list_bulk(bucket, index_names, server = server, index_state = index_state)
    #         next_time = time.time()
    #         if check or (next_time - init_time > timeout):
    #             return check
    #     return check
    #
    # def gen_build_index_query(self, bucket = "default", index_list = []):
    #     return "BUILD INDEX on {0}({1}) USING GSI".format(bucket,",".join(index_list))
    #
    # def gen_query_parameter(self, scan_vector = None, scan_consistency = None):
    #     query_params = {}
    #     if scan_vector:
    #         query_params.update("scan_vector", scan_vector)
    #     if scan_consistency:
    #         query_params.update("scan_consistency", scan_consistency)
    #     return query_params

    # def _is_index_in_list(self, bucket, index_name, server = None, index_state = ["pending", "building", "deferred"]):
    #     query = "SELECT * FROM system:indexes where name = \'{0}\'".format(index_name)
    #     if server == None:
    #         server = self.master
    #     res = self.run_cbq_query(query = query, server = server)
    #     for item in res['results']:
    #         if 'keyspace_id' not in item['indexes']:
    #             return False
    #         if item['indexes']['keyspace_id'] == str(bucket) and item['indexes']['name'] == index_name and item['indexes']['state'] not in index_state:
    #             return True
    #     return False
    #
    # def _is_index_in_list_bulk(self, bucket, index_names = [], server = None, index_state = ["pending","building"]):
    #     query = "SELECT * FROM system:indexes"
    #     if server == None:
    #         server = self.master
    #     res = self.run_cbq_query(query = query, server = server)
    #     index_count=0
    #     found_index_list = []
    #     for item in res['results']:
    #         if 'keyspace_id' not in item['indexes']:
    #             return False
    #         for index_name in index_names:
    #             if item['indexes']['keyspace_id'] == str(bucket) and item['indexes']['name'] == index_name and item['indexes']['state'] not in index_state:
    #                 found_index_list.append(index_name)
    #     if len(found_index_list) == len(index_names):
    #         return True, []
    #     return False, list(set(index_names) - set(found_index_list))
    #
    # def gen_index_map(self, server = None):
    #     query = "SELECT * FROM system:indexes"
    #     if server == None:
    #         server = self.master
    #     res = self.run_cbq_query(query = query, server = server)
    #     index_map = {}
    #     for item in res['results']:
    #         bucket_name = item['indexes']['keyspace_id'].encode('ascii','ignore')
    #         if bucket_name not in index_map.keys():
    #             index_map[bucket_name] = {}
    #         index_name = str(item['indexes']['name'])
    #         index_map[bucket_name][index_name] = {}
    #         index_map[bucket_name][index_name]['state'] = item['indexes']['state']
    #     return index_map
    #
    # def get_index_count_using_primary_index(self, buckets, server = None):
    #     query = "SELECT COUNT(*) FROM {0}"
    #     map= {}
    #     if server == None:
    #         server = self.master
    #     for bucket in buckets:
    #         res = self.run_cbq_query(query = query.format(bucket.name), server = server)
    #         map[bucket.name] = int(res["results"][0]["$1"])
    #     return map
    #
    # def get_index_count_using_index(self, bucket, index_name,server=None):
    #     query = 'SELECT COUNT(*) FROM {0} USE INDEX ({1})'.format(bucket.name, index_name)
    #     if not server:
    #         server = self.master
    #     res = self.run_cbq_query(query=query, server=server)
    #     return int(res['results'][0]['$1'])

    def _gen_dict(self, result):
        result_set = []
        if result is not None and len(result) > 0:
            for val in result:
                for key in list(val.keys()):
                    result_set.append(val[key])
        return result_set

    def _gen_dict_n1ql_func_result(self, result):
        result_set = [val[key] for val in result for key in list(val.keys())]
        new_result_set = []
        if len(result_set) > 0:
            for value in result_set:
                if isinstance(value, float):
                    new_result_set.append(round(value, 0))
                else:
                    new_result_set.append(value)
        else:
            new_result_set = result_set
        return new_result_set

    def _check_sample(self, result, expected_in_key=None):
        if expected_in_key == "FUN":
            return False
        if expected_in_key is None or len(expected_in_key) == 0:
            return False
        if result is not None and len(result) > 0:
            sample = result[0]
            for key in list(sample.keys()):
                for sample in expected_in_key:
                    if key in sample:
                        return True
        return False

    def old_gen_dict(self, result):
        result_set = []
        map = {}
        duplicate_keys = []
        try:
            if result is not None and len(result) > 0:
                for val in result:
                    for key in list(val.keys()):
                        result_set.append(val[key])
            for val in result_set:
                if val["_id"] in list(map.keys()):
                    duplicate_keys.append(val["_id"])
                map[val["_id"]] = val
            keys = list(map.keys())
            keys.sort()
        except Exception as ex:
            self.log.info(ex)
            raise
        if len(duplicate_keys) > 0:
            raise Exception(" duplicate_keys {0}".format(duplicate_keys))
        return map
Example #37
0
class QueryWhitelistTests(QueryTests):
    def setUp(self):
        super(QueryWhitelistTests, self).setUp()
        self.shell = RemoteMachineShellConnection(self.master)
        self.info = self.shell.extract_remote_info()
        if self.info.type.lower() == 'windows':
            self.curl_path = "%scurl" % self.path
            self.file_path = "Filec:\\ProgramFiles\\Couchbase\\Server\\bin\\..\\var\\lib\\couchbase\\n1qlcerts\\curl_whitelist"
            self.lowercase_file_path = "filec:\\ProgramFiles\\Couchbase\\Server\\bin\\..\\var\\lib\\couchbase\\n1qlcerts\\curl_whitelist"
        else:
            self.curl_path = "curl"
            self.file_path = "File/opt/couchbase/bin/../var/lib/couchbase/n1qlcerts/curl_whitelist"
            self.lowercase_file_path = "file/opt/couchbase/bin/../var/lib/couchbase/n1qlcerts/curl_whitelist"
        self.rest = RestConnection(self.master)
        self.cbqpath = '%scbq' % self.path + " -e %s:%s -q -u %s -p %s"\
                                             % (self.master.ip, self.n1ql_port, self.rest.username, self.rest.password)
        self.query_service_url = "'http://%s:%s/query/service'" % (
            self.master.ip, self.n1ql_port)
        self.api_port = self.input.param("api_port", 8094)
        self.load_sample = self.input.param("load_sample", False)
        self.create_users = self.input.param("create_users", False)
        self.full_access = self.input.param("full_access", True)
        self.run_cbq_query('delete from system:prepareds')
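
    # Hedged reference for the tests below (not part of the original class): the
    # curl_whitelist file written by self.shell.create_whitelist() is assumed to be
    # a JSON object with an "all_access" boolean plus optional "allowed_urls" /
    # "disallowed_urls" lists, e.g.
    #
    #     {"all_access": false,
    #      "allowed_urls": ["https://maps.googleapis.com"],
    #      "disallowed_urls": ["https://jira.atlassian.com"]}
    #
    # Its on-disk location is the n1qlcerts/curl_whitelist path captured in
    # self.file_path above.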

    def suite_setUp(self):
        super(QueryWhitelistTests, self).suite_setUp()
        # Create the users necessary for the RBAC tests in curl
        if self.create_users:
            testuser = [{
                'id': 'no_curl',
                'name': 'no_curl',
                'password': '******'
            }, {
                'id': 'curl',
                'name': 'curl',
                'password': '******'
            }, {
                'id': 'curl_no_insert',
                'name': 'curl_no_insert',
                'password': '******'
            }]
            RbacBase().create_user_source(testuser, 'builtin', self.master)

            noncurl_permissions = 'bucket_full_access[*]:query_select[*]:query_update[*]:' \
                                  'query_insert[*]:query_delete[*]:query_manage_index[*]:' \
                                  'query_system_catalog'
            curl_permissions = 'bucket_full_access[*]:query_select[*]:query_update[*]:' \
                               'query_insert[*]:query_delete[*]:query_manage_index[*]:' \
                               'query_system_catalog:query_external_access'
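            # The only difference between the two role sets is query_external_access,
            # which is the permission that gates use of the curl() function in the
            # tests below.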
            # Assign user to role
            role_list = [{
                'id': 'no_curl',
                'name': 'no_curl',
                'roles': '%s' % noncurl_permissions
            }, {
                'id': 'curl',
                'name': 'curl',
                'roles': '%s' % curl_permissions
            }]
            temp = RbacBase().add_user_role(role_list, self.rest, 'builtin')

    def tearDown(self):
        super(QueryWhitelistTests, self).tearDown()

    def suite_tearDown(self):
        super(QueryWhitelistTests, self).suite_tearDown()

    '''Test running a curl command without a whitelist present'''

    def test_no_whitelist(self):
        # The query that curl will send to couchbase
        n1ql_query = 'select * from default limit 5'
        error_msg = "Errorevaluatingprojection.-cause:%s.jsondoesnotexistonnode" % (
            self.file_path)
        # This is the query that the cbq-engine will execute
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], error_msg))

    '''Test running a curl command with an empty whitelist'''

    def test_empty_whitelist(self):
        self.shell.create_whitelist(self.n1ql_certs_path, {})
        n1ql_query = 'select * from default limit 5'
        error_msg = "Errorevaluatingprojection.-cause:%s.jsoncontainsemptyJSONobjectonnode" % (
            self.file_path)

        # This is the query that the cbq-engine will execute
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], error_msg))

        error_msg = "Errorevaluatingprojection.-cause:all_accessshouldbebooleanvaluein%s.jsononnode"\
                    % (self.lowercase_file_path)

        self.shell.create_whitelist(self.n1ql_certs_path, {
            "all_access": None,
            "allowed_urls": None,
            "disallowed_urls": None
        })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], error_msg))

    '''Test running a curl command with whitelists that are invalid'''

    def test_invalid_whitelist(self):
        self.shell.create_whitelist(self.n1ql_certs_path, "thisisnotvalid")
        n1ql_query = 'select * from default limit 5'
        error_msg= "Errorevaluatingprojection.-cause:%s.jsoncontainsinvalidJSONonnode" % \
                   (self.file_path)
        # This is the query that the cbq-engine will execute
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], error_msg))

        error_msg = "Errorevaluatingprojection.-cause:all_accessshouldbebooleanvaluein%s.jsononnode" \
                    % (self.lowercase_file_path)

        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": "hello",
                "allowed_urls": ["goodbye"],
                "disallowed_urls": ["invalid"]
            })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], error_msg))

    '''Test running a curl command with a whitelist that contains the field all_access: True and also
       invalid/fake fields'''

    def test_basic_all_access_true(self):
        n1ql_query = 'select * from default limit 5'
        self.shell.create_whitelist(self.n1ql_certs_path, {"all_access": True})
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        expected_result = self.run_cbq_query('select * from default limit 5')
        self.assertEqual(json_curl['results'][0]['$1']['results'],
                         expected_result['results'])

        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

        self.shell.create_whitelist(self.n1ql_certs_path, {
            "all_access": True,
            "fake_field": "blahahahahaha",
            "fake_url": "fake"
        })

        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

        self.shell.create_whitelist(self.n1ql_certs_path, {
            "fake_field": "blahahahahaha",
            "all_access": True,
            "fake_url": "fake"
        })

        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test all_access: True with nonsense in the allowed/disallowed fields as well as nothing
       in the allowed/disallowed fields'''

    def test_all_access_true(self):
        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": True,
                "allowed_urls": ["blahahahahaha"],
                "disallowed_urls": ["fake"]
            })
        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

        self.shell.create_whitelist(self.n1ql_certs_path, {
            "all_access": True,
            "allowed_urls": None,
            "disallowed_urls": None
        })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test what happens if you give a disallowed_urls field as well as an all_access field; all_access
       should take precedence over the disallowed_urls field'''

    def test_all_access_true_disallowed_url(self):
        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": True,
                "disallowed_urls": ["https://maps.googleapis.com"]
            })
        curl_output = self.shell.execute_command(
            "%s --get https://maps.googleapis.com/maps/api/geocode/json "
            "-d 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'"
            % self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test what happens if you give an allowed_urls field as well as an all_access field; all_access
       should take precedence over the allowed_urls field'''

    def test_all_access_true_allowed_url(self):
        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": True,
                "allowed_urls": ["https://maps.googleapis.com"]
            })
        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test what happens when you set the all_access field multiple times, or try to give it multiple
       values'''

    def test_multiple_all_access(self):
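        # Note (added for clarity, not in the original test): Python dict literals
        # keep only the last occurrence of a duplicate key, so the first whitelist
        # below is effectively {"all_access": False} and the second
        # {"all_access": True}, which is what the two assertions rely on.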
        self.shell.create_whitelist(self.n1ql_certs_path, {
            "all_access": True,
            "all_access": False
        })
        error_msg = "Errorevaluatingprojection.-cause:URLendpointisn'twhitelisted" \
                    "https://jira.atlassian.com/rest/api/latest/issue/JRA-9onnode"

        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

        self.shell.create_whitelist(self.n1ql_certs_path, {
            "all_access": False,
            "all_access": True
        })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

        error_msg = "Errorevaluatingprojection.-cause:all_accessshouldbebooleanvaluein%s.jsononnode" \
                    % (self.lowercase_file_path)

        self.shell.create_whitelist(self.n1ql_certs_path,
                                    {"all_access": [True, False]})
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

    '''Test that the whitelist enforces that the allowed_urls field must be given as a list'''

    def test_invalid_allowed_url(self):
        self.shell.create_whitelist(self.n1ql_certs_path, {
            "all_access": False,
            "allowed_urls": "blahblahblah"
        })
        error_msg = "Errorevaluatingprojection.-cause:allowed_urlsshouldbelistofurlsin%s.jsononnode" \
                    % (self.lowercase_file_path)

        n1ql_query = 'select * from default limit 5'
        # This is the query that the cbq-engine will execute
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], error_msg))

    '''Test the allowed_urls field: curl against an endpoint not in allowed_urls should fail, then
       curl against an endpoint in allowed_urls should succeed'''

    def test_allowed_url(self):
        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": False,
                "allowed_urls": ["https://maps.googleapis.com"]
            })
        error_msg = "Errorevaluatingprojection.-cause:URLendpointisn'twhitelisted" \
                    "https://jira.atlassian.com/rest/api/latest/issue/JRA-9onnode"

        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

        curl_output = self.shell.execute_command(
            "%s --get https://maps.googleapis.com/maps/api/geocode/json "
            "-d 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'"
            % self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test the disallowed_urls field: curl against an endpoint not in disallowed_urls and against an
       endpoint in disallowed_urls; both should fail since all_access is False'''

    def test_disallowed_url(self):
        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": False,
                "disallowed_urls": ["https://maps.googleapis.com"]
            })
        error_msg = "Errorevaluatingprojection.-cause:URLendpointisn'twhitelisted" \
                    "https://jira.atlassian.com/rest/api/latest/issue/JRA-9onnode"

        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

        error_msg = "Errorevaluatingprojection.-cause:URLendpointisn'twhitelisted" \
                    "https://maps.googleapis.com/maps/api/geocode/jsononnode"

        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

    '''Test that the disallowed_urls field takes precedence over allowed_urls'''

    def test_disallowed_precedence(self):
        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access":
                False,
                "allowed_urls":
                ["https://maps.googleapis.com/maps/api/geocode/json"],
                "disallowed_urls": ["https://maps.googleapis.com"]
            })
        error_msg = "Errorevaluatingprojection.-cause:URLendpointisn'twhitelisted" \
                    "https://maps.googleapis.com/maps/api/geocode/jsononnode"

        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access":
                False,
                "allowed_urls":
                ["https://maps.googleapis.com/maps/api/geocode/json"],
                "disallowed_urls":
                ["https://maps.googleapis.com/maps/api/geocode/json"]
            })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

    '''Test a valid allowed_urls list combined with an invalid disallowed_urls value'''

    def test_allowed_invalid_disallowed(self):
        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": False,
                "allowed_urls": ["https://maps.googleapis.com"],
                "disallowed_urls": ["blahblahblah"]
            })
        error_msg = "Errorevaluatingprojection.-cause:URLendpointisn'twhitelisted" \
                    "https://jira.atlassian.com/rest/api/latest/issue/JRA-9onnode"

        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

        curl_output = self.shell.execute_command(
            "%s --get https://maps.googleapis.com/maps/api/geocode/json "
            "-d 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'"
            % self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": False,
                "allowed_urls": ["https://maps.googleapis.com"],
                "disallowed_urls": "blahblahblah"
            })
        error_msg = "Errorevaluatingprojection.-cause:disallowed_urlsshouldbelistofurlsin%s.jsononnode" \
                    % (self.lowercase_file_path)

        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

    '''Test a valid disallowed_urls list combined with an invalid allowed_urls value'''

    def test_disallowed_invalid_allowed(self):
        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": False,
                "allowed_urls": ["blahblahblah"],
                "disallowed_urls": ["https://maps.googleapis.com"]
            })

        error_msg = "Errorevaluatingprojection.-cause:URLendpointisn'twhitelisted" \
                    "https://maps.googleapis.com/maps/api/geocode/jsononnode"

        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

        self.shell.create_whitelist(
            self.n1ql_certs_path, {
                "all_access": False,
                "allowed_urls": "blahblahblah",
                "disallowed_urls": ["https://maps.googleapis.com"]
            })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], error_msg))

    '''Curl against localhost should fail, even from the local node, unless localhost is whitelisted'''

    def test_localhost(self):
        self.shell.create_whitelist(self.n1ql_certs_path,
                                    {"all_access": False})
        error_msg ="Errorevaluatingprojection.-cause:URLendpointisn'twhitelisted" \
                   "http://localhost:8093/query/serviceonnode"

        n1ql_query = 'select * from default limit 5'
        query = "select curl('http://localhost:8093/query/service', {'data' : 'statement=%s'," \
                "'user':'******'})" % (n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], error_msg))
Example #38
0
    def test_backup_upgrade_restore_default(self):
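        # Outline (added for readability, not in the original test): install the
        # initial version on all nodes, load the default bucket, back it up with
        # cbbackup from the worker node, upgrade every node to the final version,
        # then restore with cbrestore and verify the inserted keys survived.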
        if len(self.servers) < 2:
            self.log.error("At least 2 servers required for this test ..")
            return
        original_set = copy.copy(self.servers)
        worker = self.servers[len(self.servers) - 1]
        self.servers = self.servers[:len(self.servers) - 1]
        shell = RemoteMachineShellConnection(self.master)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        fin = o[0]
        shell.disconnect()
        initial_version = self.input.param("initial_version", fin)
        final_version = self.input.param("final_version", fin)
        if initial_version == final_version:
            self.log.error("Same initial and final versions ..")
            return
        if not final_version.startswith('2.0'):
            self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
            return
        builds, changes = BuildQuery().get_all_builds(version=final_version)
        product = 'couchbase-server-enterprise'
        #CASE where the worker isn't a 2.0+
        worker_flag = 0
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        temp = o[0]
        if not temp.startswith('2.0'):
            worker_flag = 1
        if worker_flag == 1:
            self.log.info("Loading version {0} on worker.. ".format(final_version))
            remote = RemoteMachineShellConnection(worker)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                  info.architecture_type, final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            remote.disconnect()

        remote_tmp = "{1}/{0}".format("backup", "/root")
        perm_comm = "mkdir -p {0}".format(remote_tmp)
        if initial_version != fin:
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info("Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                      info.architecture_type, initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

        self.common_setUp()
        bucket = "default"
        if len(self.servers) > 1:
            self.add_nodes_and_rebalance()
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket,
                                                                                             ram_load_ratio=0.5,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             delete_ratio=0.1,
                                                                                             number_of_threads=2)
        if len(self.servers) > 1:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command(perm_comm)
        shell.log_command_output(o, r)
        shell.disconnect()

        #Backup
        #BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command("/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
                                                            self.master.ip, self.master.port, remote_tmp))
        shell.disconnect()
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        time.sleep(30)

        #Upgrade
        for server in self.servers:
            self.log.info("Upgrading to current version {0}".format(final_version))
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            new_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                info.architecture_type, final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(new_build)
            remote.install_server(new_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(server.rest_username, server.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
        time.sleep(30)

        #Restore
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        #BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command("/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
                                                            self.master.ip, self.master.port, remote_tmp, bucket))
        shell.disconnect()
        time.sleep(60)
        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
        time.sleep(30)
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest = RestConnection(self.master)
        helper = RestHelper(rest)
        nodes = rest.node_statuses()
        master_id = rest.get_nodes_self().id
        if len(self.servers) > 1:
            removed = helper.remove_nodes(knownNodes=[node.id for node in nodes],
                                          ejectedNodes=[node.id for node in nodes if node.id != master_id],
                                          wait_for_rebalance=True)

        shell = RemoteMachineShellConnection(worker)
        shell.remove_directory(remote_tmp)
        shell.disconnect()

        self.servers = copy.copy(original_set)
        if initial_version == fin:
            builds, changes = BuildQuery().get_all_builds(version=initial_version)
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info("Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                      info.architecture_type, initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()
Example #39
0
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if initial_version.startswith(
                "1.7") and input.test_params['version'].startswith("1.8"):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        remote = RemoteMachineShellConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        # check to see if we are installing from latestbuilds or releases
        # note: for newer releases (1.8.0) even release versions can have the
        #  form 1.8.0r-55
        if re.search('r', initial_version):
            builds, changes = BuildQuery().get_all_builds()
            older_build = BuildQuery().find_membase_build(
                builds,
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        else:
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        remote.stop_membase()
        remote.stop_couchbase()
        remote.download_build(older_build)
        # now install the older build
        remote.membase_install(older_build)
        rest = RestConnection(server)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self,
                                             server,
                                             bucket_data,
                                             howmany=2)
        version = input.test_params['version']

        appropriate_build = _get_build(servers[0],
                                       version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build,
                               save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        #verify admin_creds still set

        # note: assertTrue only checks that implementationVersion is truthy; the
        # second argument acts as the failure message, not as an expected value
        self.assertTrue(pools_info['implementationVersion'],
                        appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'bucket-0', rest),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], server,
                        bucket.name, self)
Example #40
0
class XdcrCLITest(CliBaseTest):

    def setUp(self):
        TestInputSingleton.input.test_params["default_bucket"] = False
        super(XdcrCLITest, self).setUp()
        self.ldapUser = self.input.param("ldapUser", 'Administrator')
        self.ldapPass = self.input.param('ldapPass', 'password')
        self.source = self.input.param('source', 'ns_server')
        self.__user = self.input.param("ldapUser", 'Administrator')
        self.__password = self.input.param('ldapPass', 'password')
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        os_type = info.type.lower()
        self.role = self.input.param("role", "admin")
        if self.role in ['bucket_admin', 'views_admin']:
            self.role = self.role + "[*]"
        self.log.info("value of self.role is {0}".format(self.role))
        if os_type == 'windows' and self.source == 'saslauthd':
            raise Exception("LDAP tests cannot run on Windows")
        else:
            if self.source == 'saslauthd':
                rest = RestConnection(self.master)
                self.setupLDAPSettings(rest)
                self.set_user_role(rest,self.ldapUser,user_role=self.role)


    def tearDown(self):
        for server in self.servers:
            rest = RestConnection(server)
            rest.remove_all_remote_clusters()
            rest.remove_all_replications()
            rest.remove_all_recoveries()
        super(XdcrCLITest, self).tearDown()

    #Wrapper around auditmain
    def checkConfig(self, eventID, host, expectedResults):
        Audit = audit(eventID=eventID, host=host)
        fieldVerification, valueVerification = Audit.validateEvents(expectedResults)
        self.assertTrue(fieldVerification, "One of the fields is not matching")
        self.assertTrue(valueVerification, "Values for one of the fields is not matching")

    def setupLDAPSettings (self,rest):
        api = rest.baseUrl + 'settings/saslauthdAuth'
        params = urllib.urlencode({"enabled":'true',"admins":[],"roAdmins":[]})
        status, content, header = rest._http_request(api, 'POST', params)
        return status, content, header

    def __execute_cli(self, cli_command, options, cluster_host="localhost", user=None, password=None):
        if user is None:
            user = self.__user
            password = self.__password
        return self.shell.execute_couchbase_cli(
                                                cli_command=cli_command,
                                                options=options,
                                                cluster_host=cluster_host,
                                                user=user,
                                                password=password)
    def set_user_role(self,rest,username,user_role='admin'):
        payload = "name=" + username + "&roles=" + user_role
        rest.set_user_roles(user_id=username,payload=payload)

    def _validate_roles(self, output, result):
        print(output)
        # Pass when the expected marker (e.g. SUCCESS or Forbidden) appears in any output line.
        final_result = False
        for line in output:
            if result in line:
                final_result = True
                break
        self.assertTrue(final_result, "Incorrect message for the role")

    def __xdcr_setup_create(self):
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
                     'password': '******'}]
        rolelist = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
                     'roles': 'admin'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
        # xdcr_hostname=the number of server in ini file to add to master as replication
        xdcr_cluster_name = self.input.param("xdcr-cluster-name", None)
        xdcr_hostname = self.input.param("xdcr-hostname", None)
        xdcr_username = self.input.param("xdcr-username", None)
        xdcr_password = self.input.param("xdcr-password", None)
        demand_encryption = self.input.param("demand-encryption", 0)
        xdcr_cert = self.input.param("xdcr-certificate", None)
        wrong_cert = self.input.param("wrong-certificate", None)

        cli_command = "xdcr-setup"
        options = "--create"
        options += (" --xdcr-cluster-name=\'{0}\'".format(xdcr_cluster_name), "")[xdcr_cluster_name is None]
        print ("Value of xdcr_home is {0}".format(xdcr_hostname))
        if xdcr_hostname is not None:
            RbacBase().create_user_source(testuser, 'builtin', self.servers[xdcr_hostname])
            RbacBase().add_user_role(rolelist, RestConnection(self.servers[xdcr_hostname]), 'builtin')
            options += " --xdcr-hostname={0}".format(self.servers[xdcr_hostname].ip)
        options += (" --xdcr-username={0}".format(xdcr_username), "")[xdcr_username is None]
        options += (" --xdcr-password={0}".format(xdcr_password), "")[xdcr_password is None]
        options += (" --xdcr-demand-encryption={0}".format(demand_encyrption))

        cluster_host = self.servers[xdcr_hostname].ip
        output, _ = self.__execute_cli(cli_command="ssl-manage", options="--retrieve-cert={0}".format(xdcr_cert), cluster_host=cluster_host, user='******', password='******')
        options += (" --xdcr-certificate={0}".format(xdcr_cert), "")[xdcr_cert is None]
        #self.assertNotEqual(output[0].find("SUCCESS"), -1, "ssl-manage CLI failed to retrieve certificate")

        output, error = self.__execute_cli(cli_command=cli_command, options=options)
        return output, error, xdcr_cluster_name, xdcr_hostname, cli_command, options

    def testXDCRSetup(self):
        if self.role in ['views_admin[*]','bucket_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin','replication_admin']:
            result = 'SUCCESS'
        output, _, xdcr_cluster_name, xdcr_hostname, cli_command, options = self.__xdcr_setup_create()
        self._validate_roles(output,result)

        if xdcr_cluster_name:
            options = options.replace("--create ", "--edit ")
            output, _ = self.__execute_cli(cli_command=cli_command, options=options)
            self._validate_roles(output,result)

        if not xdcr_cluster_name:
            options = "--delete --xdcr-cluster-name=\'{0}\'".format("remote cluster")
        else:
            options = "--delete --xdcr-cluster-name=\'{0}\'".format(xdcr_cluster_name)
        output, _ = self.__execute_cli(cli_command=cli_command, options=options)
        self._validate_roles(output,result)



    def testXdcrReplication(self):
        if self.role in ['views_admin[*]','bucket_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin','replication_admin']:
            result = 'SUCCESS'

        '''xdcr-replicate OPTIONS:
        --create                               create and start a new replication
        --delete                               stop and cancel a replication
        --list                                 list all xdcr replications
        --xdcr-from-bucket=BUCKET              local bucket name to replicate from
        --xdcr-cluster-name=CLUSTERNAME        remote cluster to replicate to
        --xdcr-to-bucket=BUCKETNAME            remote bucket to replicate to'''
        to_bucket = self.input.param("xdcr-to-bucket", None)
        from_bucket = self.input.param("xdcr-from-bucket", None)
        error_expected = self.input.param("error-expected", False)
        replication_mode = self.input.param("replication_mode", None)
        pause_resume = self.input.param("pause-resume", None)
        _, _, xdcr_cluster_name, xdcr_hostname, _, _ = self.__xdcr_setup_create()
        cli_command = "xdcr-replicate"
        options = "--create"
        options += (" --xdcr-cluster-name=\'{0}\'".format(xdcr_cluster_name), "")[xdcr_cluster_name is None]
        options += (" --xdcr-from-bucket=\'{0}\'".format(from_bucket), "")[from_bucket is None]
        options += (" --xdcr-to-bucket=\'{0}\'".format(to_bucket), "")[to_bucket is None]
        options += (" --xdcr-replication-mode=\'{0}\'".format(replication_mode), "")[replication_mode is None]
        self.bucket_size = self._get_bucket_size(self.quota, 1)
        if from_bucket:
            bucket_params = self._create_bucket_params(server=self.master, size=self.bucket_size,
                                                              replicas=self.num_replicas,
                                                              enable_replica_index=self.enable_replica_index)
            self.cluster.create_default_bucket(bucket_params)

        if to_bucket:
            bucket_params = self._create_bucket_params(server=self.servers[xdcr_hostname], size=self.bucket_size,
                                                              replicas=self.num_replicas,
                                                              enable_replica_index=self.enable_replica_index)
            self.cluster.create_default_bucket(bucket_params)

        output, _ = self.__execute_cli(cli_command, options)
        self._validate_roles(output,result)

        self.sleep(8)
        options = "--list"
        output, _ = self.__execute_cli(cli_command, options)
        for value in output:
            if value.startswith("stream id"):
                replicator = value.split(":")[1].strip()
                if pause_resume is not None:
                    # pause replication
                    options = "--pause"
                    options += (" --xdcr-replicator={0}".format(replicator))
                    output, _ = self.__execute_cli(cli_command, options)
                    self._validate_roles(output,result)

                    self.sleep(60)
                    # resume replication
                    options = "--resume"
                    options += (" --xdcr-replicator={0}".format(replicator))
                    output, _ = self.__execute_cli(cli_command, options)
                    self._validate_roles(output,result)

                options = "--delete"
                options += (" --xdcr-replicator={0}".format(replicator))
                output, _ = self.__execute_cli(cli_command, options)
                self._validate_roles(output,result)
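
The option strings in __xdcr_setup_create and testXdcrReplication are assembled with the (value, "")[flag is None] tuple-indexing idiom: the boolean index picks the empty string when a parameter was not supplied, so only provided flags end up on the couchbase-cli command line. A standalone sketch of the same pattern (flag names are copied from the example purely for illustration):

def build_xdcr_options(cluster_name=None, username=None, password=None):
    # False indexes element 0 (include the flag), True indexes element 1 (skip it).
    options = "--create"
    options += (" --xdcr-cluster-name='{0}'".format(cluster_name), "")[cluster_name is None]
    options += (" --xdcr-username={0}".format(username), "")[username is None]
    options += (" --xdcr-password={0}".format(password), "")[password is None]
    return options

# build_xdcr_options(cluster_name="remote", username="Administrator")
# -> "--create --xdcr-cluster-name='remote' --xdcr-username=Administrator"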
예제 #41
0
class NodeHelper:
    def __init__(self, node):
        self.node = node
        self.ip = node.ip
        self.params = params
        self.build = None
        self.queue = None
        self.thread = None
        self.rest = None
        self.install_success = False
        self.connect_ok = False
        self.shell = None
        self.info = None
        self.enable_ipv6 = False
        self.check_node_reachable()
        self.nonroot = self.shell.nonroot
        self.actions_dict = install_constants.NON_ROOT_CMDS \
            if self.nonroot else install_constants.CMDS

    def check_node_reachable(self):
        start_time = time.time()
        # Retry for up to 60 seconds (roughly 3 attempts, 20s apart)
        while time.time() < start_time + 60:
            try:
                self.shell = RemoteMachineShellConnection(self.node)
                self.info = self.shell.extract_remote_info()
                self.connect_ok = True
                break
            except Exception as e:
                log.warning("{0} unreachable, {1}, retrying..".format(
                    self.ip, e))
                time.sleep(20)

    def get_os(self):
        os = self.info.distribution_version.lower()
        to_be_replaced = ['\n', ' ', 'gnu/linux']
        for _ in to_be_replaced:
            if _ in os:
                os = os.replace(_, '')
        if self.info.deliverable_type == "dmg":
            major_version = os.split('.')
            os = major_version[0] + '.' + major_version[1]
        return os

    def uninstall_cb(self):
        need_nonroot_relogin = False
        if self.shell.nonroot:
            self.node.ssh_username = "******"
            self.shell = RemoteMachineShellConnection(self.node)
            need_nonroot_relogin = True
        if self.actions_dict[self.info.deliverable_type]["uninstall"]:
            cmd = self.actions_dict[self.info.deliverable_type]["uninstall"]
            if "msi" in cmd:
                '''WINDOWS UNINSTALL'''
                self.shell.terminate_processes(
                    self.info, [s for s in testconstants.WIN_PROCESSES_KILLED])
                self.shell.terminate_processes(
                    self.info,
                    [s + "-*" for s in testconstants.COUCHBASE_FROM_VERSION_3])
                installed_version, _ = self.shell.execute_command(
                    "cat " +
                    install_constants.DEFAULT_INSTALL_DIR["WINDOWS_SERVER"] +
                    "VERSION.txt")
                if len(installed_version) == 1:
                    installed_msi, _ = self.shell.execute_command(
                        "cd " +
                        install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] +
                        "; ls *" + installed_version[0] + "*.msi")
                    if len(installed_msi) == 1:
                        self.shell.execute_command(self.actions_dict[
                            self.info.deliverable_type]["uninstall"].replace(
                                "installed-msi", installed_msi[0]))
                for browser in install_constants.WIN_BROWSERS:
                    self.shell.execute_command("taskkill /F /IM " + browser +
                                               " /T")
            else:
                duration, event, timeout = install_constants.WAIT_TIMES[
                    self.info.deliverable_type]["uninstall"]
                start_time = time.time()
                while time.time() < start_time + timeout:
                    try:
                        o, e = self.shell.execute_command(
                            cmd, debug=self.params["debug_logs"])
                        if o == ['1']:
                            break
                        self.wait_for_completion(duration, event)
                    except Exception as e:
                        log.warning(
                            "Exception {0} occurred on {1}, retrying..".format(
                                e, self.ip))
                        self.wait_for_completion(duration, event)
            self.shell.terminate_processes(
                self.info, install_constants.PROCESSES_TO_TERMINATE)

        if need_nonroot_relogin:
            self.node.ssh_username = "******"
            self.shell = RemoteMachineShellConnection(self.node)

    def pre_install_cb(self):
        if self.actions_dict[self.info.deliverable_type]["pre_install"]:
            cmd = self.actions_dict[self.info.deliverable_type]["pre_install"]
            duration, event, timeout = install_constants.WAIT_TIMES[
                self.info.deliverable_type]["pre_install"]
            if cmd is not None and "HDIUTIL_DETACH_ATTACH" in cmd:
                start_time = time.time()
                while time.time() < start_time + timeout:
                    try:
                        ret = hdiutil_attach(self.shell, self.build.path)
                        if ret:
                            break
                        self.wait_for_completion(duration, event)
                    except Exception as e:
                        log.warning(
                            "Exception {0} occurred on {1}, retrying..".format(
                                e, self.ip))
                        self.wait_for_completion(duration, event)

    def set_vm_swappiness_and_thp(self):
        # set vm_swapiness to 0, and thp to never by default
        # Check if this key is defined for this distribution/os
        if "set_vm_swappiness_and_thp" in self.actions_dict[
                self.info.deliverable_type]:
            try:
                cmd = self.actions_dict[
                    self.info.deliverable_type]["set_vm_swappiness_and_thp"]
                o, e = self.shell.execute_command(
                    cmd, debug=self.params["debug_logs"])
            except Exception as e:
                log.warning(
                    "Could not set vm swappiness/THP.Exception {0} occurred on {1} "
                    .format(e, self.ip))

    def install_cb(self):
        self.pre_install_cb()
        self.set_vm_swappiness_and_thp()
        if self.actions_dict[self.info.deliverable_type]["install"]:
            # Initialize here so the suse branch cannot leave these undefined.
            cmd_d = None
            cmd_debug = None
            if "suse" in self.get_os():
                cmd = self.actions_dict[
                    self.info.deliverable_type]["suse_install"]
            else:
                cmd = self.actions_dict[self.info.deliverable_type]["install"]
                cmd_d = cmd
            cmd = cmd.replace("buildbinary", self.build.name)
            cmd = cmd.replace("buildpath", self.build.path)
            if cmd_d and self.get_os() in install_constants.DEBUG_INFO_SUPPORTED:
                cmd_debug = cmd_d.replace("buildpath", self.build.debug_path)
            cmd = cmd.replace("mountpoint",
                              "/tmp/couchbase-server-" + params["version"])
            duration, event, timeout = install_constants.WAIT_TIMES[
                self.info.deliverable_type]["install"]
            start_time = time.time()
            while time.time() < start_time + timeout:
                try:
                    o, e = self.shell.execute_command(
                        cmd, debug=self.params["debug_logs"])
                    if o == ['1']:
                        break
                    self.wait_for_completion(duration, event)
                except Exception as e:
                    log.warning(
                        "Exception {0} occurred on {1}, retrying..".format(
                            e, self.ip))
                    self.wait_for_completion(duration, event)
            if cmd_debug is not None:
                self.shell_debug = RemoteMachineShellConnection(self.node)
                start_time = time.time()
                while time.time() < start_time + timeout:
                    try:
                        ou, er = self.shell_debug.execute_command(
                            cmd_debug, debug=self.params["debug_logs"])
                        if ou == ['1']:
                            break
                        self.wait_for_completion(duration, event)
                    except Exception as e:
                        log.warning(
                            "Exception {0} occurred on {1}, retrying..".format(
                                e, self.ip))
                        self.wait_for_completion(duration, event)
        self.post_install_cb()

    def post_install_cb(self):
        duration, event, timeout = install_constants.WAIT_TIMES[
            self.info.deliverable_type]["post_install"]
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                if self.actions_dict[
                        self.info.deliverable_type]["post_install"]:
                    cmd = self.actions_dict[
                        self.info.deliverable_type]["post_install"].replace(
                            "buildversion", self.build.version)
                    o, e = self.shell.execute_command(
                        cmd, debug=self.params["debug_logs"])
                    if o == ['1']:
                        break
                    else:
                        if self.actions_dict[self.info.deliverable_type][
                                "post_install_retry"]:
                            if self.info.deliverable_type == "msi":
                                check_if_downgrade, _ = self.shell.execute_command(
                                    "cd " + install_constants.
                                    DOWNLOAD_DIR["WINDOWS_SERVER"] +
                                    "; vi +\"set nobomb | set fenc=ascii | x\" install_status.txt; "
                                    "grep 'Adding WIX_DOWNGRADE_DETECTED property' install_status.txt"
                                )
                                print((check_if_downgrade * 10))
                            else:
                                self.shell.execute_command(
                                    self.actions_dict[
                                        self.info.deliverable_type]
                                    ["post_install_retry"],
                                    debug=self.params["debug_logs"])
                        self.wait_for_completion(duration, event)
            except Exception as e:
                log.warning("Exception {0} occurred on {1}, retrying..".format(
                    e, self.ip))
                self.wait_for_completion(duration, event)

    def set_cbft_env_options(self, name, value, retries=3):
        if self.get_os() in install_constants.LINUX_DISTROS:
            while retries > 0:
                if self.shell.file_exists("/opt/couchbase/bin/",
                                          "couchbase-server"):
                    ret, _ = self.shell.execute_command(
                        install_constants.CBFT_ENV_OPTIONS[name].format(value))
                    self.shell.stop_server()
                    self.shell.start_server()
                    time.sleep(10)
                    if ret == ['1']:
                        log.info("{0} set to {1} on {2}".format(
                            name, value, self.ip))
                        break
                else:
                    time.sleep(20)
                retries -= 1
            else:
                print_result_and_exit(
                    "Unable to set {0} on {1}".format(name, self.ip))

    def _get_cli_path(self):
        if self.get_os() in install_constants.LINUX_DISTROS:
            return install_constants.DEFAULT_CLI_PATH["LINUX_DISTROS"]
        elif self.get_os() in install_constants.MACOS_VERSIONS:
            return install_constants.DEFAULT_CLI_PATH["MACOS_VERSIONS"]
        elif self.get_os() in install_constants.WINDOWS_SERVER:
            return install_constants.DEFAULT_CLI_PATH["WINDOWS_SERVER"]

    def _set_ip_version(self):
        if params["enable_ipv6"]:
            self.enable_ipv6 = True
            if self.node.ip.startswith("["):
                hostname = self.node.ip[self.node.ip.find("[") +
                                        1:self.node.ip.find("]")]
            else:
                hostname = self.node.ip
            cmd = install_constants.NODE_INIT["ipv6"]\
                .format(self._get_cli_path(),
                        self.ip,
                        hostname,
                        self.node.rest_username,
                        self.node.rest_password)
        else:
            cmd = install_constants.NODE_INIT["ipv4"]\
                .format(self._get_cli_path(),
                        self.ip,
                        self.node.rest_username,
                        self.node.rest_password)

        self.shell.execute_command(cmd)

    def pre_init_cb(self):
        try:
            self._set_ip_version()

            if params["fts_query_limit"] > 0:
                self.set_cbft_env_options("fts_query_limit",
                                          params["fts_query_limit"])
        except Exception as e:
            log.warning("Exception {0} occurred during pre-init".format(e))

    def post_init_cb(self):
        # Optionally change node name and restart server
        if params.get('use_domain_names', False):
            RemoteUtilHelper.use_hostname_for_server_settings(self.node)

        # Optionally disable consistency check
        if params.get('disable_consistency', False):
            self.rest.set_couchdb_option(section='couchdb',
                                         option='consistency_check_ratio',
                                         value='0.0')

    def get_services(self):
        if not self.node.services:
            return ["kv"]
        return self.node.services.split(',')

    def allocate_memory_quotas(self):
        kv_quota = 0
        start_time = time.time()
        while time.time() < start_time + 30 and kv_quota == 0:
            # Re-read node info on each pass; mcdMemoryReserved can still be 0 right after init.
            info = self.rest.get_nodes_self()
            kv_quota = int(info.mcdMemoryReserved *
                           testconstants.CLUSTER_QUOTA_RATIO)
            time.sleep(1)

        self.services = self.get_services()
        if "index" in self.services:
            log.info("Setting INDEX memory quota as {0} MB on {1}".format(
                testconstants.INDEX_QUOTA, self.ip))
            self.rest.set_service_mem_quota(
                {CbServer.Settings.INDEX_MEM_QUOTA: testconstants.INDEX_QUOTA})
            kv_quota -= testconstants.INDEX_QUOTA
        if "fts" in self.services:
            log.info("Setting FTS memory quota as {0} MB on {1}".format(
                params["fts_quota"], self.ip))
            self.rest.set_service_mem_quota(
                {CbServer.Settings.FTS_MEM_QUOTA: params["fts_quota"]})
            kv_quota -= params["fts_quota"]
        if "cbas" in self.services:
            log.info("Setting CBAS memory quota as {0} MB on {1}".format(
                testconstants.CBAS_QUOTA, self.ip))
            self.rest.set_service_mem_quota(
                {CbServer.Settings.CBAS_MEM_QUOTA: testconstants.CBAS_QUOTA})
            kv_quota -= testconstants.CBAS_QUOTA
        if "kv" in self.services:
            if kv_quota < testconstants.MIN_KV_QUOTA:
                log.warning(
                    "KV memory quota is {0}MB but needs to be at least {1}MB on {2}"
                    .format(kv_quota, testconstants.MIN_KV_QUOTA, self.ip))
                kv_quota = testconstants.MIN_KV_QUOTA
            log.info("Setting KV memory quota as {0} MB on {1}".format(
                kv_quota, self.ip))
        self.rest.set_service_mem_quota(
            {CbServer.Settings.KV_MEM_QUOTA: kv_quota})

    def init_cb(self):
        duration, event, timeout = install_constants.WAIT_TIMES[
            self.info.deliverable_type]["init"]
        self.wait_for_completion(duration * 2, event)
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                self.pre_init_cb()

                self.rest = RestConnection(self.node)
                # Make sure that data_and index_path are writable by couchbase user
                for path in set([
                        _f
                        for _f in [self.node.data_path, self.node.index_path]
                        if _f
                ]):
                    for cmd in (
                            "rm -rf {0}/*".format(path),
                            "chown -R couchbase:couchbase {0}".format(path)):
                        self.shell.execute_command(cmd)
                self.rest.set_data_path(data_path=self.node.data_path,
                                        index_path=self.node.index_path)
                self.allocate_memory_quotas()
                self.rest.init_node_services(hostname=None,
                                             username=self.node.rest_username,
                                             password=self.node.rest_password,
                                             services=self.get_services())

                if "index" in self.get_services():
                    self.rest.set_indexer_storage_mode(
                        storageMode=params["storage_mode"])

                self.rest.init_cluster(username=self.node.rest_username,
                                       password=self.node.rest_password)
                break
            except Exception as e:
                log.warning("Exception {0} occurred on {1}, retrying..".format(
                    e, self.ip))
                self.wait_for_completion(duration, event)
        self.post_init_cb()

    def wait_for_completion(self, duration, event):
        if params["debug_logs"]:
            log.info(event.format(duration, self.ip))
        time.sleep(duration)

    def cleanup_cb(self):
        cmd = self.actions_dict[self.info.deliverable_type]["cleanup"]
        if cmd:
            try:
                # Delete all but the most recently accessed build binaries
                self.shell.execute_command(cmd,
                                           debug=self.params["debug_logs"])
            except:
                pass
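
Most NodeHelper callbacks above (uninstall_cb, install_cb, post_install_cb, init_cb) repeat the same skeleton: run a shell command, treat ['1'] as success, otherwise sleep and retry until a timeout window closes. A generic sketch of that retry loop, with the command runner passed in as a callable (the function name and defaults are illustrative, not part of the example):

import time
import logging

log = logging.getLogger(__name__)

def run_with_retries(run_cmd, duration=10, timeout=300):
    # run_cmd() should return the command's stdout as a list of lines;
    # ['1'] mirrors the "echo 1 on success" convention used by the install commands.
    start_time = time.time()
    while time.time() < start_time + timeout:
        try:
            if run_cmd() == ['1']:
                return True
        except Exception as e:
            log.warning("Command failed with %s, retrying..", e)
        time.sleep(duration)
    return False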
예제 #42
0
 def test_node_memcached_failure_in_series(self):
     timeout = self.timeout // 2
     status = self.rest.update_autoreprovision_settings(True, 1)
     if not status:
         self.fail('failed to change autoreprovision_settings!')
     self.sleep(5)
     data_lost = False
     for i in reversed(range(len(self.servers))):
         print(self.servers[i])
         operation = random.choice(['stop', 'memcached_failure', 'restart', 'failover', 'reboot'])
         shell = RemoteMachineShellConnection(self.servers[i])
         print("operation", operation)
         if i == 0:
             self.master = self.servers[1]
         if operation == 'stop':
             self._stop_couchbase(self.servers[i])
         elif operation == 'memcached_failure':
             self._pause_couchbase(self.servers[i])
         elif operation == 'restart':
             shell.restart_couchbase()
         elif operation == 'failover':
             RemoteUtilHelper.enable_firewall(self.servers[i])
         elif operation == 'reboot':
             if shell.extract_remote_info().type.lower() == 'windows':
                 o, r = shell.execute_command("shutdown -r -f -t 0")
                 self.sleep(200)
             elif shell.extract_remote_info().type.lower() == 'linux':
                 o, r = shell.execute_command("reboot")
             shell.log_command_output(o, r)
             self.sleep(60)
         self.sleep(40)
         if operation == 'memcached_failure':
             AutoReprovisionBaseTest.wait_for_warmup_or_assert(self.master, 1,
                                                               timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                               self)
         if operation != 'restart' and operation != 'memcached_failure' and operation != 'reboot':
             AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 1,
                                                                 timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                                 self)
         if operation != 'restart':
             RemoteUtilHelper.common_basic_setup([self.servers[i]])
         AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 0,
                                                             timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                             self)
         helper = RestHelper(RestConnection(self.master))
         self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
         self.sleep(40)
         if operation == 'memcached_failure' or operation == 'failover':
             self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
         else:
             if 'kv' in self.servers[i].services and self.replicas > 0:
                 self.assertFalse(helper.is_cluster_rebalanced(), "cluster is balanced")
                 self.rest.rebalance(otpNodes=[node.id for node in self.rest.node_statuses()], ejectedNodes=[])
                 self.assertTrue(self.rest.monitorRebalance())
             else:
                 self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
         buckets = self.rest.get_buckets()
         if self.replicas == 0 and (operation == 'restart' or operation == 'reboot'):
             data_lost = True
         for bucket in buckets:
             if not data_lost:
                 self.verify_loaded_data(self.master, bucket.name, self.loaded_items[bucket.name])
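
The loop above draws one failure operation per node with random.choice and then branches on the string. The same dispatch can be phrased as a dict of callables, which keeps each branch in one place; a minimal sketch with placeholder actions (the real test drives RemoteMachineShellConnection and the AutoReprovision helpers instead):

import random

def inject_random_failure(server, actions=None):
    # Map operation names to callables; the bodies here are placeholders only.
    actions = actions or {
        'stop': lambda s: print("stop couchbase on", s),
        'restart': lambda s: print("restart couchbase on", s),
        'failover': lambda s: print("enable firewall on", s),
        'reboot': lambda s: print("reboot", s),
    }
    operation = random.choice(list(actions))
    actions[operation](server)
    return operation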
예제 #43
0
 def setUp(self):
     super(EventingTools, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=500)
     if self.create_functions_buckets:
         self.bucket_size = 100
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
                                                    replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     handler_code = self.input.param('handler_code', 'bucket_op')
     if handler_code == 'bucket_op':
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE
     elif handler_code == 'bucket_op_with_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_TIMERS
     elif handler_code == 'bucket_op_with_cron_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_CRON_TIMERS
     elif handler_code == 'n1ql_op_with_timers':
         # index is required for delete operation through n1ql
         self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper = N1QLHelper(shell=self.shell,
                                       max_verify=self.max_verify,
                                       buckets=self.buckets,
                                       item_flag=self.item_flag,
                                       n1ql_port=self.n1ql_port,
                                       full_docs_list=self.full_docs_list,
                                       log=self.log, input=self.input,
                                       master=self.master,
                                       use_rest=True
                                       )
         self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
         self.handler_code = HANDLER_CODE.N1QL_OPS_WITH_TIMERS
     else:
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE
     self.backupset = Backupset()
     self.backupset.directory = self.input.param("dir", "/tmp/entbackup")
     self.backupset.name = self.input.param("name", "backup")
     self.backupset.backup_host = self.servers[0]
     self.backupset.cluster_host = self.servers[0]
     self.backupset.cluster_host_username = self.servers[0].rest_username
     self.backupset.cluster_host_password = self.servers[0].rest_password
     self.backupset.restore_cluster_host = self.servers[1]
     self.backupset.restore_cluster_host_username = self.servers[1].rest_username
     self.backupset.restore_cluster_host_password = self.servers[1].rest_password
     self.num_shards = self.input.param("num_shards", None)
     self.debug_logs = self.input.param("debug-logs", False)
     cmd = 'curl -g %s:8091/diag/eval -u Administrator:password ' % self.master.ip
     cmd += '-d "path_config:component_path(bin)."'
     bin_path = subprocess.check_output(cmd, shell=True)
     try:
         bin_path = bin_path.decode()
     except AttributeError:
         pass
     if "bin" not in bin_path:
         self.fail("Check if cb server install on %s" % self.master.ip)
     else:
         self.cli_command_location = bin_path.replace('"', '') + "/"
     shell = RemoteMachineShellConnection(self.servers[0])
     info = shell.extract_remote_info().type.lower()
     self.root_path = LINUX_ROOT_PATH
     self.wget = "wget"
     self.os_name = "linux"
     self.tmp_path = "/tmp/"
     self.long_help_flag = "--help"
     self.short_help_flag = "-h"
     if info == 'linux':
         if self.nonroot:
             base_path = "/home/%s" % self.master.ssh_username
             self.database_path = "%s%s" % (base_path, COUCHBASE_DATA_PATH)
             self.root_path = "/home/%s/" % self.master.ssh_username
     elif info == 'windows':
         self.os_name = "windows"
         self.cmd_ext = ".exe"
         self.wget = "/cygdrive/c/automation/wget.exe"
         self.database_path = WIN_COUCHBASE_DATA_PATH_RAW
         self.root_path = WIN_ROOT_PATH
         self.tmp_path = WIN_TMP_PATH
         self.long_help_flag = "help"
         self.short_help_flag = "h"
         win_format = "C:/Program Files"
          cygwin_format = r"/cygdrive/c/Program\ Files"
         if win_format in self.cli_command_location:
             self.cli_command_location = self.cli_command_location.replace(win_format,
                                                                           cygwin_format)
         self.backupset.directory = self.input.param("dir", WIN_TMP_PATH_RAW + "entbackup")
     elif info == 'mac':
         self.backupset.directory = self.input.param("dir", "/tmp/entbackup")
     else:
         raise Exception("OS not supported.")
     self.backup_validation_files_location = "/tmp/backuprestore" + self.master.ip
     self.backups = []
     self.validation_helper = BackupRestoreValidations(self.backupset,
                                                       self.cluster_to_backup,
                                                       self.cluster_to_restore,
                                                       self.buckets,
                                                       self.backup_validation_files_location,
                                                       self.backups,
                                                       self.num_items,
                                                       self.vbuckets)
     self.restore_only = self.input.param("restore-only", False)
     self.same_cluster = self.input.param("same-cluster", False)
     self.reset_restore_cluster = self.input.param("reset-restore-cluster", True)
     self.no_progress_bar = self.input.param("no-progress-bar", True)
     self.multi_threads = self.input.param("multi_threads", False)
     self.threads_count = self.input.param("threads_count", 1)
     self.bucket_delete = self.input.param("bucket_delete", False)
     self.bucket_flush = self.input.param("bucket_flush", False)
     include_buckets = self.input.param("include-buckets", "")
     include_buckets = include_buckets.split(",") if include_buckets else []
     exclude_buckets = self.input.param("exclude-buckets", "")
     exclude_buckets = exclude_buckets.split(",") if exclude_buckets else []
     self.backupset.exclude_buckets = exclude_buckets
     self.backupset.include_buckets = include_buckets
     self.backupset.disable_bucket_config = self.input.param("disable-bucket-config", False)
     self.backupset.disable_views = self.input.param("disable-views", False)
     self.backupset.disable_gsi_indexes = self.input.param("disable-gsi-indexes", False)
     self.backupset.disable_ft_indexes = self.input.param("disable-ft-indexes", False)
     self.backupset.disable_data = self.input.param("disable-data", False)
     self.backupset.disable_conf_res_restriction = self.input.param("disable-conf-res-restriction", None)
     self.backupset.force_updates = self.input.param("force-updates", True)
     self.backupset.resume = self.input.param("resume", False)
     self.backupset.purge = self.input.param("purge", False)
     self.backupset.threads = self.input.param("threads", self.number_of_processors())
     self.backupset.start = self.input.param("start", 1)
     self.backupset.end = self.input.param("stop", 1)
     self.backupset.number_of_backups = self.input.param("number_of_backups", 1)
     self.backupset.number_of_backups_after_upgrade = \
         self.input.param("number_of_backups_after_upgrade", 0)
     self.backupset.filter_keys = self.input.param("filter-keys", "")
     self.backupset.random_keys = self.input.param("random_keys", False)
     self.backupset.filter_values = self.input.param("filter-values", "")
     self.backupset.no_ssl_verify = self.input.param("no-ssl-verify", False)
     self.backupset.secure_conn = self.input.param("secure-conn", False)
     self.backupset.bk_no_cert = self.input.param("bk-no-cert", False)
     self.backupset.rt_no_cert = self.input.param("rt-no-cert", False)
     self.backupset.backup_list_name = self.input.param("list-names", None)
     self.backupset.backup_incr_backup = self.input.param("incr-backup", None)
     self.backupset.bucket_backup = self.input.param("bucket-backup", None)
     self.backupset.backup_to_compact = self.input.param("backup-to-compact", 0)
     self.backupset.map_buckets = self.input.param("map-buckets", None)
     self.add_node_services = self.input.param("add-node-services", "kv")
     self.backupset.backup_compressed = \
         self.input.param("backup-conpressed", False)
     self.number_of_backups_taken = 0
     self.vbucket_seqno = []
     self.expires = self.input.param("expires", 0)
     self.auto_failover = self.input.param("enable-autofailover", False)
     self.auto_failover_timeout = self.input.param("autofailover-timeout", 30)
     self.graceful = self.input.param("graceful", False)
     self.recoveryType = self.input.param("recoveryType", "full")
     self.skip_buckets = self.input.param("skip_buckets", False)
     self.lww_new = self.input.param("lww_new", False)
     self.skip_consistency = self.input.param("skip_consistency", False)
     self.master_services = self.get_services([self.backupset.cluster_host],
                                              self.services_init, start_node=0)
     if not self.master_services:
         self.master_services = ["kv"]
     self.per_node = self.input.param("per_node", True)
     if not os.path.exists(self.backup_validation_files_location):
         os.mkdir(self.backup_validation_files_location)
     self.total_buckets = len(self.buckets)
     self.replace_ttl = self.input.param("replace-ttl", None)
     self.replace_ttl_with = self.input.param("replace-ttl-with", None)
     self.verify_before_expired = self.input.param("verify-before-expired", False)
     self.vbucket_filter = self.input.param("vbucket-filter", None)
     self.new_replicas = self.input.param("new-replicas", None)
     self.should_fail = self.input.param("should-fail", False)
     self.restore_compression_mode = self.input.param("restore-compression-mode", None)
     self.enable_firewall = False
     self.vbuckets_filter_no_data = False
     self.test_fts = self.input.param("test_fts", False)
     self.restore_should_fail = self.input.param("restore_should_fail", False)
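
The setUp above discovers the server's bin directory by asking ns_server for path_config:component_path(bin) over /diag/eval and then stripping the quotes from the reply. A standalone sketch of that step, assuming default Administrator/password credentials and a reachable node on port 8091 (host and credentials are placeholders):

import subprocess

def find_couchbase_bin_path(host, user="Administrator", password="password"):
    # ns_server answers with a quoted path such as "/opt/couchbase/bin".
    cmd = ('curl -s -g http://{0}:8091/diag/eval -u {1}:{2} '
           '-d "path_config:component_path(bin)."').format(host, user, password)
    out = subprocess.check_output(cmd, shell=True)
    if not isinstance(out, str):
        out = out.decode()  # bytes on Python 3
    return out.strip().replace('"', '') + "/"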
예제 #44
0
class rbacclitests(BaseTestCase):
    def setUp(self):
        self.times_teardown_called = 1
        super(rbacclitests, self).setUp()
        self.r = random.Random()
        self.vbucket_count = 1024
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.cli_command_path = LINUX_COUCHBASE_BIN_PATH
        if type == 'windows':
            self.os = 'windows'
            self.cli_command_path = WIN_COUCHBASE_BIN_PATH
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
            self.cli_command_path = MAC_COUCHBASE_BIN_PATH
        self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
        self.couchbase_password = "%s" % (self.input.membase_settings.rest_password)
        self.cli_command = self.input.param("cli_command", None)
        self.command_options = self.input.param("command_options", None)
        if self.command_options is not None:
            self.command_options = self.command_options.split(";")
        TestInputSingleton.input.test_params["default_bucket"] = False
        self.eventID = self.input.param('id', None)
        AuditTemp = audit(host=self.master)
        self.ipAddress = self.getLocalIPAddress()
        self.ldapUser = self.input.param('ldapUser', 'Administrator')
        self.ldapPass = self.input.param('ldapPass', 'password')
        self.source = self.input.param('source', 'ns_server')
        self.role = self.input.param('role','admin')
        if self.role in ['bucket_admin','views_admin']:
            self.role = self.role + "[*]"
        self.log.info("value of self.role is {0}".format(self.role))
        if type == 'windows' and self.source == 'saslauthd':
            raise Exception(" Ldap Tests cannot run on windows");
        else:
            if self.source == 'saslauthd':
                rest = RestConnection(self.master)
                self.setupLDAPSettings(rest)
                #rest.ldapUserRestOperation(True, [[self.ldapUser]], exclude=None)
                self.set_user_role(rest,self.ldapUser,user_role=self.role)


    def tearDown(self):
        super(rbacclitests, self).tearDown()

    def getLocalIPAddress(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('couchbase.com', 0))
        return s.getsockname()[0]
        '''
        status, ipAddress = commands.getstatusoutput("ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 |awk '{print $1}'")
        return ipAddress
        '''

    def setupLDAPSettings (self,rest):
        api = rest.baseUrl + 'settings/saslauthdAuth'
        params = urllib.urlencode({"enabled":'true',"admins":[],"roAdmins":[]})
        status, content, header = rest._http_request(api, 'POST', params)
        return status, content, header

    def del_runCmd_value(self, output):
        if "runCmd" in output[0]:
            output = output[1:]
        return output

    def set_user_role(self,rest,username,user_role='admin'):
        payload = "name=" + username + "&roles=" + user_role
        rest.set_user_roles(user_id=username, payload=payload)

    def _validate_roles(self, output, result):
        print(output)
        # Pass when the expected marker (e.g. SUCCESS or Forbidden) appears in any output line.
        final_result = False
        for line in output:
            if result in line:
                final_result = True
                break
        self.assertTrue(final_result, "Incorrect message for the role")

    #Wrapper around auditmain
    def checkConfig(self, eventID, host, expectedResults):
        Audit = audit(eventID=eventID, host=host)
        fieldVerification, valueVerification = Audit.validateEvents(expectedResults)
        self.assertTrue(fieldVerification, "One of the fields is not matching")
        self.assertTrue(valueVerification, "Values for one of the fields is not matching")

    def _create_bucket(self, remote_client, bucket="default", bucket_type="couchbase", bucket_port=11211,
                        bucket_ramsize=200, bucket_replica=1, wait=False, enable_flush=None, enable_index_replica=None, \
                       user=None,password=None):
        options = "--bucket={0} --bucket-type={1} --bucket-ramsize={2} --bucket-replica={3}".\
            format(bucket, bucket_type, bucket_ramsize, bucket_replica)
        options += (" --enable-flush={0}".format(enable_flush), "")[enable_flush is None]
        options += (" --enable-index-replica={0}".format(enable_index_replica), "")[enable_index_replica is None]
        options += (" --enable-flush={0}".format(enable_flush), "")[enable_flush is None]
        options += (" --wait", "")[wait]
        cli_command = "bucket-create"
        if user is None:
            user = self.ldapUser

        if password is None:
            password = self.ldapPass

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=user, password=password)
        return output

    def testClusterEdit(self):
        options = "--server-add={0}:8091 --server-add-username=Administrator --server-add-password=password".format(self.servers[num + 1].ip)
        remote_client = RemoteMachineShellConnection(self.master)
        output, error = remote_client.execute_couchbase_cli(cli_command='cluster-edit', options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)

    def testAddRemoveNodes(self):
        if self.role in ['replication_admin','views_admin[*]','bucket_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin']:
            result = 'SUCCESS'
        nodes_add = self.input.param("nodes_add", 1)
        nodes_rem = self.input.param("nodes_rem", 1)
        nodes_failover = self.input.param("nodes_failover", 0)
        force_failover = self.input.param("force_failover", False)
        nodes_readd = self.input.param("nodes_readd", 0)
        cli_command = self.input.param("cli_command", None)
        source = self.source
        remote_client = RemoteMachineShellConnection(self.master)
        for num in xrange(nodes_add):
            options = "--server-add={0}:8091 --server-add-username=Administrator --server-add-password=password".format(self.servers[num + 1].ip)
            output, error = remote_client.execute_couchbase_cli(cli_command='server-add', options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        output, error = remote_client.execute_couchbase_cli(cli_command='rebalance', cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        self._validate_roles(output,result)

        if (cli_command == 'server-remove'):
            for num in xrange(nodes_rem):
                cli_command = "rebalance"
                options = "--server-remove={0}:8091".format(self.servers[nodes_add - num].ip)
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            self._validate_roles(output,result)


        if (cli_command in ["failover"]):
            cli_command = 'failover'
            for num in xrange(nodes_failover):
                self.log.info("failover node {0}".format(self.servers[nodes_add - nodes_rem - num].ip))
                options = "--server-failover={0}:8091".format(self.servers[nodes_add - nodes_rem - num].ip)
                options += " --force"
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            self._validate_roles(output,result)

        if (cli_command == "server-readd"):
            for num in xrange(nodes_readd):
                cli_command = 'failover'
                self.log.info("failover node {0}".format(self.servers[nodes_add - nodes_rem - num].ip))
                options = "--server-failover={0}:8091".format(self.servers[nodes_add - nodes_rem - num].ip)
                options += " --force"
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
                self._validate_roles(output,result)
                self.log.info("add back node {0} to cluster".format(self.servers[nodes_add - nodes_rem - num ].ip))
                cli_command = "server-readd"
                options = "--server-add={0}:8091".format(self.servers[nodes_add - nodes_rem - num ].ip)
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            self._validate_roles(output,result)
        remote_client.disconnect()


    def testBucketCreation(self):
        if self.role in ['replication_admin','views_admin[*]','bucket_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin']:
            result = 'SUCCESS'
        bucket_name = self.input.param("bucket", "default")
        bucket_type = self.input.param("bucket_type", "couchbase")
        bucket_port = self.input.param("bucket_port", 11211)
        bucket_replica = self.input.param("bucket_replica", 1)
        bucket_password = self.input.param("bucket_password", None)
        bucket_ramsize = self.input.param("bucket_ramsize", 200)
        wait = self.input.param("wait", False)
        enable_flush = self.input.param("enable_flush", None)
        enable_index_replica = self.input.param("enable_index_replica", None)

        remote_client = RemoteMachineShellConnection(self.master)
        output = self._create_bucket(remote_client, bucket=bucket_name, bucket_type=bucket_type, bucket_port=bucket_port, \
                        bucket_ramsize=bucket_ramsize, bucket_replica=bucket_replica, wait=wait, enable_flush=enable_flush, enable_index_replica=enable_index_replica)
        self._validate_roles(output,result)
        remote_client.disconnect()


    def testBucketModification(self):
        if self.role in ['replication_admin','views_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin','bucket_admin[*]']:
            result = 'SUCCESS'
        cli_command = "bucket-edit"
        bucket_type = self.input.param("bucket_type", "couchbase")
        enable_flush = self.input.param("enable_flush", None)
        bucket_port_new = self.input.param("bucket_port_new", None)
        bucket_password_new = self.input.param("bucket_password_new", None)
        bucket_ramsize_new = self.input.param("bucket_ramsize_new", None)
        enable_flush_new = self.input.param("enable_flush_new", None)
        enable_index_replica_new = self.input.param("enable_index_replica_new", None)
        bucket = self.input.param("bucket", "default")
        bucket_ramsize = self.input.param("bucket_ramsize", 200)
        bucket_replica = self.input.param("bucket_replica", 1)
        enable_index_replica = self.input.param("enable_index_replica", None)
        wait = self.input.param("wait", False)

        remote_client = RemoteMachineShellConnection(self.master)

        self._create_bucket(remote_client, bucket, bucket_type=bucket_type, bucket_ramsize=bucket_ramsize,
                            bucket_replica=bucket_replica, wait=wait, enable_flush=enable_flush,
                            enable_index_replica=enable_index_replica,user="******",password='******')

        cli_command = "bucket-edit"
        options = "--bucket={0}".format(bucket)
        options += (" --enable-flush={0}".format(enable_flush_new), "")[enable_flush_new is None]
        options += (" --enable-index-replica={0}".format(enable_index_replica_new), "")[enable_index_replica_new is None]
        #options += (" --bucket-port={0}".format(bucket_port_new), "")[bucket_port_new is None]
        options += (" --bucket-ramsize={0}".format(bucket_ramsize_new), "")[bucket_ramsize_new is None]

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        self._validate_roles(output,result)

        cli_command = "bucket-flush --force"
        options = "--bucket={0}".format(bucket)
        if enable_flush_new is not None:
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        self._validate_roles(output,result)

        cli_command = "bucket-delete"
        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        expectedResults = {"bucket_name":"BBB", "source":self.source, "user":self.ldapUser, "ip":"127.0.0.1", "port":57457}
        self._validate_roles(output,result)

        remote_client.disconnect()

    def testSettingCompacttion(self):
        if self.role in ['replication_admin','views_admin[*]','bucket_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin']:
            result = 'SUCCESS'
        cli_command = "bucket-edit"
        '''setting-compacttion OPTIONS:
        --compaction-db-percentage=PERCENTAGE     at which point database compaction is triggered
        --compaction-db-size=SIZE[MB]             at which point database compaction is triggered
        --compaction-view-percentage=PERCENTAGE   at which point view compaction is triggered
        --compaction-view-size=SIZE[MB]           at which point view compaction is triggered
        --compaction-period-from=HH:MM            allow compaction time period from
        --compaction-period-to=HH:MM              allow compaction time period to
        --enable-compaction-abort=[0|1]           allow compaction abort when time expires
        --enable-compaction-parallel=[0|1]        allow parallel compaction for database and view'''
        compaction_db_percentage = self.input.param("compaction-db-percentage", None)
        compaction_db_size = self.input.param("compaction-db-size", None)
        compaction_view_percentage = self.input.param("compaction-view-percentage", None)
        compaction_view_size = self.input.param("compaction-view-size", None)
        compaction_period_from = self.input.param("compaction-period-from", None)
        compaction_period_to = self.input.param("compaction-period-to", None)
        enable_compaction_abort = self.input.param("enable-compaction-abort", None)
        enable_compaction_parallel = self.input.param("enable-compaction-parallel", None)
        bucket = self.input.param("bucket", "default")
        output = self.input.param("output", '')
        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "setting-compaction"
        options = (" --compaction-db-percentage={0}".format(compaction_db_percentage), "")[compaction_db_percentage is None]
        options += (" --compaction-db-size={0}".format(compaction_db_size), "")[compaction_db_size is None]
        options += (" --compaction-view-percentage={0}".format(compaction_view_percentage), "")[compaction_view_percentage is None]
        options += (" --compaction-view-size={0}".format(compaction_view_size), "")[compaction_view_size is None]
        options += (" --compaction-period-from={0}".format(compaction_period_from), "")[compaction_period_from is None]
        options += (" --compaction-period-to={0}".format(compaction_period_to), "")[compaction_period_to is None]
        options += (" --enable-compaction-abort={0}".format(enable_compaction_abort), "")[enable_compaction_abort is None]
        options += (" --enable-compaction-parallel={0}".format(enable_compaction_parallel), "")[enable_compaction_parallel is None]

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        self._validate_roles(output,result)
        remote_client.disconnect()
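
    # --- Illustrative sketch, not part of the original suite ---
    # The tuple-indexing idiom used above, e.g.
    #     options += (" --compaction-db-size={0}".format(v), "")[v is None]
    # appends a flag only when its parameter was supplied. The same idea as a
    # plain helper (the name is assumed; nothing here is Couchbase-specific):
    @staticmethod
    def _append_option(options, flag, value):
        """Append ' --<flag>=<value>' to options only when value is not None."""
        if value is None:
            return options
        return options + " --{0}={1}".format(flag, value)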



    def testSettingEmail(self):
        if self.role in ['replication_admin','views_admin[*]','bucket_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin']:
            result = 'SUCCESS'
        setting_enable_email_alert = self.input.param("enable-email-alert", 1)
        setting_email_recipients = self.input.param("email-recipients", '*****@*****.**')
        setting_email_sender = self.input.param("email-sender", '*****@*****.**')
        setting_email_user = self.input.param("email-user", 'ritam')
        setting_email_password = self.input.param("email-password", 'password')
        setting_email_host = self.input.param("email-host", 'localhost')
        setting_email_port = self.input.param("email-port", '25')
        #setting_email_encrypt = self.input.param("enable-email-encrypt", 1)

        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "setting-alert"
        options = (" --enable-email-alert={0}".format(setting_enable_email_alert))
        options += (" --email-recipients={0}".format(setting_email_recipients))
        options += (" --email-sender={0}".format(setting_email_sender))
        options += (" --email-user={0}".format(setting_email_user))
        options += (" --email-password={0}".format(setting_emaiL_password))
        options += (" --email-host={0}".format(setting_email_host))
        options += (" --email-port={0}".format(setting_email_port))
        #options += (" --enable-email-encrypt={0}".format(setting_email_encrypt))
        options += (" --alert-auto-failover-node")
        options += (" --alert-auto-failover-max-reached")
        options += (" --alert-auto-failover-node-down")
        options += (" --alert-auto-failover-cluster-small")
        options += (" --alert-ip-changed")
        options += (" --alert-disk-space")
        options += (" --alert-meta-overhead")
        options += (" --alert-meta-oom")
        options += (" --alert-write-failed")

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        self._validate_roles(output,result)
        remote_client.disconnect()

    def testSettingNotification(self):
        if self.role in ['replication_admin','views_admin[*]','bucket_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin']:
            result = 'SUCCESS'
        setting_enable_notification = self.input.param("enable-notification", 1)

        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "setting-notification"
        options = (" --enable-notification={0}".format(setting_enable_notification))

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        self._validate_roles(output,result)
        remote_client.disconnect()

    def testSettingFailover(self):
        if self.role in ['replication_admin','views_admin[*]','bucket_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin']:
            result = 'SUCCESS'
        setting_enable_auto_failover = self.input.param("enable-auto-failover", 1)
        setting_auto_failover_timeout = self.input.param("auto-failover-timeout", 50)

        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "setting-autofailover"
        options = (" --enable-auto-failover={0}".format(setting_enable_auto_failover))
        options += (" --auto-failover-timeout={0}".format(setting_auto_failover_timeout))

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        self._validate_roles(output,result)
        remote_client.disconnect()
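
    # Sketch only: a REST-side cross-check of the setting-autofailover change
    # above, mirroring verifyAutofailoverSettings() from the CliBaseTest example
    # later in this file. The method name is assumed; it reuses the same
    # RestConnection helper used elsewhere in this class.
    def _check_autofailover_applied(self, server, enabled, timeout):
        settings = RestConnection(server).get_autofailover_settings()
        return bool(settings.enabled) == bool(int(enabled)) and \
               str(settings.timeout) == str(timeout)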

    def testSSLManage(self):
        '''ssl-manage OPTIONS:
        --retrieve-cert=CERTIFICATE            retrieve cluster certificate AND save to a pem file
        --regenerate-cert=CERTIFICATE          regenerate cluster certificate AND save to a pem file'''
        xdcr_cert = self.input.param("xdcr-certificate", None)
        xdcr_cert = "/tmp/" + xdcr_cert
        cli_command = "ssl-manage"
        remote_client = RemoteMachineShellConnection(self.master)
        options = "--regenerate-cert={0}".format(xdcr_cert)
        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        self.assertFalse(error, "Error thrown during CLI execution %s" % error)
        self.shell.execute_command("rm {0}".format(xdcr_cert))
        expectedResults = {"real_userid:source":self.source, "real_userid:user":self.ldapUser, "remote:ip":"127.0.0.1", "port":60035}
        self.checkConfig(8226, self.master, expectedResults)
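
    # Sketch only: the ssl-manage docstring above also lists --retrieve-cert.
    # Retrieval would follow the same shape as the --regenerate-cert call; the
    # helper name below is assumed, not part of the original suite.
    def _retrieve_cluster_cert(self, remote_client, cert_path):
        options = "--retrieve-cert={0}".format(cert_path)
        return remote_client.execute_couchbase_cli(cli_command="ssl-manage",
                options=options, cluster_host="localhost",
                user=self.ldapUser, password=self.ldapPass)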


    """ tests for the group-manage option. group creation, renaming and deletion are tested .
        These tests require a cluster of four or more nodes. """
    def testCreateRenameDeleteGroup(self):
        if self.role in ['replication_admin','views_admin[*]','bucket_admin[*]']:
            result = "Forbidden"
        elif self.role in ['admin','cluster_admin']:
            result = 'SUCCESS'

        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "group-manage"
        source = self.source
        user = self.ldapUser
        rest = RestConnection(self.master)

        if self.os == "linux":
            # create group
            options = " --create --group-name=group2"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                    options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            self._validate_roles(output,result)

            if result != 'Forbidden':
                # rename group test
                options = " --rename=group3 --group-name=group2"
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                        options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
                self._validate_roles(output,result)

                # delete group test
                options = " --delete --group-name=group3"
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                        options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
                self._validate_roles(output,result)

        if self.os == "windows":
            # create group
            options = " --create --group-name=group2"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                    options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            self._validate_roles(output,result)

            if result != 'Forbidden':
                # rename group test
                options = " --rename=group3 --group-name=group2"
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                        options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
                self._validate_roles(output,result)

                # delete group test
                options = " --delete --group-name=group3"
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                        options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
                self._validate_roles(output,result)

        remote_client.disconnect()
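
    # Sketch only: the RestConnection created above (but unused) could confirm
    # the group operations, e.g. right after a successful create:
    #     self.assertTrue("group2" in rest.get_zone_names())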
예제 #45
0
class CliBaseTest(BaseTestCase):
    vbucketId = 0

    def setUp(self):
        self.times_teardown_called = 1
        super(CliBaseTest, self).setUp()
        self.r = random.Random()
        self.vbucket_count = 1024
        self.cluster = Cluster()
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.error("**** Cluster config is setup in ini file. ****")
        self.shell = RemoteMachineShellConnection(self.master)
        self.rest = RestConnection(self.master)
        self.import_back = self.input.param("import_back", False)
        if self.import_back:
            if len(self.servers) < 3:
                self.fail("This test needs minimum of 3 vms to run ")
        self.test_type = self.input.param("test_type", "import")
        self.import_file = self.input.param("import_file", None)
        self.imex_type = self.input.param("imex_type", "json")
        self.format_type = self.input.param("format_type", None)
        self.import_method = self.input.param("import_method", "file://")
        self.node_version = self.rest.get_nodes_version()
        self.force_failover = self.input.param("force_failover", False)
        info = self.shell.extract_remote_info()
        type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        """ cli output message """
        self.cli_bucket_create_msg = "SUCCESS: Bucket created"
        self.cli_rebalance_msg = "SUCCESS: Rebalance complete"
        self.cb_version = self.rest.get_nodes_version()
        if self.cb_version[:3] == "4.6":
            self.cli_bucket_create_msg = "SUCCESS: bucket-create"
            self.cli_rebalance_msg = "SUCCESS: rebalanced cluster"
        self.os = 'linux'
        self.full_v = None
        self.short_v = None
        self.build_number = None
        self.cli_command_path = LINUX_COUCHBASE_BIN_PATH
        self.root_path = LINUX_ROOT_PATH
        self.tmp_path = "/tmp/"
        self.cmd_backup_path = LINUX_BACKUP_PATH
        self.backup_path = LINUX_BACKUP_PATH
        self.cmd_ext = ""
        self.src_file = ""
        self.des_file = ""
        self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH
        self.log_path = LINUX_COUCHBASE_LOGS_PATH
        self.base_cb_path = LINUX_CB_PATH
        """ non root path """
        if self.nonroot:
            self.cli_command_path = "/home/%s%s" % (self.master.ssh_username,
                                                    LINUX_COUCHBASE_BIN_PATH)
            self.sample_files_path = "/home/%s%s" % (self.master.ssh_username,
                                                     LINUX_COUCHBASE_SAMPLE_PATH)
            self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_LOGS_PATH)
            self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                                LINUX_CB_PATH)
            self.root_path = "/home/%s/" % self.master.ssh_username
        if type == 'windows':
            self.os = 'windows'
            self.cmd_ext = ".exe"
            self.root_path = WIN_ROOT_PATH
            self.tmp_path = WIN_TMP_PATH
            self.tmp_path_raw = WIN_TMP_PATH_RAW
            self.cmd_backup_path = WIN_BACKUP_C_PATH
            self.backup_path = WIN_BACKUP_PATH
            self.cli_command_path = WIN_COUCHBASE_BIN_PATH
            self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH
            self.log_path = WIN_COUCHBASE_LOGS_PATH
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
            self.cli_command_path = MAC_COUCHBASE_BIN_PATH
        self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(type)
        self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
        self.couchbase_password = "******" % (self.input.membase_settings.rest_password)
        self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                        self.couchbase_password)
        self.path_type = self.input.param("path_type", None)
        if self.path_type is None:
            self.log.info("Test command with absolute path ")
        elif self.path_type == "local":
            self.log.info("Test command at %s dir " % self.cli_command_path)
            self.cli_command_path = "cd %s; ./" % self.cli_command_path
        self.cli_command = self.input.param("cli_command", None)
        self.command_options = self.input.param("command_options", None)
        if self.command_options is not None:
            self.command_options = self.command_options.split(";")
        if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1:
            if len(self.servers) > 1 and int(self.nodes_init) == 1:
                servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
                self.cluster.rebalance(self.servers[:1], servers_in, [])

    def tearDown(self):
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1:
                self.shell.disconnect()
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                self.shell.disconnect()
        self.times_teardown_called += 1
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        zones = rest.get_zone_names()
        for zone in zones:
            if zone != "Group 1":
                rest.delete_zone(zone)
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
                if self.dest_nodes and len(self.dest_nodes) > 1:
                    self.log.info("======== clean up destination cluster =======")
                    rest = RestConnection(self.dest_nodes[0])
                    rest.remove_all_remote_clusters()
                    rest.remove_all_replications()
                    BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                    ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
        super(CliBaseTest, self).tearDown()


    """ in sherlock, there is an extra value called runCmd in the 1st element """
    def del_runCmd_value(self, output):
        if "runCmd" in output[0]:
            output = output[1:]
        return output
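
    # Typical usage of these helpers (sketch only; the command and message
    # shown here are placeholders, not taken from the suite):
    #     output, error = self.shell.execute_couchbase_cli(
    #         cli_command="server-list", options="", cluster_host="localhost",
    #         user=self.couchbase_usrname, password=self.couchbase_password)
    #     output = self.del_runCmd_value(output)
    #     self.verifyCommandOutput(output, expect_error=False, message="...")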

    def verifyCommandOutput(self, output, expect_error, message):
        """Inspects each line of the output and checks to see if the expected error was found

        Options:
        output - A list of output lines
        expect_error - Whether or not the command should have succeeded or failed
        message - The success or error message

        Returns a boolean indicating whether or not the error/success message was found in the output
        """
        if expect_error:
            for line in output:
                if line == "ERROR: " + message:
                    return True
            log.info("Did not receive expected error message `ERROR: %s`", message)
            return False
        else:
            for line in output:
                if line == "SUCCESS: " + message:
                    return True
            log.info("Did not receive expected success message `SUCCESS: %s`", message)
            return False

    def verifyWarningOutput(self, output, message):
        for line in output:
            if line == "WARNING: " + message:
                return True
        log.info("Did not receive expected error message `WARNING: %s`", message)
        return False

    def verifyServices(self, server, expected_services):
        """Verifies that the services on a given node match the expected service

            Options:
            server - A TestInputServer object of the server to connect to
            expected_services - A comma separated list of services

            Returns a boolean corresponding to whether or not the expected services are available on the server.
        """
        rest = RestConnection(server)
        hostname = "%s:%s" % (server.ip, server.port)
        expected_services = expected_services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        nodes_services = rest.get_nodes_services()
        for node, services in nodes_services.iteritems():
            if node.encode('ascii') == hostname:
                if len(services) != len(expected_services):
                    log.info("Services on %s do not match expected services (%s vs. %s)",
                             hostname, services, expected_services)
                    return False
                for service in services:
                    if service.encode("ascii") not in expected_services:
                        log.info("Services on %s do not match expected services (%s vs. %s)",
                                 hostname, services, expected_services)
                        return False
                return True

        log.info("Services on %s not found, the server may not exist", hostname)
        return False

    def verifyRamQuotas(self, server, data, index, fts):
        """Verifies that the RAM quotas for each service are set properly

        Options:
        server - A TestInputServer object of the server to connect to
        data - An int containing the data service RAM quota, None will skip the check
        index - An int containing the index service RAM quota, None will skip the check
        fts - An int containing the FTS service RAM quota, None will skip the check

        Returns a boolean corresponding to whether or not the RAM quotas were set properly
        """
        rest = RestConnection(server)
        settings = rest.get_pools_default()
        if data:
            if "memoryQuota" not in settings:
                log.info("Unable to get data service ram quota")
                return False
            if int(settings["memoryQuota"]) != int(data):
                log.info("Data service memory quota does not match (%d vs %d)",
                         int(settings["memoryQuota"]), int(data))
                return False

        if index:
            if "indexMemoryQuota" not in settings:
                log.info("Unable to get index service ram quota")
                return False
            if int(settings["indexMemoryQuota"]) != int(index):
                log.info(
                    "Index service memory quota does not match (%d vs %d)",
                    int(settings["indexMemoryQuota"]), int(index))
                return False

        if fts:
            if "ftsMemoryQuota" not in settings:
                log.info("Unable to get fts service ram quota")
                return False
            if int(settings["ftsMemoryQuota"]) != int(fts):
                log.info("FTS service memory quota does not match (%d vs %d)",
                         int(settings["ftsMemoryQuota"]), int(fts))
                return False

        return True

    def verifyBucketSettings(self, server, bucket_name, bucket_password,
                             bucket_type, memory_quota, eviction_policy,
                             replica_count, enable_index_replica, priority,
                             enable_flush):
        rest = RestConnection(server)
        result = rest.get_bucket_json(bucket_name)
        if bucket_password is not None and bucket_password != result[
            "saslPassword"]:
            log.info("Bucket password does not match (%s vs %s)",
                     bucket_password, result["saslPassword"])
            return False

        if bucket_type == "couchbase":
            bucket_type = "membase"

        if bucket_type is not None and bucket_type != result["bucketType"]:
            log.info("Memory quota does not match (%s vs %s)", bucket_type,
                     result["bucketType"])
            return False

        quota = result["quota"]["rawRAM"] / 1024 / 1024
        if memory_quota is not None and memory_quota != quota:
            log.info("Bucket quota does not match (%s vs %s)", memory_quota,
                     quota)
            return False

        if eviction_policy is not None and eviction_policy != result[
            "evictionPolicy"]:
            log.info("Eviction policy does not match (%s vs %s)",
                     eviction_policy, result["evictionPolicy"])
            return False

        if replica_count is not None and replica_count != result[
            "replicaNumber"]:
            log.info("Replica count does not match (%s vs %s)", replica_count,
                     result["replicaNumber"])
            return False

        if enable_index_replica == 1:
            enable_index_replica = True
        elif enable_index_replica == 0:
            enable_index_replica = False

        if enable_index_replica is not None and enable_index_replica != result[
            "replicaIndex"]:
            log.info("Replica index enabled does not match (%s vs %s)",
                     enable_index_replica, result["replicaIndex"])
            return False

        if priority == "high":
            priority = 8
        elif priority == "low":
            priority = 3

        if priority is not None and priority != result["threadsNumber"]:
            log.info("Bucket priority does not match (%s vs %s)", priority,
                     result["threadsNumber"])
            return False

        if enable_flush is not None:
            if enable_flush == 1 and "flush" not in result["controllers"]:
                log.info("Bucket flush is not enabled, but it should be")
                return False
            elif enable_flush == 0 and "flush" in result["controllers"]:
                log.info("Bucket flush is not enabled, but it should be")
                return False

        return True

    def verifyContainsBucket(self, server, name):
        rest = RestConnection(server)
        buckets = rest.get_buckets()

        for bucket in buckets:
            if bucket.name == name:
                return True
        return False

    def verifyClusterName(self, server, name):
        rest = RestConnection(server)
        settings = rest.get_pools_default("waitChange=0")

        if name is None:
            name = ""

        if "clusterName" not in settings:
            log.info("Unable to get cluster name from server")
            return False
        if settings["clusterName"] != name:
            log.info("Cluster name does not match (%s vs %s)",
                     settings["clusterName"], name)
            return False

        return True

    def isClusterInitialized(self, server):
        """Checks whether or not the server is initialized

        Options:
        server - A TestInputServer object of the server to connect to

        Checks to see whether or not the default pool was created in order to determine whether
        or not the server was initialized. Returns a boolean value to indicate initialization.
        """
        rest = RestConnection(server)
        settings = rest.get_pools_info()
        if "pools" in settings and len(settings["pools"]) > 0:
            return True

        return False

    def verifyNotificationsEnabled(self, server):
        rest = RestConnection(server)
        enabled = rest.get_notifications()
        if enabled:
            return True
        return False

    def verifyIndexSettings(self, server, max_rollbacks, stable_snap_interval,
                            mem_snap_interval,
                            storage_mode, threads, log_level):
        rest = RestConnection(server)
        settings = rest.get_global_index_settings()

        if storage_mode == "default":
            storage_mode = "forestdb"
        elif storage_mode == "memopt":
            storage_mode = "memory_optimized"

        if max_rollbacks and str(settings["maxRollbackPoints"]) != str(
                max_rollbacks):
            log.info("Max rollbacks does not match (%s vs. %s)",
                     str(settings["maxRollbackPoints"]), str(max_rollbacks))
            return False
        if stable_snap_interval and str(
                settings["stableSnapshotInterval"]) != str(
                stable_snap_interval):
            log.info("Stable snapshot interval does not match (%s vs. %s)",
                     str(settings["stableSnapshotInterval"]),
                     str(stable_snap_interval))
            return False
        if mem_snap_interval and str(
                settings["memorySnapshotInterval"]) != str(mem_snap_interval):
            log.info("Memory snapshot interval does not match (%s vs. %s)",
                     str(settings["memorySnapshotInterval"]),
                     str(mem_snap_interval))
            return False
        if storage_mode and str(settings["storageMode"]) != str(storage_mode):
            log.info("Storage mode does not match (%s vs. %s)",
                     str(settings["storageMode"]), str(storage_mode))
            return False
        if threads and str(settings["indexerThreads"]) != str(threads):
            log.info("Threads does not match (%s vs. %s)",
                     str(settings["indexerThreads"]), str(threads))
            return False
        if log_level and str(settings["logLevel"]) != str(log_level):
            log.info("Log level does not match (%s vs. %s)",
                     str(settings["logLevel"]), str(log_level))
            return False

        return True

    def verifyAutofailoverSettings(self, server, enabled, timeout):
        rest = RestConnection(server)
        settings = rest.get_autofailover_settings()

        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
                str(enabled) == "0" and not settings.enabled)):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings.enabled))
            return False
        if timeout and str(settings.timeout) != str(timeout):
            log.info("Timeout does not match (%s vs. %s)", str(timeout),
                     str(settings.timeout))
            return False

        return True

    def verifyAuditSettings(self, server, enabled, log_path, rotate_interval):
        rest = RestConnection(server)
        settings = rest.getAuditSettings()

        if enabled and not (
            (str(enabled) == "1" and settings["auditdEnabled"]) or (
                str(enabled) == "0" and not settings["auditdEnabled"])):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings["auditdEnabled"]))
            return False
        if log_path and str(str(settings["logPath"])) != str(log_path):
            log.info("Log path does not match (%s vs. %s)", str(log_path),
                     str(settings["logPath"]))
            return False

        if rotate_interval and str(str(settings["rotateInterval"])) != str(
                rotate_interval):
            log.info("Rotate interval does not match (%s vs. %s)",
                     str(rotate_interval), str(settings["rotateInterval"]))
            return False

        return True

    def verifyPendingServer(self, server, server_to_add, group_name, services):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        expected_services = services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    if node["clusterMembership"] != "inactiveAdded":
                        log.info("Node `%s` not in pending status",
                                 server_to_add)
                        return False

                    if group["name"] != group_name:
                        log.info("Node `%s` not in correct group (%s vs %s)",
                                 node["hostname"], group_name,
                                 group["name"])
                        return False

                    if len(node["services"]) != len(expected_services):
                        log.info("Services do not match on %s (%s vs %s) ",
                                 node["hostname"], services,
                                 ",".join(node["services"]))
                        return False

                    for service in node["services"]:
                        if service not in expected_services:
                            log.info("Services do not match on %s (%s vs %s) ",
                                     node["hostname"], services,
                                     ",".join(node["services"]))
                            return False
                    return True

        log.info("Node `%s` not found in nodes list", server_to_add)
        return False

    def verifyPendingServerDoesNotExist(self, server, server_to_add):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    return False

        log.info("Node `%s` not found in nodes list", server_to_add)
        return True

    def verifyActiveServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "active")

    def verifyFailedServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "inactiveFailed")

    def _verifyServersByStatus(self, server, expected_num_servers, status):
        rest = RestConnection(server)
        settings = rest.get_pools_default()

        count = 0
        for node in settings["nodes"]:
            if node["clusterMembership"] == status:
                count += 1

        return count == expected_num_servers

    def verifyRecoveryType(self, server, recovery_servers, recovery_type):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        if not recovery_servers:
            return True

        num_found = 0
        recovery_servers = recovery_servers.split(",")
        for group in settings["groups"]:
            for node in group["nodes"]:
                for rs in recovery_servers:
                    if node["hostname"] == rs:
                        if node["recoveryType"] != recovery_type:
                            log.info(
                                "Node %s doesn't contain recovery type %s ",
                                rs, recovery_type)
                            return False
                        else:
                            num_found = num_found + 1

        if num_found == len(recovery_servers):
            return True

        log.info("Node `%s` not found in nodes list",
                 ",".join(recovery_servers))
        return False

    def verifyReadOnlyUser(self, server, username):
        rest = RestConnection(server)
        ro_user, status = rest.get_ro_user()
        if not status:
            log.info("Getting the read only user failed")
            return False

        if ro_user.startswith('"') and ro_user.endswith('"'):
            ro_user = ro_user[1:-1]

        if ro_user != username:
            log.info("Read only user name does not match (%s vs %s)", ro_user,
                     username)
            return False
        return True

    def verifyLdapSettings(self, server, admins, ro_admins, default, enabled):
        rest = RestConnection(server)
        settings = rest.ldapRestOperationGetResponse()

        if admins is None:
            admins = []
        else:
            admins = admins.split(",")

        if ro_admins is None:
            ro_admins = []
        else:
            ro_admins = ro_admins.split(",")

        if str(enabled) == "0":
            admins = []
            ro_admins = []

        if default == "admins" and str(enabled) == "1":
            if settings["admins"] != "asterisk":
                log.info("Admins don't match (%s vs asterisk)",
                         settings["admins"])
                return False
        elif not self._list_compare(settings["admins"], admins):
            log.info("Admins don't match (%s vs %s)", settings["admins"],
                     admins)
            return False

        if default == "roadmins" and str(enabled) == "1":
            if settings["roAdmins"] != "asterisk":
                log.info("Read only admins don't match (%s vs asterisk)",
                         settings["roAdmins"])
                return False
        elif not self._list_compare(settings["roAdmins"], ro_admins):
            log.info("Read only admins don't match (%s vs %s)",
                     settings["roAdmins"], ro_admins)
            return False

        return True

    def verifyAlertSettings(self, server, enabled, email_recipients,
                            email_sender, email_username, email_password,
                            email_host,
                            email_port, encrypted, alert_af_node,
                            alert_af_max_reached, alert_af_node_down,
                            alert_af_small,
                            alert_af_disable, alert_ip_changed,
                            alert_disk_space, alert_meta_overhead,
                            alert_meta_oom,
                            alert_write_failed, alert_audit_dropped):
        rest = RestConnection(server)
        settings = rest.get_alerts_settings()
        print settings

        if not enabled:
            if not settings["enabled"]:
                return True
            else:
                log.info("Alerts should be disabled")
                return False

        if encrypted is None or encrypted == "0":
            encrypted = False
        else:
            encrypted = True

        if email_recipients is not None and not self._list_compare(
                email_recipients.split(","), settings["recipients"]):
            log.info("Email recipients don't match (%s vs %s)",
                     email_recipients.split(","), settings["recipients"])
            return False

        if email_sender is not None and email_sender != settings["sender"]:
            log.info("Email sender does not match (%s vs %s)", email_sender,
                     settings["sender"])
            return False

        if email_username is not None and email_username != \
                settings["emailServer"]["user"]:
            log.info("Email username does not match (%s vs %s)",
                     email_username, settings["emailServer"]["user"])
            return False

        if email_host is not None and email_host != settings["emailServer"][
            "host"]:
            log.info("Email host does not match (%s vs %s)", email_host,
                     settings["emailServer"]["host"])
            return False

        if email_port is not None and email_port != settings["emailServer"][
            "port"]:
            log.info("Email port does not match (%s vs %s)", email_port,
                     settings["emailServer"]["port"])
            return False

        if encrypted is not None and encrypted != settings["emailServer"][
            "encrypt"]:
            log.info("Email encryption does not match (%s vs %s)", encrypted,
                     settings["emailServer"]["encrypt"])
            return False

        alerts = list()
        if alert_af_node:
            alerts.append('auto_failover_node')
        if alert_af_max_reached:
            alerts.append('auto_failover_maximum_reached')
        if alert_af_node_down:
            alerts.append('auto_failover_other_nodes_down')
        if alert_af_small:
            alerts.append('auto_failover_cluster_too_small')
        if alert_af_disable:
            alerts.append('auto_failover_disabled')
        if alert_ip_changed:
            alerts.append('ip')
        if alert_disk_space:
            alerts.append('disk')
        if alert_meta_overhead:
            alerts.append('overhead')
        if alert_meta_oom:
            alerts.append('ep_oom_errors')
        if alert_write_failed:
            alerts.append('ep_item_commit_failed')
        if alert_audit_dropped:
            alerts.append('audit_dropped_events')

        if not self._list_compare(alerts, settings["alerts"]):
            log.info("Alerts don't match (%s vs %s)", alerts,
                     settings["alerts"])
            return False

        return True

    def verify_node_settings(self, server, data_path, index_path, hostname):
        rest = RestConnection(server)
        node_settings = rest.get_nodes_self()

        if data_path != node_settings.storage[0].path:
            log.info("Data path does not match (%s vs %s)", data_path,
                     node_settings.storage[0].path)
            return False
        if index_path != node_settings.storage[0].index_path:
            log.info("Index path does not match (%s vs %s)", index_path,
                     node_settings.storage[0].index_path)
            return False
        if hostname is not None:
            if hostname != node_settings.hostname:
                log.info("Hostname does not match (%s vs %s)", hostname,
                         node_settings.hostname)
                return False
        return True

    def verifyCompactionSettings(self, server, db_frag_perc, db_frag_size,
                                 view_frag_perc, view_frag_size, from_period,
                                 to_period, abort_outside, parallel_compact,
                                 purgeInt):
        rest = RestConnection(server)
        settings = rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]

        if db_frag_perc is not None and str(db_frag_perc) != str(
                ac["databaseFragmentationThreshold"]["percentage"]):
            log.info("DB frag perc does not match (%s vs %s)",
                     str(db_frag_perc),
                     str(ac["databaseFragmentationThreshold"]["percentage"]))
            return False

        if db_frag_size is not None and str(db_frag_size * 1024 ** 2) != str(
                ac["databaseFragmentationThreshold"]["size"]):
            log.info("DB frag size does not match (%s vs %s)",
                     str(db_frag_size * 1024 ** 2),
                     str(ac["databaseFragmentationThreshold"]["size"]))
            return False

        if view_frag_perc is not None and str(view_frag_perc) != str(
                ac["viewFragmentationThreshold"]["percentage"]):
            log.info("View frag perc does not match (%s vs %s)",
                     str(view_frag_perc),
                     str(ac["viewFragmentationThreshold"]["percentage"]))
            return False

        if view_frag_size is not None and str(
                        view_frag_size * 1024 ** 2) != str(
                ac["viewFragmentationThreshold"]["size"]):
            log.info("View frag size does not match (%s vs %s)",
                     str(view_frag_size * 1024 ** 2),
                     str(ac["viewFragmentationThreshold"]["size"]))
            return False

        print from_period, to_period
        if from_period is not None:
            fromHour, fromMin = from_period.split(":", 1)
            if int(fromHour) != int(ac["allowedTimePeriod"]["fromHour"]):
                log.info("From hour does not match (%s vs %s)", str(fromHour),
                         str(ac["allowedTimePeriod"]["fromHour"]))
                return False
            if int(fromMin) != int(ac["allowedTimePeriod"]["fromMinute"]):
                log.info("From minute does not match (%s vs %s)", str(fromMin),
                         str(ac["allowedTimePeriod"]["fromMinute"]))
                return False

        if to_period is not None:
            toHour, toMin = to_period.split(":", 1)
            if int(toHour) != int(ac["allowedTimePeriod"]["toHour"]):
                log.info("To hour does not match (%s vs %s)", str(toHour),
                         str(ac["allowedTimePeriod"]["toHour"]))
                return False
            if int(toMin) != int(ac["allowedTimePeriod"]["toMinute"]):
                log.info("To minute does not match (%s vs %s)", str(toMin),
                         str(ac["allowedTimePeriod"]["toMinute"]))
                return False

        if str(abort_outside) == "1":
            abort_outside = True
        elif str(abort_outside) == "0":
            abort_outside = False

        if abort_outside is not None and abort_outside != \
                ac["allowedTimePeriod"]["abortOutside"]:
            log.info("Abort outside does not match (%s vs %s)", abort_outside,
                     ac["allowedTimePeriod"]["abortOutside"])
            return False

        if str(parallel_compact) == "1":
            parallel_compact = True
        elif str(parallel_compact) == "0":
            parallel_compact = False

        if parallel_compact is not None and parallel_compact != ac[
            "parallelDBAndViewCompaction"]:
            log.info("Parallel compact does not match (%s vs %s)",
                     str(parallel_compact),
                     str(ac["parallelDBAndViewCompaction"]))
            return False

        if purgeInt is not None and str(purgeInt) != str(
                settings["purgeInterval"]):
            log.info("Purge interval does not match (%s vs %s)", str(purgeInt),
                     str(settings["purgeInterval"]))
            return False

        return True

    def verify_gsi_compact_settings(self, compact_mode, compact_percent,
                                    compact_interval,
                                    from_period, to_period, enable_abort):
        settings = self.rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]["indexFragmentationThreshold"]
        cc = settings["autoCompactionSettings"]["indexCircularCompaction"]
        if compact_mode is not None:
            if compact_mode == "append":
                self.log.info("append compactino settings %s " % ac)
                if compact_percent is not None and \
                                compact_percent != ac["percentage"]:
                    raise Exception(
                        "setting percent does not match.  Set: %s vs %s :Actual"
                        % (compact_percent, ac["percentage"]))
            if compact_mode == "circular":
                self.log.info("circular compaction settings %s " % cc)
                if enable_abort and not cc["interval"]["abortOutside"]:
                    raise Exception("setting enable abort failed")
                if compact_interval is not None:
                    if compact_interval != cc["daysOfWeek"]:
                        raise Exception(
                            "Failed to set compaction on %s " % compact_interval)
                    elif from_period is None and int(
                            cc["interval"]["fromHour"]) != 0 and \
                                    int(cc["interval"]["fromMinute"]) != 0:
                        raise Exception(
                            "fromHour and fromMinute should be zero")
                if compact_interval is None:
                    from_str = "%s:%s" % (cc["interval"]["fromHour"],
                                          cc["interval"]["fromMinute"])
                    to_str = "%s:%s" % (cc["interval"]["toHour"],
                                        cc["interval"]["toMinute"])
                    if from_period != from_str or to_period != to_str:
                        raise Exception(
                            "from/to compaction period does not match the settings")
        return True

    def verifyGroupExists(self, server, name):
        rest = RestConnection(server)
        groups = rest.get_zone_names()
        print groups

        for gname, _ in groups.iteritems():
            if name == gname:
                return True

        return False

    def _list_compare(self, list1, list2):
        if len(list1) != len(list2):
            return False
        for elem1 in list1:
            found = False
            for elem2 in list2:
                if elem1 == elem2:
                    found = True
                    break
            if not found:
                return False
        return True

    def waitForItemCount(self, server, bucket_name, count, timeout=30):
        rest = RestConnection(server)
        for sec in range(timeout):
            items = int(
                rest.get_bucket_json(bucket_name)["basicStats"]["itemCount"])
            if items != count:
                time.sleep(1)
            else:
                return True
        log.info("Waiting for item count to be %d timed out", count)
        return False
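
    # Sketch only: how the verification helpers above are typically chained
    # after a bucket is created and loaded (bucket name and count illustrative):
    #     if self.verifyContainsBucket(self.master, "default"):
    #         self.assertTrue(self.waitForItemCount(self.master, "default", 1000))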
예제 #46
0
 def test_eventing_lifecycle_with_couchbase_cli(self):
     # load some data in the source bucket
     self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
               batch_size=self.batch_size)
     # This value is hardcoded in the exported function name
     script_dir = os.path.dirname(__file__)
     abs_file_path = os.path.join(script_dir, EXPORTED_FUNCTION.NEW_BUCKET_OP)
     fh = open(abs_file_path, "r")
     lines = fh.read()
     shell = RemoteMachineShellConnection(self.servers[0])
     info = shell.extract_remote_info().type.lower()
     if info == 'linux':
         self.cli_command_location = testconstants.LINUX_COUCHBASE_BIN_PATH
     elif info == 'windows':
         self.cmd_ext = ".exe"
         self.cli_command_location = testconstants.WIN_COUCHBASE_BIN_PATH_RAW
     elif info == 'mac':
         self.cli_command_location = testconstants.MAC_COUCHBASE_BIN_PATH
     else:
         raise Exception("OS not supported.")
     # create the json file need on the node
     eventing_node = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=False)
     remote_client = RemoteMachineShellConnection(eventing_node)
     remote_client.write_remote_file_single_quote("/root", "Function_396275055_test_export_function.json", lines)
     # import the function
     self._couchbase_cli_eventing(eventing_node, "Function_396275055_test_export_function", "import",
                                  "SUCCESS: Events imported",
                                  file_name="Function_396275055_test_export_function.json")
     # deploy the function
     self._couchbase_cli_eventing(eventing_node, "Function_396275055_test_export_function", "deploy --boundary from-everything",
                                  "SUCCESS: Request to deploy the function was accepted")
     self.wait_for_handler_state("Function_396275055_test_export_function","deployed")
     # verify result
     self.verify_eventing_results("Function_396275055_test_export_function", self.docs_per_day * 2016,
                                  skip_stats_validation=True)
     # pause function
     self._couchbase_cli_eventing(eventing_node, "Function_396275055_test_export_function",
                                  "pause",
                                  "SUCCESS: Function was paused")
     self.wait_for_handler_state("Function_396275055_test_export_function", "paused")
     # delete all documents
     self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
               batch_size=self.batch_size, op_type='delete')
     # resume function
     self._couchbase_cli_eventing(eventing_node, "Function_396275055_test_export_function",
                                  "resume", "SUCCESS: Function was resumed")
     self.wait_for_handler_state("Function_396275055_test_export_function", "deployed")
     # verify result
     self.verify_eventing_results("Function_396275055_test_export_function", 0,
                                  skip_stats_validation=True)
     # list the function
     self._couchbase_cli_eventing(eventing_node, "Function_396275055_test_export_function", "list",
                                  " Status: Deployed")
     # export the function
     self._couchbase_cli_eventing(eventing_node, "Function_396275055_test_export_function", "export",
                                  "SUCCESS: Function exported to: Function_396275055_test_export_function2.json",
                                  file_name="Function_396275055_test_export_function2.json")
     # check if the exported function actually exists
     exists = remote_client.file_exists("/root", "Function_396275055_test_export_function2.json")
     # check if the exported file exists
     if not exists:
         self.fail("file does not exist after export")
     # export-all functions
     self._couchbase_cli_eventing(eventing_node, "Function_396275055_test_export_function", "export-all",
                                  "SUCCESS: All functions exported to: export_all.json",
                                 file_name="export_all.json", name=False)
     # check if the exported function actually exists
     exists = remote_client.file_exists("/root", "export_all.json")
     # check if the exported file exists
     if not exists:
         self.fail("file does not exist after export-all")
     # undeploy the function
     self._couchbase_cli_eventing(eventing_node, "Function_396275055_test_export_function", "undeploy",
                                  "SUCCESS: Request to undeploy the function was accepted")
     self.wait_for_handler_state("Function_396275055_test_export_function","undeployed")
     # delete the function
     self._couchbase_cli_eventing(eventing_node, "Function_396275055_test_export_function", "delete",
                                  "SUCCESS: Request to delete the function was accepted")
예제 #47
0
class auditcli(BaseTestCase):
    def setUp(self):
        self.times_teardown_called = 1
        super(auditcli, self).setUp()
        self.r = random.Random()
        self.vbucket_count = 1024
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.cli_command_path = LINUX_COUCHBASE_BIN_PATH
        if type == 'windows':
            self.os = 'windows'
            self.cli_command_path = WIN_COUCHBASE_BIN_PATH
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
            self.cli_command_path = MAC_COUCHBASE_BIN_PATH
        self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
        self.couchbase_password = "******" % (self.input.membase_settings.rest_password)
        self.cli_command = self.input.param("cli_command", None)
        self.command_options = self.input.param("command_options", None)
        if self.command_options is not None:
            self.command_options = self.command_options.split(";")
        TestInputSingleton.input.test_params["default_bucket"] = False
        self.eventID = self.input.param('id', None)
        AuditTemp = audit(host=self.master)
        currentState = AuditTemp.getAuditStatus()
        self.log.info("Current status of audit on ip - {0} is {1}".format(self.master.ip, currentState))
        if not currentState:
            self.log.info("Enabling Audit ")
            AuditTemp.setAuditEnable('true')
            self.sleep(30)
        self.ipAddress = self.getLocalIPAddress()
        self.ldapUser = self.input.param('ldapUser', 'Administrator')
        self.ldapPass = self.input.param('ldapPass', 'password')
        self.source = self.input.param('source', 'ns_server')
        if type == 'windows' and self.source == 'saslauthd':
            raise Exception(" Ldap Tests cannot run on windows");
        else:
            if self.source == 'saslauthd':
                self.auth_type = 'sasl'
                rest = RestConnection(self.master)
                self.setupLDAPSettings(rest)
                #rest.ldapUserRestOperation(True, [[self.ldapUser]], exclude=None)
                self.set_user_role(rest, self.ldapUser)

    def tearDown(self):
        super(auditcli, self).tearDown()

    def getLocalIPAddress(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('couchbase.com', 0))
        return s.getsockname()[0]
        '''
        status, ipAddress = commands.getstatusoutput("ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 |awk '{print $1}'")
        return ipAddress
        '''

    def setupLDAPSettings (self, rest):
        api = rest.baseUrl + 'settings/saslauthdAuth'
        params = urllib.parse.urlencode({"enabled":'true',"admins":[],"roAdmins":[]})
        status, content, header = rest._http_request(api, 'POST', params)
        return status, content, header

    def del_runCmd_value(self, output):
        if "runCmd" in output[0]:
            output = output[1:]
        return output

    def set_user_role(self,rest,username,user_role='admin'):
        payload = "name=" + username + "&roles=" + user_role
        content =  rest.set_user_roles(user_id=username, payload=payload)


    #Wrapper around auditmain
    def checkConfig(self, eventID, host, expectedResults):
        Audit = audit(eventID=eventID, host=host)
        fieldVerification, valueVerification = Audit.validateEvents(expectedResults)
        self.assertTrue(fieldVerification, "One of the fields is not matching")
        self.assertTrue(valueVerification, "Values for one of the fields is not matching")

    def _create_bucket(self, remote_client, bucket="default", bucket_type="couchbase",
                       bucket_ramsize=200, bucket_replica=1, wait=False, enable_flush=None, enable_index_replica=None):
        options = "--bucket={0} --bucket-type={1} --bucket-ramsize={2} --bucket-replica={3}".\
            format(bucket, bucket_type, bucket_ramsize, bucket_replica)
        options += (" --enable-flush={0}".format(enable_flush), "")[enable_flush is None]
        options += (" --enable-index-replica={0}".format(enable_index_replica), "")[enable_index_replica is None]
        options += (" --enable-flush={0}".format(enable_flush), "")[enable_flush is None]
        options += (" --wait", "")[wait]
        cli_command = "bucket-create"

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)

    def testClusterEdit(self):
        options = "--server-add={0}:8091 --server-add-username=Administrator --server-add-password=password".format(self.servers[num + 1].ip)
        remote_client = RemoteMachineShellConnection(self.master)
        output, error = remote_client.execute_couchbase_cli(cli_command='cluster-edit', options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)

    def testAddRemoveNodes(self):
        nodes_add = self.input.param("nodes_add", 1)
        nodes_rem = self.input.param("nodes_rem", 1)
        nodes_failover = self.input.param("nodes_failover", 0)
        force_failover = self.input.param("force_failover", False)
        nodes_readd = self.input.param("nodes_readd", 0)
        cli_command = self.input.param("cli_command", None)
        source = self.source
        remote_client = RemoteMachineShellConnection(self.master)
        for num in range(nodes_add):
            options = "--server-add={0} --server-add-username=Administrator --server-add-password=password".format(self.servers[num + 1].ip)
            output, error = remote_client.execute_couchbase_cli(cli_command='server-add', options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        output, error = remote_client.execute_couchbase_cli(cli_command='rebalance', cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)

        if (cli_command == "server-add"):
            expectedResults = {"services":['kv'], 'port':18091, 'hostname':self.servers[num + 1].ip,
                                   'groupUUID':"0", 'node':'ns_1@' + self.servers[num + 1].ip, 'source':source,
                                   'user':self.master.rest_username, "real_userid:user":self.ldapUser, "ip":'::1', "remote:port":57457}
            self.checkConfig(self.eventID, self.master, expectedResults)
            expectedResults = {"delta_recovery_buckets":"all", 'known_nodes':["ns_1@" + self.master.ip, "ns_1@" + self.servers[num + 1].ip],
                                    'ejected_nodes':[], 'source':'ns_server', 'source':source, 'user':self.master.rest_username,
                                    "ip":'::1', "port":57457, "real_userid:user":self.ldapUser}
            self.checkConfig(8200, self.master, expectedResults)

        if (cli_command == 'server-remove'):
            for num in range(nodes_rem):
                cli_command = "rebalance"
                options = "--server-remove={0}:8091".format(self.servers[nodes_add - num].ip)
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
                #expectedResults = {'node':'ns_1@' + self.servers[num + 1].ip, 'source':source, 'user':self.master.rest_username, "ip":'127.0.0.1', "port":57457}
                #self.checkConfig(self.eventID, self.master, expectedResults)
                expectedResults = {"delta_recovery_buckets":"all", 'known_nodes':["ns_1@" + self.master.ip, "ns_1@" + self.servers[num + 1].ip],
                                    'ejected_nodes':["ns_1@" + self.servers[num + 1].ip], 'source':source, 'user':self.master.rest_username,
                                    "ip":'::1', "port":57457, "real_userid:user":self.ldapUser}
                self.checkConfig(8200, self.master, expectedResults)


        if (cli_command in ["failover"]):
            cli_command = 'failover'
            for num in range(nodes_failover):
                self.log.info("failover node {0}".format(self.servers[nodes_add - nodes_rem - num].ip))
                options = "--server-failover={0}:8091".format(self.servers[nodes_add - nodes_rem - num].ip)
                options += " --force"
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
                expectedResults = {'source':source, "real_userid:user":self.ldapUser, 'user':self.master.rest_username, "ip":'::1', "port":57457, 'type':'hard', 'nodes':'[ns_1@' + self.servers[nodes_add - nodes_rem - num].ip + ']'}
                self.checkConfig(self.eventID, self.master, expectedResults)

        if (cli_command == "recovery"):
            for num in range(nodes_readd):
                cli_command = 'failover'
                self.log.info("failover node {0}".format(self.servers[nodes_add - nodes_rem - num].ip))
                options = "--server-failover={0}:8091".format(self.servers[nodes_add - nodes_rem - num].ip)
                options += " --hard"
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
                self.log.info("add back node {0} to cluster".format(self.servers[nodes_add - nodes_rem - num ].ip))
                cli_command = "recovery"
                options = "--server-recovery={0}:8091 --recovery-type full".format(self.servers[nodes_add - nodes_rem - num ].ip)
                output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
                expectedResults = {'node':'ns_1@' + self.servers[nodes_add - nodes_rem - num ].ip, 'type':'full', "real_userid:user":self.ldapUser, 'source':source, 'user':self.master.rest_username, "ip":'::1', "port":57457}
                self.checkConfig(self.eventID, self.master, expectedResults)

        remote_client.disconnect()


    def testBucketCreation(self):
        bucket_name = self.input.param("bucket", "default")
        bucket_type = self.input.param("bucket_type", "couchbase")
        bucket_port = self.input.param("bucket_port", 11211)
        bucket_replica = self.input.param("bucket_replica", 1)
        bucket_ramsize = self.input.param("bucket_ramsize", 200)
        wait = self.input.param("wait", False)
        enable_flush = self.input.param("enable_flush", None)
        enable_index_replica = self.input.param("enable_index_replica", None)

        remote_client = RemoteMachineShellConnection(self.master)
        self._create_bucket(remote_client, bucket=bucket_name, bucket_type=bucket_type, \
                        bucket_ramsize=bucket_ramsize, bucket_replica=bucket_replica, wait=wait, enable_flush=enable_flush, enable_index_replica=enable_index_replica)
        expectedResults = {'bucket_name':'default', 'ram_quota':209715200, 'num_replicas':1,
                               'replica_index':True, 'eviction_policy':'value_only', 'type':'membase', \
                               'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \
                                "flush_enabled":False, "num_threads":3, "source":self.source, \
                               "user":self.ldapUser, "ip":'::1', "port":57457, 'sessionid':'', \
                               'conflict_resolution_type':'seqno','storage_mode':'couchstore'}
        self.checkConfig(8201, self.master, expectedResults)
        remote_client.disconnect()


    def testBucketModification(self):
        cli_command = "bucket-edit"
        bucket_type = self.input.param("bucket_type", "couchbase")
        bucket_password = self.input.param("bucket_password", None)
        bucket_port = self.input.param("bucket_port", 11211)
        enable_flush = self.input.param("enable_flush", None)
        bucket_port_new = self.input.param("bucket_port_new", None)
        bucket_password_new = self.input.param("bucket_password_new", None)
        bucket_ramsize_new = self.input.param("bucket_ramsize_new", None)
        enable_flush_new = self.input.param("enable_flush_new", None)
        enable_index_replica_new = self.input.param("enable_index_replica_new", None)
        bucket_ramsize_new = self.input.param("bucket_ramsize_new", None)
        bucket = self.input.param("bucket", "default")
        bucket_ramsize = self.input.param("bucket_ramsize", 200)
        bucket_replica = self.input.param("bucket_replica", 1)
        enable_flush = self.input.param("enable_flush", None)
        enable_index_replica = self.input.param("enable_index_replica", None)
        wait = self.input.param("wait", False)

        remote_client = RemoteMachineShellConnection(self.master)

        self._create_bucket(remote_client, bucket, bucket_type=bucket_type, bucket_ramsize=bucket_ramsize,
                            bucket_replica=bucket_replica, wait=wait, enable_flush=enable_flush, enable_index_replica=enable_index_replica)

        cli_command = "bucket-edit"
        options = "--bucket={0}".format(bucket)
        options += (" --enable-flush={0}".format(enable_flush_new), "")[enable_flush_new is None]
        options += (" --enable-index-replica={0}".format(enable_index_replica_new), "")[enable_index_replica_new is None]
        options += (" --bucket-ramsize={0}".format(bucket_ramsize_new), "")[bucket_ramsize_new is None]

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        expectedResults = {'bucket_name':'BBB', 'ram_quota':465567744, 'num_replicas':1,
                            'replica_index':True, 'eviction_policy':'value_only', 'type':'membase', \
                            "autocompaction":'false', "purge_interval":"undefined", \
                            "flush_enabled":True, "num_threads":3, "source":self.source, \
                            "user":self.ldapUser, "ip":'::1', "port":57457, 'sessionid':'', \
                            'auth_type':self.source, 'storage_mode': 'couchstore'}
        self.checkConfig(8202, self.master, expectedResults)

        cli_command = "bucket-flush --force"
        options = "--bucket={0}".format(bucket)
        if enable_flush_new is not None:
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        expectedResults = {"bucket_name":"BBB", "source":self.source, "user":self.ldapUser, "ip":"::1", 'port':57457}
        self.checkConfig(8204, self.master, expectedResults)

        cli_command = "bucket-delete"
        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        expectedResults = {"bucket_name":"BBB", "source":self.source, "user":self.ldapUser, "ip":"::1", "port":57457}
        self.checkConfig(8203, self.master, expectedResults)

        remote_client.disconnect()

    def testSettingCompaction(self):
        '''setting-compaction OPTIONS:
        --compaction-db-percentage=PERCENTAGE     at which point database compaction is triggered
        --compaction-db-size=SIZE[MB]             at which point database compaction is triggered
        --compaction-view-percentage=PERCENTAGE   at which point view compaction is triggered
        --compaction-view-size=SIZE[MB]           at which point view compaction is triggered
        --compaction-period-from=HH:MM            allow compaction time period from
        --compaction-period-to=HH:MM              allow compaction time period to
        --enable-compaction-abort=[0|1]           allow compaction abort when time expires
        --enable-compaction-parallel=[0|1]        allow parallel compaction for database and view'''
        compaction_db_percentage = self.input.param("compaction-db-percentage", None)
        compaction_db_size = self.input.param("compaction-db-size", None)
        compaction_view_percentage = self.input.param("compaction-view-percentage", None)
        compaction_view_size = self.input.param("compaction-view-size", None)
        compaction_period_from = self.input.param("compaction-period-from", None)
        compaction_period_to = self.input.param("compaction-period-to", None)
        enable_compaction_abort = self.input.param("enable-compaction-abort", None)
        enable_compaction_parallel = self.input.param("enable-compaction-parallel", None)
        bucket = self.input.param("bucket", "default")
        output = self.input.param("output", '')
        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "setting-compaction"
        options = (" --compaction-db-percentage={0}".format(compaction_db_percentage), "")[compaction_db_percentage is None]
        options += (" --compaction-db-size={0}".format(compaction_db_size), "")[compaction_db_size is None]
        options += (" --compaction-view-percentage={0}".format(compaction_view_percentage), "")[compaction_view_percentage is None]
        options += (" --compaction-view-size={0}".format(compaction_view_size), "")[compaction_view_size is None]
        options += (" --compaction-period-from={0}".format(compaction_period_from), "")[compaction_period_from is None]
        options += (" --compaction-period-to={0}".format(compaction_period_to), "")[compaction_period_to is None]
        options += (" --enable-compaction-abort={0}".format(enable_compaction_abort), "")[enable_compaction_abort is None]
        options += (" --enable-compaction-parallel={0}".format(enable_compaction_parallel), "")[enable_compaction_parallel is None]

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        expectedResults = {"parallel_db_and_view_compaction":False, "database_fragmentation_threshold:size":10485760, "database_fragmentation_threshold:view_fragmentation_threshold":{},
                           "real_userid:source":self.source, "real_userid:user":self.ldapUser, "remote:ip":"::1", "remote:port":60019}
        self.checkConfig(8225, self.master, expectedResults)
        remote_client.disconnect()



    def testSettingEmail(self):
        setting_enable_email_alert = self.input.param("enable-email-alert", 1)
        setting_email_recipients = self.input.param("email-recipients", '*****@*****.**')
        setting_email_sender = self.input.param("email-sender", '*****@*****.**')
        setting_email_user = self.input.param("email-user", 'ritam')
        setting_email_password = self.input.param("email-password", 'password')
        setting_email_host = self.input.param("email-host", 'localhost')
        setting_email_port = self.input.param("email-port", '25')
        setting_email_encrypt = self.input.param("enable-email-encrypt", 0)

        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "setting-alert"
        options = (" --enable-email-alert={0}".format(setting_enable_email_alert))
        options += (" --email-recipients={0}".format(setting_email_recipients))
        options += (" --email-sender={0}".format(setting_email_sender))
        options += (" --email-user={0}".format(setting_email_user))
        options += (" --email-password={0}".format(setting_emaiL_password))
        options += (" --email-host={0}".format(setting_email_host))
        options += (" --email-port={0}".format(setting_email_port))
        options += (" --enable-email-encrypt={0}".format(setting_email_encrypt))
        options += (" --alert-auto-failover-node")
        options += (" --alert-auto-failover-max-reached")
        options += (" --alert-auto-failover-node-down")
        options += (" --alert-auto-failover-cluster-small")
        options += (" --alert-ip-changed")
        options += (" --alert-disk-space")
        options += (" --alert-meta-overhead")
        options += (" --alert-meta-oom")
        options += (" --alert-write-failed")

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        expectedResults = {"email_server:encrypt":False, "email_server:port":25, "email_server:host":"localhost", "email_server:user":"******",
                           "alerts":["auto_failover_node", "auto_failover_maximum_reached", "auto_failover_other_nodes_down", "auto_failover_cluster_too_small", "ip", "disk", "overhead", "ep_oom_errors", "ep_item_commit_failed"],
                           "recipients":["*****@*****.**"], "sender":"*****@*****.**", "real_userid:source":self.source, "real_userid:user":self.ldapUser,
                           "remote:ip":"127.0.0.1", "port":60025}
        self.checkConfig(8223, self.master, expectedResults)
        remote_client.disconnect()

    def testSettingNotification(self):
        setting_enable_notification = self.input.param("enable-notification", 1)

        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "setting-notification"
        options = (" --enable-notification={0}".format(setting_enable_notification))

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        remote_client.disconnect()

    def testSettingFailover(self):
        setting_enable_auto_failover = self.input.param("enable-auto-failover", 1)
        setting_auto_failover_timeout = self.input.param("auto-failover-timeout", 50)

        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "setting-autofailover"
        options = (" --enable-auto-failover={0}".format(setting_enable_auto_failover))
        options += (" --auto-failover-timeout={0}".format(setting_auto_failover_timeout))

        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        expectedResults = {"max_nodes":1, "timeout":50, "real_userid:source":self.source, "real_userid:user":self.ldapUser, "remote:ip":"::1", "port":60033}
        self.checkConfig(8220, self.master, expectedResults)
        remote_client.disconnect()

    def testSSLManage(self):
        '''ssl-manage OPTIONS:
        --retrieve-cert=CERTIFICATE            retrieve cluster certificate AND save to a pem file
        --regenerate-cert=CERTIFICATE          regenerate cluster certificate AND save to a pem file'''
        xdcr_cert = self.input.param("xdcr-certificate", None)
        xdcr_cert = "/tmp/" + xdcr_cert
        cli_command = "ssl-manage"
        remote_client = RemoteMachineShellConnection(self.master)
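        # Regenerate the cluster SSL certificate, save it to xdcr_cert, and verify
        # that the corresponding audit event was recorded.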
        options = "--regenerate-cert={0}".format(xdcr_cert)
        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        self.assertFalse(error, "Error thrown during CLI execution %s" % error)
        self.shell.execute_command("rm {0}".format(xdcr_cert))
        expectedResults = {"real_userid:source":self.source, "real_userid:user":self.ldapUser, "remote:ip":"::1", "port":60035}
        self.checkConfig(8226, self.master, expectedResults)


    """ tests for the group-manage option. group creation, renaming and deletion are tested .
        These tests require a cluster of four or more nodes. """
    def testCreateRenameDeleteGroup(self):
        remote_client = RemoteMachineShellConnection(self.master)
        cli_command = "group-manage"
        source = self.source
        user = self.ldapUser
        rest = RestConnection(self.master)
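        # Each group-manage action (create/rename/delete) should emit its own audit
        # event; the group's UUID is read back from the REST zone URI so it can be
        # matched against the audit record.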

        if self.os == "linux":
            # create group
            options = " --create --group-name=group2"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                    options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            output = self.del_runCmd_value(output)
            self.assertEqual(output[1], "SUCCESS: Server group created")
            expectedResults = {'group_name':'group2', 'source':source, 'user':user, 'ip':'127.0.0.1', 'port':1234}
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr
            self.checkConfig(8210, self.master, expectedResults)

            # rename group test
            options = " --rename=group3 --group-name=group2"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                    options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            output = self.del_runCmd_value(output)
            self.assertEqual(output[1], ["SUCCESS: group renamed group"])
            expectedResults = {}
            expectedResults = {'group_name':'group3', 'source':source, 'user':user, 'ip':'127.0.0.1', 'port':1234, 'nodes':[]}
            expectedResults['uuid'] = tempStr
            self.checkConfig(8212, self.master, expectedResults)

            # delete group test
            options = " --delete --group-name=group3"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                    options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            output = self.del_runCmd_value(output)
            self.assertEqual(output[1], ["SUCCESS: group deleted group"])
            expectedResults = {}
            expectedResults = {'group_name':'group3', 'source':source, 'user':user, 'ip':'127.0.0.1', 'port':1234}
            expectedResults['uuid'] = tempStr
            self.checkConfig(8211, self.master, expectedResults)


        if self.os == "windows":
            # create group
            options = " --create --group-name=group2"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                    options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            self.assertEqual(output[0], "SUCCESS: group created group2")
            expectedResults = {'group_name':'group2', 'source':source, 'user':user, 'ip':'127.0.0.1', 'port':1234}
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr
            self.checkConfig(8210, self.master, expectedResults)

            # rename group test
            options = " --rename=group3 --group-name=group2"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                    options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            self.assertEqual(output[0], "SUCCESS: group renamed group2")
            expectedResults = {}
            expectedResults = {'group_name':'group3', 'source':source, 'user':user, 'ip':'127.0.0.1', 'port':1234, 'nodes':[]}
            expectedResults['uuid'] = tempStr
            self.checkConfig(8212, self.master, expectedResults)

            # delete group test
            options = " --delete --group-name=group3"
            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                    options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
            self.assertEqual(output[0], "SUCCESS: group deleted group3")
            expectedResults = {}
            expectedResults = {'group_name':'group3', 'source':source, 'user':user, 'ip':'127.0.0.1', 'port':1234}
            expectedResults['uuid'] = tempStr
            self.checkConfig(8211, self.master, expectedResults)

        remote_client.disconnect()
Example #48
0
    def test_replica_distribution_in_zone(self):
        if len(self.servers) < int(self.num_node):
            msg = "This test needs minimum {1} servers to run.\n  Currently in ini file \
                   has only {0} servers".format(len(self.servers),
                                                self.num_node)
            self.log.error("{0}".format(msg))
            raise Exception(msg)
        if self.shutdown_zone >= self.zone:
            msg = "shutdown zone should smaller than zone"
            raise Exception(msg)
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        zones = []
        zones.append("Group 1")
        nodes_in_zone = {}
        nodes_in_zone["Group 1"] = [serverInfo.ip]
        """ Create zone base on params zone in test"""
        if int(self.zone) > 1:
            for i in range(1, int(self.zone)):
                a = "Group "
                zones.append(a + str(i + 1))
                rest.add_zone(a + str(i + 1))
        servers_rebalanced = []
        self.user = serverInfo.rest_username
        self.password = serverInfo.rest_password
        if len(self.servers) % int(self.zone) != 0:
            msg = "unbalance zone.  Recaculate to make balance ratio node/zone"
            raise Exception(msg)
        """ Add node to each zone """
        k = 1
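        # Distribute the remaining servers evenly across the zones; 'Group 1' already
        # holds the master node, so it receives one node fewer than the other zones.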
        for i in range(0, self.zone):
            if "Group 1" in zones[i]:
                total_node_per_zone = int(len(self.servers)) // int(
                    self.zone) - 1
            else:
                nodes_in_zone[zones[i]] = []
                total_node_per_zone = int(len(self.servers)) // int(self.zone)
            for n in range(0, total_node_per_zone):
                nodes_in_zone[zones[i]].append(self.servers[k].ip)
                rest.add_node(user=self.user, password=self.password, \
                    remoteIp=self.servers[k].ip, port='8091', zone_name=zones[i])
                k += 1
        otpNodes = [node.id for node in rest.node_statuses()]
        """ Start rebalance and monitor it. """
        started = rest.rebalance(otpNodes, [])

        if started:
            try:
                result = rest.monitorRebalance()
            except RebalanceFailedException as e:
                self.log.error("rebalance failed: {0}".format(e))
                return False, servers_rebalanced
            msg = "successfully rebalanced cluster {0}"
            self.log.info(msg.format(result))
        """ Verify replica of one node should not in same zone of active. """
        self._verify_replica_distribution_in_zones(nodes_in_zone, "tap")
        """ Simulate entire nodes down in zone(s) by killing erlang process"""
        if self.shutdown_zone >= 1 and self.zone >= 2:
            self.log.info("Start to shutdown nodes in zone to failover")
            for down_zone in range(1, self.shutdown_zone + 1):
                down_zone = "Group " + str(down_zone + 1)
                for sv in nodes_in_zone[down_zone]:
                    for si in self.servers:
                        if si.ip == sv:
                            server = si

                    shell = RemoteMachineShellConnection(server)
                    os_info = shell.extract_remote_info()
                    shell.kill_erlang(os_info)
                    """ Failover down node(s)"""
                    failed_over = rest.fail_over("ns_1@" + server.ip)
                    if not failed_over:
                        self.log.info(
                            "unable to failover the node the first time. "
                            "try again in 75 seconds..")
                        time.sleep(75)
                        failed_over = rest.fail_over("ns_1@" + server.ip)
                    self.assertTrue(
                        failed_over,
                        "unable to failover node after erlang killed")
        otpNodes = [node.id for node in rest.node_statuses()]
        self.log.info("start rebalance after failover.")
        """ Start rebalance and monitor it. """
        started = rest.rebalance(otpNodes, [])
        if started:
            try:
                result = rest.monitorRebalance()
            except RebalanceFailedException as e:
                self.log.error("rebalance failed: {0}".format(e))
                return False, servers_rebalanced
            msg = "successfully rebalanced in selected nodes from the cluster ? {0}"
            self.log.info(msg.format(result))
        """ Compare current keys in bucekt with initial loaded keys count. """
        self._verify_total_keys(self.servers[0], self.num_items)
Example #49
0
class unidirectional(XDCRNewBaseTest):
    def setUp(self):
        super(unidirectional, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()

    def tearDown(self):
        super(unidirectional, self).tearDown()

    def suite_setUp(self):
        self.log.info("*** unidirectional: suite_setUp() ***")

    def suite_tearDown(self):
        self.log.info("*** unidirectional: suite_tearDown() ***")

    """Testing Unidirectional load( Loading only at source) Verifying whether XDCR replication is successful on
    subsequent destination clusters.Create/Update/Delete operations are performed based on doc-ops specified by the user. """

    def load_with_ops(self):
        self.setup_xdcr_and_load()
        self.perform_update_delete()
        self.verify_results()

    """Testing Unidirectional load( Loading only at source) Verifying whether XDCR replication is successful on
    subsequent destination clusters. Create/Update/Delete are performed in parallel- doc-ops specified by the user. """

    def load_with_async_ops(self):
        self.setup_xdcr_and_load()
        self.async_perform_update_delete()
        self.verify_results()

    def load_with_async_ops_diff_data_size(self):
        # Load 1 item with size 1
        # 52 alphabets (small and capital letter)
        self.src_cluster.load_all_buckets(52, value_size=1)
        # Load items with below sizes of 1M
        self.src_cluster.load_all_buckets(5, value_size=1000000)
        # Load items with size 10MB
        # Getting memory issue with 20MB data on VMs.
        self.src_cluster.load_all_buckets(1, value_size=10000000)

        self.verify_results()

    """Testing Unidirectional load( Loading only at source). Failover node at Source/Destination while
    Create/Update/Delete are performed after based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_ops_with_warmup(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node())
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node())

        self.sleep(self._wait_timeout)
        self.perform_update_delete()
        self.sleep(self._wait_timeout // 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_ops_with_warmup_master(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))

        self.sleep(self._wait_timeout)
        self.perform_update_delete()
        self.sleep(self._wait_timeout // 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_async_ops_with_warmup(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            "Test case does not apply for Ephemeral buckets"
            return
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node())
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node())

        self.sleep(self._wait_timeout)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout // 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_async_ops_with_warmup_master(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            "Test case does not apply for Ephemeral buckets"
            return
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))

        self.sleep(self._wait_timeout)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout // 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.sleep(300)

        self.verify_results()

    def load_with_failover(self):
        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes()

        self.sleep(self._wait_timeout // 6)
        self.perform_update_delete()

        self.verify_results()

    def load_with_failover_then_add_back(self):

        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.src_cluster.add_back_node()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.dest_cluster.add_back_node()

        self.perform_update_delete()

        self.verify_results()

    """Testing Unidirectional load( Loading only at source). Failover node at Source/Destination while
    Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_failover_master(self):
        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_master()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_master()

        self.sleep(self._wait_timeout // 6)
        self.perform_update_delete()

        self.sleep(300)

        self.verify_results()

    """Testing Unidirectional load( Loading only at source). Failover node at Source/Destination while
    Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_async_failover(self):
        self.setup_xdcr_and_load()

        tasks = []
        if "C1" in self._failover:
            tasks.append(self.src_cluster.async_failover())
        if "C2" in self._failover:
            tasks.append(self.dest_cluster.async_failover())

        self.perform_update_delete()
        self.sleep(self._wait_timeout // 4)

        for task in tasks:
            task.result()

        if "C1" in self._failover:
            self.src_cluster.rebalance_failover_nodes()
        if "C2" in self._failover:
            self.dest_cluster.rebalance_failover_nodes()

        self.verify_results()

    """Replication with compaction ddocs and view queries on both clusters.

        This test begins by loading a given number of items on the source cluster.
        It creates num_views as development/production view with default
        map view funcs(_is_dev_ddoc = True by default) on both clusters.
        Then we disabled compaction for ddoc on src cluster. While we don't reach
        expected fragmentation for ddoc on src cluster we update docs and perform
        view queries for all views. Then we start compaction when fragmentation
        was reached fragmentation_value. When compaction was completed we perform
        a full verification: wait for the disk queues to drain
        and then verify that there has been any data loss on all clusters."""

    def replication_with_ddoc_compaction(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return

        self.setup_xdcr_and_load()

        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        fragmentation_value = self._input.param("fragmentation_value", 80)
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views,
                                               is_dev_ddoc)

        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]

        query = {"full_set": "true", "stale": "false"}

        tasks = self.src_cluster.async_create_views(ddoc_name, views,
                                                    BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views,
                                                      BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)

        self.src_cluster.disable_compaction()
        fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(
            prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)

        # generate load until fragmentation reached
        while fragmentation_monitor.state != "FINISHED":
            # update docs to create fragmentation
            self.src_cluster.update_delete_data(OPS.UPDATE)
            for view in views:
                # run queries to create indexes
                self.src_cluster.query_view(prefix + ddoc_name, view.name,
                                            query)
        fragmentation_monitor.result()

        compaction_task = self.src_cluster.async_compact_view(
            prefix + ddoc_name, 'default')

        self.assertTrue(compaction_task.result())

        self.verify_results()

    """Replication with disabled/enabled ddoc compaction on source cluster.

        This test begins by loading a given number of items on the source cluster.
        Then compaction is disabled or enabled on both clusters (set via params).
        We then mutate and delete data on the source cluster 3 times.
        After each deletion we re-create the deleted items. Once the data has been changed 3 times
        we perform a full verification: wait for the disk queues to drain
        and then verify that there has been no data loss on all clusters."""

    def replication_with_disabled_ddoc_compaction(self):
        self.setup_xdcr_and_load()

        if "C1" in self._disable_compaction:
            self.src_cluster.disable_compaction()
        if "C2" in self._disable_compaction:
            self.dest_cluster.disable_compaction()

        # perform doc's ops 3 times to increase rev number
        for _ in range(3):
            self.async_perform_update_delete()
            # restore(re-creating) deleted items
            if 'C1' in self._del_clusters:
                c1_kv_gen = self.src_cluster.get_kv_gen()
                gen_delete = copy.deepcopy(c1_kv_gen[OPS.DELETE])
                self.src_cluster.load_all_buckets_from_generator(
                    kv_gen=gen_delete)
                self.sleep(5)

        self.sleep(600)
        self.verify_results()

    def replication_while_rebooting_a_non_master_destination_node(self):
        self.setup_xdcr_and_load()
        self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
        self.perform_update_delete()
        self.sleep(self._wait_timeout // 2)
        rebooted_node = self.dest_cluster.reboot_one_node(self)
        NodeHelper.wait_node_restarted(rebooted_node,
                                       self,
                                       wait_time=self._wait_timeout * 4,
                                       wait_if_warmup=True)

        self.verify_results()

    def replication_with_firewall_enabled(self):
        self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
        self.setup_xdcr_and_load()
        self.perform_update_delete()

        NodeHelper.enable_firewall(self.dest_master)
        self.sleep(30)
        NodeHelper.disable_firewall(self.dest_master)
        self.verify_results()

    """Testing Unidirectional append ( Loading only at source) Verifying whether XDCR replication is successful on
    subsequent destination clusters. """

    def test_append(self):
        self.setup_xdcr_and_load()
        self.verify_results()
        loop_count = self._input.param("loop_count", 20)
        for i in range(loop_count):
            self.log.info("Append iteration # %s" % i)
            gen_append = BlobGenerator('loadOne',
                                       'loadOne',
                                       self._value_size,
                                       end=self._num_items)
            self.src_cluster.load_all_buckets_from_generator(gen_append,
                                                             ops=OPS.APPEND,
                                                             batch_size=1)
            self.sleep(self._wait_timeout)
        self.verify_results()

    '''
    This method runs the cbcollectinfo tool after setting up uni-XDCR and checks
    whether the output generated by cbcollectinfo contains the xdcr log file or not.
    '''

    def collectinfotest_for_xdcr(self):
        self.load_with_ops()
        self.node_down = self._input.param("node_down", False)
        self.log_filename = self._input.param("file_name", "collectInfo")
        self.shell = RemoteMachineShellConnection(self.src_master)
        self.shell.execute_cbcollect_info("%s.zip" % (self.log_filename))
        from clitest import collectinfotest
        # HACK added self.buckets data member.
        self.buckets = self.src_cluster.get_buckets()
        collectinfotest.CollectinfoTests.verify_results(
            self, self.log_filename)

    """ Verify the fix for MB-9548"""

    def verify_replications_deleted_after_bucket_deletion(self):
        self.setup_xdcr_and_load()
        self.verify_results()
        rest_conn = RestConnection(self.src_master)
        replications = rest_conn.get_replications()
        self.assertTrue(replications, "Number of replications should not be 0")
        self.src_cluster.delete_all_buckets()
        self.sleep(60)
        replications = rest_conn.get_replications()
        self.log.info("Replications : %s" % replications)
        self.assertTrue(
            not replications,
            "Rest returns replication list even after source bucket is deleted "
        )

    """ Verify fix for MB-9862"""

    def test_verify_memcache_connections(self):
        allowed_memcached_conn = self._input.param("allowed_connections", 100)
        max_ops_per_second = self._input.param("max_ops_per_second", 2500)
        min_item_size = self._input.param("min_item_size", 128)
        num_docs = self._input.param("num_docs", 30000)
        # start load, max_ops_per_second is the combined limit for all buckets
        mcsodaLoad = LoadWithMcsoda(self.src_master, num_docs, prefix='')
        mcsodaLoad.cfg["max-ops"] = 0
        mcsodaLoad.cfg["max-ops-per-sec"] = max_ops_per_second
        mcsodaLoad.cfg["exit-after-creates"] = 1
        mcsodaLoad.cfg["min-value-size"] = min_item_size
        mcsodaLoad.cfg["json"] = 0
        mcsodaLoad.cfg["batch"] = 100
        loadDataThread = Thread(target=mcsodaLoad.load_data,
                                name='mcloader_default')
        loadDataThread.daemon = True
        loadDataThread.start()

        src_remote_shell = RemoteMachineShellConnection(self.src_master)
        machine_type = src_remote_shell.extract_remote_info().type.lower()
        while loadDataThread.is_alive() and machine_type == 'linux':
            command = "netstat -lpnta | grep 11210 | grep TIME_WAIT | wc -l"
            output, _ = src_remote_shell.execute_command(command)
            if int(output[0]) > allowed_memcached_conn:
                # stop load
                mcsodaLoad.load_stop()
                loadDataThread.join()
                self.fail("Memcached connections {0} are increased above {1} \
                            on Source node".format(allowed_memcached_conn,
                                                   int(output[0])))
            self.sleep(5)

        # stop load
        mcsodaLoad.load_stop()
        loadDataThread.join()

    # Test to verify MB-10116
    def verify_ssl_private_key_not_present_in_logs(self):
        zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
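        # The cbcollect_info output (or /diag on Windows) must not contain the XDCR
        # SSL private key; grep for the PEM header and fail if anything is found.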
        try:
            self.shell = RemoteMachineShellConnection(self.src_master)
            self.load_with_ops()
            self.shell.execute_cbcollect_info(zip_file)
            if self.shell.extract_remote_info().type.lower() != "windows":
                command = "unzip %s" % (zip_file)
                output, error = self.shell.execute_command(command)
                self.shell.log_command_output(output, error)
                if len(error) > 0:
                    raise Exception(
                        "unable to unzip the files. Check unzip command output for help"
                    )
                cmd = 'grep -R "BEGIN RSA PRIVATE KEY" cbcollect_info*/'
                output, _ = self.shell.execute_command(cmd)
            else:
                cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'BEGIN RSA PRIVATE KEY'".format(
                    self.src_master.ip, self.src_master.rest_username,
                    self.src_master.rest_password)
                output, _ = self.shell.execute_command(cmd)
            self.assertTrue(
                not output,
                "XDCR SSL Private Key is found diag logs -> %s" % output)
        finally:
            self.shell.delete_files(zip_file)
            self.shell.delete_files("cbcollect_info*")

    # Buckets States
    def delete_recreate_dest_buckets(self):
        self.setup_xdcr_and_load()

        # Remove destination buckets
        self.dest_cluster.delete_all_buckets()

        # Code for re-create_buckets
        self.create_buckets_on_cluster("C2")

        self._resetup_replication_for_recreate_buckets("C2")

        self.async_perform_update_delete()
        self.verify_results()

    def flush_dest_buckets(self):
        self.setup_xdcr_and_load()

        # flush destination buckets
        self.dest_cluster.flush_buckets()

        self.async_perform_update_delete()
        self.verify_results()

    # Nodes Crashing Scenarios
    def __kill_processes(self, crashed_nodes=[]):
        for node in crashed_nodes:
            try:
                NodeHelper.kill_erlang(node)
            except:
                self.log.info(
                    'Could not kill erlang process on node, continuing..')

    def __start_cb_server(self, node):
        shell = RemoteMachineShellConnection(node)
        shell.start_couchbase()
        shell.disconnect()

    def test_node_crash_master(self):
        self.setup_xdcr_and_load()

        crashed_nodes = []
        crash = self._input.param("crash", "").split('-')
        if "C1" in crash:
            crashed_nodes.append(self.src_master)
        if "C2" in crash:
            crashed_nodes.append(self.dest_master)

        self.__kill_processes(crashed_nodes)

        for crashed_node in crashed_nodes:
            self.__start_cb_server(crashed_node)

        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.sleep(self._wait_timeout)
        else:
            NodeHelper.wait_warmup_completed(crashed_nodes)

        self.async_perform_update_delete()
        self.verify_results()

    # Disaster at site.
    # 1. Crash Source Cluster, sleep n seconds.
    # 2. Crash Dest Cluster.
    # 3. Wait for Source Cluster to warmup. Load more data and perform mutations on Src.
    # 4. Wait for Dest to warmup.
    # 5. Verify data.
    def test_node_crash_cluster(self):
        self.setup_xdcr_and_load()

        crashed_nodes = []
        crash = self._input.param("crash", "").split('-')
        if "C1" in crash:
            crashed_nodes += self.src_cluster.get_nodes()
            self.__kill_processes(crashed_nodes)
            self.sleep(30)
        if "C2" in crash:
            crashed_nodes += self.dest_cluster.get_nodes()
            self.__kill_processes(crashed_nodes)

        for crashed_node in crashed_nodes:
            self.__start_cb_server(crashed_node)

        bucket_type = self._input.param("bucket_type", "membase")

        if "C1" in crash:
            if bucket_type == "ephemeral":
                self.sleep(self._wait_timeout)
            else:
                NodeHelper.wait_warmup_completed(self.src_cluster.get_nodes())
            gen_create = BlobGenerator('loadTwo',
                                       'loadTwo',
                                       self._value_size,
                                       end=self._num_items)
            self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()

        if "C2" in crash:
            if bucket_type == "ephemeral":
                self.sleep(self._wait_timeout)
            else:
                NodeHelper.wait_warmup_completed(self.dest_cluster.get_nodes())

        self.verify_results()

    """ Test if replication restarts 60s after idle xdcr following dest bucket flush """

    def test_idle_xdcr_dest_flush(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        bucket = self.dest_cluster.get_bucket_by_name(BUCKET_NAME.DEFAULT)
        self.dest_cluster.flush_buckets([bucket])

        self.sleep(self._wait_timeout)

        self.verify_results()

    """ Test if replication restarts 60s after idle xdcr following dest bucket recreate """

    def test_idle_xdcr_dest_recreate(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        bucket = self.dest_cluster.get_bucket_by_name(BUCKET_NAME.DEFAULT)
        self.dest_cluster.delete_bucket(BUCKET_NAME.DEFAULT)
        bucket_params = self._create_bucket_params(size=bucket.bucket_size)
        self.dest_cluster.create_default_bucket(bucket_params)
        self.sleep(self._wait_timeout)

        self.verify_results()

    """ Test if replication restarts 60s after idle xdcr following dest failover """

    def test_idle_xdcr_dest_failover(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        self.dest_cluster.failover_and_rebalance_nodes()

        self.sleep(self._wait_timeout)

        self.verify_results()

    def test_optimistic_replication(self):
        """Tests with 2 buckets with customized optimisic replication thresholds
           one greater than value_size, other smaller
        """
        from .xdcrnewbasetests import REPL_PARAM
        self.setup_xdcr_and_load()
        self._wait_for_replication_to_catchup()
        for remote_cluster in self.src_cluster.get_remote_clusters():
            for replication in remote_cluster.get_replications():
                src_bucket_name = replication.get_src_bucket().name
                opt_repl_threshold = replication.get_xdcr_setting(
                    REPL_PARAM.OPTIMISTIC_THRESHOLD)
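                # docs_opt_repd counts mutations replicated optimistically, i.e. sent
                # to the target without the per-doc getMeta pre-check that is applied
                # to docs larger than the threshold.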
                docs_opt_replicated_stat = 'replications/%s/docs_opt_repd' % replication.get_repl_id(
                )
                opt_replicated = RestConnection(
                    self.src_master).fetch_bucket_xdcr_stats(src_bucket_name)[
                        'op']['samples'][docs_opt_replicated_stat][-1]
                self.log.info(
                    "Bucket: %s, value size: %s, optimistic threshold: %s"
                    " number of mutations optimistically replicated: %s" %
                    (src_bucket_name, self._value_size, opt_repl_threshold,
                     opt_replicated))
                if self._value_size <= opt_repl_threshold:
                    if opt_replicated == self._num_items:
                        self.log.info(
                            "SUCCESS: All keys in bucket %s were optimistically"
                            " replicated" %
                            (replication.get_src_bucket().name))
                    else:
                        self.fail(
                            "Value size: %s, optimistic threshold: %s,"
                            " number of docs optimistically replicated: %s" %
                            (self._value_size, opt_repl_threshold,
                             opt_replicated))
                else:
                    if opt_replicated == 0:
                        self.log.info(
                            "SUCCESS: No key in bucket %s was optimistically"
                            " replicated" %
                            (replication.get_src_bucket().name))
                    else:
                        self.fail("Partial optimistic replication detected!")

    def test_disk_full(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        self.sleep(self._wait_timeout)

        zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
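        # Collect logs on both masters and confirm that the "Approaching full disk
        # warning." message appears, either in the cbcollect_info archive or via the
        # /diag endpoint on Windows.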
        try:
            for node in [self.src_master, self.dest_master]:
                self.shell = RemoteMachineShellConnection(node)
                self.shell.execute_cbcollect_info(zip_file)
                if self.shell.extract_remote_info().type.lower() != "windows":
                    command = "unzip %s" % (zip_file)
                    output, error = self.shell.execute_command(command)
                    self.shell.log_command_output(output, error)
                    if len(error) > 0:
                        raise Exception(
                            "unable to unzip the files. Check unzip command output for help"
                        )
                    cmd = 'grep -R "Approaching full disk warning." cbcollect_info*/'
                    output, _ = self.shell.execute_command(cmd)
                else:
                    cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format(
                        self.src_master.ip, self.src_master.rest_username,
                        self.src_master.rest_password)
                    output, _ = self.shell.execute_command(cmd)
                self.assertNotEqual(
                    len(output), 0,
                    "Full disk warning not generated as expected in %s" %
                    node.ip)
                self.log.info("Full disk warning generated as expected in %s" %
                              node.ip)

                self.shell.delete_files(zip_file)
                self.shell.delete_files("cbcollect_info*")
        except Exception as e:
            self.log.info(e)

    def test_retry_connections_on_errors_before_restart(self):
        """
        CBQE-3373: Do not restart pipeline as soon as connection errors are
        detected, backoff and retry 5 times before trying to restart pipeline.
        """
        passed = False
        # start data load after setting up xdcr
        load_tasks = self.setup_xdcr_async_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        # block port 11210 on target to simulate a connection error
        shell = RemoteMachineShellConnection(self.dest_master)
        out, err = shell.execute_command(
            "/sbin/iptables -A INPUT -p tcp --dport"
            " 11210 -j DROP")
        shell.log_command_output(out, err)
        out, err = shell.execute_command("/sbin/iptables -L")
        shell.log_command_output(out, err)

        # complete loading
        for task in load_tasks:
            task.result()

        # wait for goxdcr to detect i/o timeout and try repairing
        self.sleep(self._wait_timeout * 5)

        # unblock port 11210 so replication can continue
        out, err = shell.execute_command(
            "/sbin/iptables -D INPUT -p tcp --dport"
            " 11210 -j DROP")
        shell.log_command_output(out, err)
        out, err = shell.execute_command("/sbin/iptables -L")
        shell.log_command_output(out, err)
        shell.disconnect()

        # check logs for traces of retry attempts
        for node in self.src_cluster.get_nodes():
            count1 = NodeHelper.check_goxdcr_log(
                node, "Failed to repair connections to target cluster",
                goxdcr_log)
            count2 = NodeHelper.check_goxdcr_log(
                node, "Failed to set up connections to target cluster",
                goxdcr_log)
            count = count1 + count2
            if count > 0:
                self.log.info('SUCCESS: We tried to repair connections before'
                              ' restarting pipeline')
                passed = True

        if not passed:
            self.fail(
                "No attempts were made to repair connections on %s before"
                " restarting pipeline" % self.src_cluster.get_nodes())
        self.verify_results()

    def test_verify_mb19802_1(self):
        load_tasks = self.setup_xdcr_async_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        conn = RemoteMachineShellConnection(
            self.dest_cluster.get_master_node())
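        # Stop Couchbase on the destination master while the async load is still
        # running; after it is restarted, the source nodes must not log a fatal
        # batchGetMeta abort.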
        conn.stop_couchbase()

        for task in load_tasks:
            task.result()

        conn.start_couchbase()
        self.sleep(300)

        for node in self.src_cluster.get_nodes():
            count = NodeHelper.check_goxdcr_log(
                node, "batchGetMeta received fatal error and had to abort",
                goxdcr_log)
            self.assertEqual(
                count, 0,
                "batchGetMeta error message found in " + str(node.ip))
            self.log.info("batchGetMeta error message not found in " +
                          str(node.ip))

        self.verify_results()

    def test_verify_mb19802_2(self):
        load_tasks = self.setup_xdcr_async_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        self.dest_cluster.failover_and_rebalance_master()

        for task in load_tasks:
            task.result()

        for node in self.src_cluster.get_nodes():
            count = NodeHelper.check_goxdcr_log(
                node, "batchGetMeta received fatal error and had to abort",
                goxdcr_log)
            self.assertEqual(
                count, 0, "batchGetMeta timed out error message found in " +
                str(node.ip))
            self.log.info("batchGetMeta error message not found in " +
                          str(node.ip))

        self.sleep(300)
        self.verify_results()

    def test_verify_mb19697(self):
        self.setup_xdcr_and_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        self.src_cluster.pause_all_replications()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=100000)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()
        self._wait_for_replication_to_catchup()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=100000)
        load_tasks = self.src_cluster.async_load_all_buckets_from_generator(
            gen)

        self.src_cluster.rebalance_out()

        for task in load_tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.src_cluster.rebalance_in()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=100000)
        load_tasks = self.src_cluster.async_load_all_buckets_from_generator(
            gen)

        self.src_cluster.failover_and_rebalance_master()

        for task in load_tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        for node in self.src_cluster.get_nodes():
            count = NodeHelper.check_goxdcr_log(
                node,
                "counter .+ goes backward, maybe due to the pipeline is restarted",
                goxdcr_log)
            self.assertEqual(
                count, 0,
                "counter goes backward, maybe due to the pipeline is restarted "
                "error message found in " + str(node.ip))
            self.log.info(
                "counter goes backward, maybe due to the pipeline is restarted "
                "error message not found in " + str(node.ip))

        self.sleep(300)
        self.verify_results()

    def test_verify_mb20463(self):
        src_version = NodeHelper.get_cb_version(
            self.src_cluster.get_master_node())
        if float(src_version[:3]) != 4.5:
            self.log.info("Source cluster has to be at 4.5 for this test")
            return

        servs = self._input.servers[2:4]
        params = {}
        params['num_nodes'] = len(servs)
        params['product'] = 'cb'
        params['version'] = '4.1.2-6088'
        params['vbuckets'] = [1024]
        self.log.info("will install {0} on {1}".format('4.1.2-6088',
                                                       [s.ip for s in servs]))
        InstallerJob().parallel_install(servs, params)

        if params['product'] in ["couchbase", "couchbase-server", "cb"]:
            success = True
            for server in servs:
                success &= RemoteMachineShellConnection(
                    server).is_couchbase_installed()
                if not success:
                    self.fail(
                        "some nodes were not installed successfully on target cluster!"
                    )

        self.log.info("4.1.2 installed successfully on target cluster")

        conn = RestConnection(self.dest_cluster.get_master_node())
        conn.add_node(user=self._input.servers[3].rest_username,
                      password=self._input.servers[3].rest_password,
                      remoteIp=self._input.servers[3].ip)
        self.sleep(30)
        conn.rebalance(otpNodes=[node.id for node in conn.node_statuses()])
        self.sleep(30)
        conn.create_bucket(bucket='default', ramQuotaMB=512)

        tasks = self.setup_xdcr_async_load()

        self.sleep(30)

        NodeHelper.enable_firewall(self.dest_master)
        self.sleep(30)
        NodeHelper.disable_firewall(self.dest_master)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()

    def test_rollback(self):
        bucket = self.src_cluster.get_buckets()[0]
        nodes = self.src_cluster.get_nodes()

        # Stop Persistence on Node A & Node B
        for node in nodes:
            mem_client = MemcachedClientHelper.direct_client(node, bucket)
            mem_client.stop_persistence()

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'
        self.setup_xdcr()

        self.src_cluster.pause_all_replications()

        gen = BlobGenerator("C1-",
                            "C1-",
                            self._value_size,
                            end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()

        # Perform mutations on the bucket
        self.async_perform_update_delete()

        rest1 = RestConnection(self.src_cluster.get_master_node())
        rest2 = RestConnection(self.dest_cluster.get_master_node())

        # Fetch count of docs in src and dest cluster
        _count1 = rest1.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]

        self.log.info(
            "Before rollback src cluster count = {0} dest cluster count = {1}".
            format(_count1, _count2))

        # Kill memcached on Node A so that Node B becomes master
        shell = RemoteMachineShellConnection(
            self.src_cluster.get_master_node())
        shell.kill_memcached()

        # Start persistence on Node B
        mem_client = MemcachedClientHelper.direct_client(nodes[1], bucket)
        mem_client.start_persistence()

        # Failover Node B
        failover_task = self.src_cluster.async_failover()
        failover_task.result()

        # Wait for Failover & rollback to complete
        self.sleep(60)

        # Fetch count of docs in src and dest cluster
        _count1 = rest1.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]

        self.log.info(
            "After rollback src cluster count = {0} dest cluster count = {1}".
            format(_count1, _count2))

        self.assertTrue(
            self.src_cluster.wait_for_outbound_mutations(),
            "Mutations in source cluster not replicated to target after rollback"
        )
        self.log.info(
            "Mutations in source cluster replicated to target after rollback")

        count = NodeHelper.check_goxdcr_log(
            nodes[0],
            "Received rollback from DCP stream",
            goxdcr_log,
            timeout=60)
        self.assertGreater(count, 0, "rollback did not happen as expected")
        self.log.info("rollback happened as expected")

    def test_verify_mb19181(self):
        load_tasks = self.setup_xdcr_async_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                     + '/goxdcr.log*'

        self.dest_cluster.failover_and_rebalance_master()

        for task in load_tasks:
            task.result()

        for node in self.src_cluster.get_nodes():
            count = NodeHelper.check_goxdcr_log(
                node, "Can't move update state from", goxdcr_log)
            self.assertEqual(
                count, 0,
                "Can't move update state from - error message found in " +
                str(node.ip))
            self.log.info(
                "Can't move update state from - error message not found in " +
                str(node.ip))

        self.verify_results()

    def test_verify_mb21369(self):
        repeat = self._input.param("repeat", 5)
        load_tasks = self.setup_xdcr_async_load()

        conn = RemoteMachineShellConnection(self.src_cluster.get_master_node())
        output, error = conn.execute_command(
            "netstat -an | grep " + self.src_cluster.get_master_node().ip +
            ":11210 | wc -l")
        conn.log_command_output(output, error)
        before = output[0]
        self.log.info("No. of memcached connections before: {0}".format(
            output[0]))

        for i in range(0, repeat):
            self.src_cluster.pause_all_replications()
            self.sleep(30)
            self.src_cluster.resume_all_replications()
            self.sleep(self._wait_timeout)
            output, error = conn.execute_command(
                "netstat -an | grep " + self.src_cluster.get_master_node().ip +
                ":11210 | wc -l")
            conn.log_command_output(output, error)
            self.log.info(
                "No. of memcached connections in iteration {0}:  {1}".format(
                    i + 1, output[0]))
            if int(output[0]) - int(before) > 5:
                self.fail(
                    "Number of memcached connections changed beyond allowed limit"
                )

        for task in load_tasks:
            task.result()

        self.log.info(
            "No. of memcached connections did not increase with pausing and resuming replication multiple times"
        )
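
The connection-count probe in this test just shells out to netstat and counts lines mentioning the memcached port. The same measurement, run locally, can be sketched with the standard library; the helper name is illustrative and the allowed drift of 5 connections is the test's own threshold.

import subprocess

def memcached_connection_count(ip, port=11210):
    # Same probe as the test: count netstat entries that mention ip:port.
    cmd = "netstat -an | grep %s:%s | wc -l" % (ip, port)
    return int(subprocess.check_output(cmd, shell=True).strip())

# before = memcached_connection_count("10.1.2.3")
# ... pause and resume replication a few times ...
# assert memcached_connection_count("10.1.2.3") - before <= 5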

    def test_maxttl_setting(self):
        maxttl = int(self._input.param("maxttl", None))
        self.setup_xdcr_and_load()
        self.merge_all_buckets()
        self._wait_for_replication_to_catchup()
        self.sleep(maxttl, "waiting for docs to expire per maxttl properly")
        for bucket in self.src_cluster.get_buckets():
            items = RestConnection(
                self.src_master).get_active_key_count(bucket)
            self.log.info(
                "Docs in source bucket is {0} after maxttl has elapsed".format(
                    items))
            if items != 0:
                self.fail(
                    "Docs in source bucket is not 0 after maxttl has elapsed")
        self._wait_for_replication_to_catchup()
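
get_active_key_count and fetch_bucket_stats (used in test_rollback above) both read from the bucket stats REST endpoint; the tests rely on the last curr_items sample. A minimal way to read that number outside the framework is sketched below; the response shape matches what the tests parse, but the credentials and helper name are placeholders.

import json
import subprocess

def curr_items(host, bucket="default", user="Administrator", password="password"):
    # Read the most recent curr_items sample from the bucket stats endpoint.
    cmd = ("curl -s -u %s:%s http://%s:8091/pools/default/buckets/%s/stats"
           % (user, password, host, bucket))
    stats = json.loads(subprocess.check_output(cmd, shell=True))
    return stats["op"]["samples"]["curr_items"][-1]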
Example #50
 def _verify_replica_distribution_in_zones(self,
                                           nodes,
                                           command,
                                           saslPassword=""):
     shell = RemoteMachineShellConnection(self.servers[0])
     info = shell.extract_remote_info()
     if info.type.lower() == 'linux':
         cbstat_command = "%scbstats" % (
             testconstants.LINUX_COUCHBASE_BIN_PATH)
     elif info.type.lower() == 'windows':
         cbstat_command = "%scbstats.exe" % (
             testconstants.WIN_COUCHBASE_BIN_PATH)
     elif info.type.lower() == 'mac':
         cbstat_command = "%scbstats" % (
             testconstants.MAC_COUCHBASE_BIN_PATH)
     else:
         raise Exception("Not support OS")
     saslPassword = ''
     versions = RestConnection(self.master).get_nodes_versions()
     for group in nodes:
         for node in nodes[group]:
             if versions[0][:5] in COUCHBASE_VERSION_2:
                 command = "tap"
                 if not info.type.lower() == 'windows':
                     commands = "%s %s:11210 %s -b %s -p \"%s\" |grep :vb_filter: |  awk '{print $1}' \
                         | xargs | sed 's/eq_tapq:replication_ns_1@//g'  | sed 's/:vb_filter://g' \
                         " % (cbstat_command, node, command, "default",
                              saslPassword)
                 elif info.type.lower() == 'windows':
                     """ standalone gawk.exe should be copy to ../ICW/bin for command below to work.
                         Ask IT to do this if you don't know how """
                     commands = "%s %s:11210 %s -b %s -p \"%s\" | grep.exe :vb_filter: | gawk.exe '{print $1}' \
                            | sed.exe 's/eq_tapq:replication_ns_1@//g'  | sed.exe 's/:vb_filter://g' \
                            " % (cbstat_command, node, command, "default",
                                 saslPassword)
                 output, error = shell.execute_command(commands)
             elif versions[0][:5] in COUCHBASE_FROM_VERSION_3:
                 command = "dcp"
                 if not info.type.lower() == 'windows':
                     commands = "%s %s:11210 %s -b %s -p \"%s\" | grep :replication:ns_1@%s |  grep vb_uuid | \
                                 awk '{print $1}' | sed 's/eq_dcpq:replication:ns_1@%s->ns_1@//g' | \
                                 sed 's/:.*//g' | sort -u | xargs \
                                " % (cbstat_command, node, command,
                                     "default", saslPassword, node, node)
                     output, error = shell.execute_command(commands)
                 elif info.type.lower() == 'windows':
                     commands = "%s %s:11210 %s -b %s -p \"%s\" | grep.exe :replication:ns_1@%s |  grep vb_uuid | \
                                 gawk.exe '{print $1}' | sed.exe 's/eq_dcpq:replication:ns_1@%s->ns_1@//g' | \
                                 sed.exe 's/:.*//g' \
                                " % (cbstat_command, node, command,
                                     "default", saslPassword, node, node)
                     output, error = shell.execute_command(commands)
                     output = sorted(set(output))
             shell.log_command_output(output, error)
             output = output[0].split(" ")
             if node not in output:
                 self.log.info("{0}".format(nodes))
                 self.log.info(
                     "replicas of node {0} are in nodes {1}".format(
                         node, output))
                 self.log.info(
                     "replicas of node {0} are not in its zone {1}".format(
                         node, group))
             else:
                 raise Exception(
                     "replica of node {0} are on its own zone {1}".format(
                         node, group))
     shell.disconnect()
Example #51
class QueryTests(BaseTestCase):
    def setUp(self):
        if not self._testMethodName == 'suite_setUp':
            self.skip_buckets_handle = True
        super(QueryTests, self).setUp()
        self.version = self.input.param("cbq_version", "sherlock")
        if self.input.tuq_client and "client" in self.input.tuq_client:
            self.shell = RemoteMachineShellConnection(self.input.tuq_client["client"])
        else:
            self.shell = RemoteMachineShellConnection(self.master)
        if not self._testMethodName == 'suite_setUp' and self.input.param("cbq_version", "sherlock") != 'sherlock':
            self._start_command_line_query(self.master)
        self.use_rest = self.input.param("use_rest", True)
        self.max_verify = self.input.param("max_verify", None)
        self.buckets = RestConnection(self.master).get_buckets()
        self.docs_per_day = self.input.param("doc-per-day", 49)
        self.item_flag = self.input.param("item_flag", 4042322160)
        self.n1ql_port = self.input.param("n1ql_port", 8093)
        self.analytics = self.input.param("analytics",False)
        self.dataset = self.input.param("dataset", "default")
        self.primary_indx_type = self.input.param("primary_indx_type", 'GSI')
        self.index_type = self.input.param("index_type", 'GSI')
        self.primary_indx_drop = self.input.param("primary_indx_drop", False)
        self.monitoring = self.input.param("monitoring",False)
        self.isprepared = False
        self.named_prepare = self.input.param("named_prepare", None)
        self.skip_primary_index = self.input.param("skip_primary_index",False)
        self.scan_consistency = self.input.param("scan_consistency", 'REQUEST_PLUS')
        shell = RemoteMachineShellConnection(self.master)
        type = shell.extract_remote_info().distribution_type
        self.path = testconstants.LINUX_COUCHBASE_BIN_PATH
        if type.lower() == 'windows':
            self.path = testconstants.WIN_COUCHBASE_BIN_PATH
        elif type.lower() == "mac":
            self.path = testconstants.MAC_COUCHBASE_BIN_PATH
        self.threadFailure = False
        if self.primary_indx_type.lower() == "gsi":
            self.gsi_type = self.input.param("gsi_type", 'plasma')
        else:
            self.gsi_type = None
        if self.input.param("reload_data", False):
            if self.analytics:
                self.cluster.rebalance([self.master, self.cbas_node], [], [self.cbas_node], services=['cbas'])
            for bucket in self.buckets:
                self.cluster.bucket_flush(self.master, bucket=bucket,
                                          timeout=self.wait_timeout * 5)
            self.gens_load = self.generate_docs(self.docs_per_day)
            self.load(self.gens_load, flag=self.item_flag)
            if self.analytics:
                self.cluster.rebalance([self.master, self.cbas_node], [self.cbas_node], [], services=['cbas'])
        self.gens_load = self.generate_docs(self.docs_per_day)
        if self.input.param("gomaxprocs", None):
            self.configure_gomaxprocs()
        self.gen_results = TuqGenerators(self.log, self.generate_full_docs_list(self.gens_load))
        if (self.analytics == False):
            self.create_primary_index_for_3_0_and_greater()
        if (self.analytics):
            self.setup_analytics()
            self.sleep(30,'wait for analytics setup')

    def suite_setUp(self):
        try:
            self.load(self.gens_load, flag=self.item_flag)
            if not self.input.param("skip_build_tuq", True):
                self._build_tuq(self.master)
            self.skip_buckets_handle = True
            if (self.analytics):
                self.cluster.rebalance([self.master, self.cbas_node], [self.cbas_node], [], services=['cbas'])
                self.setup_analytics()
                self.sleep(30,'wait for analytics setup')
        except:
            self.log.error('SUITE SETUP FAILED')
            self.tearDown()

    def tearDown(self):
        if self._testMethodName == 'suite_tearDown':
            self.skip_buckets_handle = False
        if self.analytics:
            bucket_username = "******"
            bucket_password = "******"
            data = 'use Default ;'
            for bucket in self.buckets:
                data += 'disconnect bucket {0} if connected;'.format(bucket.name)
                data += 'drop dataset {0} if exists;'.format(bucket.name+ "_shadow")
                data += 'drop bucket {0} if exists;'.format(bucket.name)
            filename = "file.txt"
            f = open(filename,'w')
            f.write(data)
            f.close()
            url = 'http://{0}:8095/analytics/service'.format(self.cbas_node.ip)
            cmd = 'curl -s --data pretty=true --data-urlencode "*****@*****.**" ' + url + " -u " + bucket_username + ":" + bucket_password
            os.system(cmd)
            os.remove(filename)
        super(QueryTests, self).tearDown()

    def suite_tearDown(self):
        if not self.input.param("skip_build_tuq", False):
            if hasattr(self, 'shell'):
                self.shell.execute_command("killall /tmp/tuq/cbq-engine")
                self.shell.execute_command("killall tuqtng")
                self.shell.disconnect()

##############################################################################################
#
#  Setup Helpers
##############################################################################################

    def setup_analytics(self):
        data = 'use Default;'
        bucket_username = "******"
        bucket_password = "******"
        for bucket in self.buckets:
#             data += 'create bucket {0} with {{"bucket":"{0}","nodes":"{1}"}} ;'.format(
#                 bucket.name, self.master.ip)
            data = 'create dataset {1} on {0}; '.format(bucket.name,
                                                                bucket.name + "_shadow")
            filename = "file.txt"
            f = open(filename,'w')
            f.write(data)
            f.close()
            url = 'http://{0}:8095/analytics/service'.format(self.cbas_node.ip)
            cmd = 'curl -s --data pretty=true --data-urlencode "*****@*****.**" ' + url + " -u " + bucket_username + ":" + bucket_password
            os.system(cmd)
            os.remove(filename)
        data = 'connect link Local;'
        filename = "file.txt"
        f = open(filename,'w')
        f.write(data)
        f.close()
        url = 'http://{0}:8095/analytics/service'.format(self.cbas_node.ip)
        cmd = 'curl -s --data pretty=true --data-urlencode "*****@*****.**" ' + url + " -u " + bucket_username + ":" + bucket_password
        os.system(cmd)
        os.remove(filename)
        
    def run_active_requests(self, e, t):
        while not e.isSet():
            logging.debug('wait_for_event_timeout starting')
            event_is_set = e.wait(t)
            logging.debug('event set: %s', event_is_set)
            if event_is_set:
                result = self.run_cbq_query("select * from system:active_requests")
                self.assertTrue(result['metrics']['resultCount'] == 1)
                requestId = result['requestID']
                result = self.run_cbq_query(
                    'delete from system:active_requests where requestId  =  "%s"' % requestId)
                time.sleep(20)
                result = self.run_cbq_query(
                    'select * from system:active_requests  where requestId  =  "%s"' % requestId)
                self.assertTrue(result['metrics']['resultCount'] == 0)
                result = self.run_cbq_query("select * from system:completed_requests")
                requestId = result['requestID']
                result = self.run_cbq_query(
                    'delete from system:completed_requests where requestId  =  "%s"' % requestId)
                time.sleep(10)
                result = self.run_cbq_query(
                    'select * from system:completed_requests where requestId  =  "%s"' % requestId)
                self.assertTrue(result['metrics']['resultCount'] == 0)
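
run_active_requests drives the system:active_requests and system:completed_requests keyspaces through run_cbq_query. The same statements can be issued directly against the query service REST endpoint; the sketch below assumes a node running the query service on port 8093 with Administrator credentials, and the request id is a placeholder taken from a previous response.

import subprocess

list_cmd = ("curl -s -u Administrator:password "
            "http://127.0.0.1:8093/query/service "
            "-d 'statement=select * from system:active_requests'")
print(subprocess.check_output(list_cmd, shell=True))

delete_cmd = ("curl -s -u Administrator:password "
              "http://127.0.0.1:8093/query/service "
              "-d \"statement=delete from system:active_requests "
              "where requestId = '<request-id>'\"")
# subprocess.check_output(delete_cmd, shell=True)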

##############################################################################################
#
#   COMMON FUNCTIONS
##############################################################################################

    def run_query_from_template(self, query_template):
        self.query = self.gen_results.generate_query(query_template)
        expected_result = self.gen_results.generate_expected_result()
        actual_result = self.run_cbq_query()
        return actual_result, expected_result

    def run_query_with_subquery_select_from_template(self, query_template):
        subquery_template = re.sub(r'.*\$subquery\(', '', query_template)
        subquery_template = subquery_template[:subquery_template.rfind(')')]
        keys_num = int(re.sub(r'.*KEYS \$', '', subquery_template).replace('KEYS $', ''))
        subquery_full_list = self.generate_full_docs_list(gens_load=self.gens_load,keys=self._get_keys(keys_num))
        subquery_template = re.sub(r'USE KEYS.*', '', subquery_template)
        sub_results = TuqGenerators(self.log, subquery_full_list)
        self.query = sub_results.generate_query(subquery_template)
        expected_sub = sub_results.generate_expected_result()
        alias = re.sub(r',.*', '', re.sub(r'.*\$subquery\(.*\)', '', query_template))
        alias = re.sub(r'.*as','', re.sub(r'FROM.*', '', alias)).strip()
        if not alias:
            alias = '$1'
        for item in self.gen_results.full_set:
            item[alias] = expected_sub[0]
        query_template = re.sub(r',.*\$subquery\(.*\).*%s' % alias, ',%s' % alias, query_template)
        self.query = self.gen_results.generate_query(query_template)
        expected_result = self.gen_results.generate_expected_result()
        actual_result = self.run_cbq_query()
        return actual_result, expected_result

    def run_query_with_subquery_from_template(self, query_template):
        subquery_template = re.sub(r'.*\$subquery\(', '', query_template)
        subquery_template = subquery_template[:subquery_template.rfind(')')]
        subquery_full_list = self.generate_full_docs_list(gens_load=self.gens_load)
        sub_results = TuqGenerators(self.log, subquery_full_list)
        self.query = sub_results.generate_query(subquery_template)
        expected_sub = sub_results.generate_expected_result()
        alias = re.sub(r',.*', '', re.sub(r'.*\$subquery\(.*\)', '', query_template))
        alias = re.sub(r'.*as ', '', alias).strip()
        self.gen_results = TuqGenerators(self.log, expected_sub)
        query_template = re.sub(r'\$subquery\(.*\).*%s' % alias, ' %s' % alias, query_template)
        self.query = self.gen_results.generate_query(query_template)
        expected_result = self.gen_results.generate_expected_result()
        actual_result = self.run_cbq_query()
        return actual_result, expected_result

    def negative_common_body(self, queries_errors={}):
        if not queries_errors:
            self.fail("No queries to run!")
        for bucket in self.buckets:
            for query_template, error in queries_errors.iteritems():
                try:
                    query = self.gen_results.generate_query(query_template)
                    actual_result = self.run_cbq_query(query.format(bucket.name))
                except CBQError as ex:
                    self.log.error(ex)
                    self.assertTrue(str(ex).find(error) != -1,
                                    "Error is incorrect.Actual %s.\n Expected: %s.\n" %(
                                                                str(ex).split(':')[-1], error))
                else:
                    self.fail("There were no errors. Error expected: %s" % error)

    def run_cbq_query(self, query=None, min_output_size=10, server=None):
        if query is None:
            query = self.query
        if server is None:
            server = self.master
            if server.ip == "127.0.0.1":
                self.n1ql_port = server.n1ql_port
        else:
            if server.ip == "127.0.0.1":
                self.n1ql_port = server.n1ql_port
            if self.input.tuq_client and "client" in self.input.tuq_client:
                server = self.tuq_client
        query_params = {}
        cred_params = {'creds': []}
        rest = RestConnection(server)
        username = rest.username
        password = rest.password
        cred_params['creds'].append({'user': username, 'pass': password})
        for bucket in self.buckets:
            if bucket.saslPassword:
                cred_params['creds'].append({'user': '******' % bucket.name, 'pass': bucket.saslPassword})
        query_params.update(cred_params)
        if self.use_rest:
            query_params.update({'scan_consistency': self.scan_consistency})
            self.log.info('RUN QUERY %s' % query)

            if self.analytics:
                query = query + ";"
                for bucket in self.buckets:
                    query = query.replace(bucket.name,bucket.name+"_shadow")
                result = CBASHelper(self.cbas_node).execute_statement_on_cbas(query, "immediate")
                result = json.loads(result)

            else:
                result = rest.query_tool(query, self.n1ql_port, query_params=query_params)


        else:
            if self.version == "git_repo":
                output = self.shell.execute_commands_inside("$GOPATH/src/github.com/couchbase/query/" +\
                                                            "shell/cbq/cbq ","","","","","","")
            else:
                os = self.shell.extract_remote_info().type.lower()
                if not(self.isprepared):
                    query = query.replace('"', '\\"')
                    query = query.replace('`', '\\`')

                cmd =  "%s/cbq  -engine=http://%s:%s/ -q -u %s -p %s" % (self.path, server.ip, server.port, username, password)

                output = self.shell.execute_commands_inside(cmd,query,"","","","","")
                if not(output[0] == '{'):
                    output1 = '{'+str(output)
                else:
                    output1 = output
                result = json.loads(output1)
        if isinstance(result, str) or 'errors' in result:
            raise CBQError(result, server.ip)
        self.log.info("TOTAL ELAPSED TIME: %s" % result["metrics"]["elapsedTime"])
        return result

    def build_url(self, version):
        info = self.shell.extract_remote_info()
        type = info.distribution_type.lower()
        if type in ["ubuntu", "centos", "red hat"]:
            url = "https://s3.amazonaws.com/packages.couchbase.com/releases/couchbase-query/dp1/"
            url += "couchbase-query_%s_%s_linux.tar.gz" %(
                                version, info.architecture_type)
        #TODO for windows
        return url

    def _build_tuq(self, server):
        if self.version == "git_repo":
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                goroot = testconstants.LINUX_GOROOT
                gopath = testconstants.LINUX_GOPATH
            else:
                goroot = testconstants.WINDOWS_GOROOT
                gopath = testconstants.WINDOWS_GOPATH
            if self.input.tuq_client and "gopath" in self.input.tuq_client:
                gopath = self.input.tuq_client["gopath"]
            if self.input.tuq_client and "goroot" in self.input.tuq_client:
                goroot = self.input.tuq_client["goroot"]
            cmd = "rm -rf {0}/src/github.com".format(gopath)
            self.shell.execute_command(cmd)
            cmd= 'export GOROOT={0} && export GOPATH={1} &&'.format(goroot, gopath) +\
                ' export PATH=$PATH:$GOROOT/bin && ' +\
                'go get github.com/couchbaselabs/tuqtng;' +\
                'cd $GOPATH/src/github.com/couchbaselabs/tuqtng; ' +\
                'go get -d -v ./...; cd .'
            self.shell.execute_command(cmd)
            cmd = 'export GOROOT={0} && export GOPATH={1} &&'.format(goroot, gopath) +\
                ' export PATH=$PATH:$GOROOT/bin && ' +\
                'cd $GOPATH/src/github.com/couchbaselabs/tuqtng; go build; cd .'
            self.shell.execute_command(cmd)
            cmd = 'export GOROOT={0} && export GOPATH={1} &&'.format(goroot, gopath) +\
                ' export PATH=$PATH:$GOROOT/bin && ' +\
                'cd $GOPATH/src/github.com/couchbaselabs/tuqtng/tuq_client; go build; cd .'
            self.shell.execute_command(cmd)
        else:
            cbq_url = self.build_url(self.version)
            #TODO for windows
            cmd = "cd /tmp; mkdir tuq;cd tuq; wget {0} -O tuq.tar.gz;".format(cbq_url)
            cmd += "tar -xvf tuq.tar.gz;rm -rf tuq.tar.gz"
            self.shell.execute_command(cmd)

    def _start_command_line_query(self, server):
        if self.version == "git_repo":
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                gopath = testconstants.LINUX_GOPATH
            else:
                gopath = testconstants.WINDOWS_GOPATH
            if self.input.tuq_client and "gopath" in self.input.tuq_client:
                gopath = self.input.tuq_client["gopath"]
            if os == 'windows':
                cmd = "cd %s/src/github.com/couchbase/query/server/main; " % (gopath) +\
                "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            else:
                cmd = "cd %s/src/github.com/couchbase/query//server/main; " % (gopath) +\
                "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %(
                                                                server.ip, server.port)
            self.shell.execute_command(cmd)
        elif self.version == "sherlock":
            if self.services_init.find('n1ql') != -1:
                return
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                couchbase_path = testconstants.LINUX_COUCHBASE_BIN_PATH
            else:
                couchbase_path = testconstants.WIN_COUCHBASE_BIN_PATH
            if self.input.tuq_client and "sherlock_path" in self.input.tuq_client:
                couchbase_path = "%s/bin" % self.input.tuq_client["sherlock_path"]
            if os == 'windows':
                cmd = "cd %s; " % (couchbase_path) +\
                "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            else:
                cmd = "cd %s; " % (couchbase_path) +\
                "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %(
                                                                server.ip, server.port)
                n1ql_port = self.input.param("n1ql_port", None)
                if server.ip == "127.0.0.1" and server.n1ql_port:
                    n1ql_port = server.n1ql_port
                if n1ql_port:
                    cmd = "cd %s; " % (couchbase_path) +\
                './cbq-engine -datastore http://%s:%s/ -http=":%s">n1ql.log 2>&1 &' %(
                                                                server.ip, server.port, n1ql_port)
            self.shell.execute_command(cmd)
        else:
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                cmd = "cd /tmp/tuq;./cbq-engine -couchbase http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            else:
                cmd = "cd /cygdrive/c/tuq;./cbq-engine.exe -couchbase http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            self.shell.execute_command(cmd)

    def _parse_query_output(self, output):
        if output.find("cbq>") == 0:
            output = output[output.find("cbq>") + 4:].strip()
        if output.find("tuq_client>") == 0:
            output = output[output.find("tuq_client>") + 11:].strip()
        if output.find("cbq>") != -1:
            output = output[:output.find("cbq>")].strip()
        if output.find("tuq_client>") != -1:
            output = output[:output.find("tuq_client>")].strip()
        return json.loads(output)

    def generate_docs(self, num_items, start=0):
        try:
            return getattr(self, 'generate_docs_' + self.dataset)(num_items, start)
        except:
            self.fail("There is no dataset %s, please enter a valid one" % self.dataset)

    def generate_docs_default(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee(docs_per_day, start)

    def generate_docs_sabre(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_sabre(docs_per_day, start)

    def generate_docs_employee(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee_data(docs_per_day = docs_per_day, start = start)

    def generate_docs_simple(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee_simple_data(docs_per_day = docs_per_day, start = start)

    def generate_docs_sales(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee_sales_data(docs_per_day = docs_per_day, start = start)

    def generate_docs_bigdata(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_bigdata(end=(1000*docs_per_day), start=start, value_size=self.value_size)


    def _verify_results(self, actual_result, expected_result, missing_count = 1, extra_count = 1):
        if len(actual_result) != len(expected_result):
            missing, extra = self.check_missing_and_extra(actual_result, expected_result)
            self.log.error("Missing items: %s.\n Extra items: %s" % (missing[:missing_count], extra[:extra_count]))
            self.fail("Results are incorrect.Actual num %s. Expected num: %s.\n" % (
                                            len(actual_result), len(expected_result)))
        if self.max_verify is not None:
            actual_result = actual_result[:self.max_verify]
            expected_result = expected_result[:self.max_verify]

        msg = "Results are incorrect.\n Actual first and last 100:  %s.\n ... \n %s" +\
        "Expected first and last 100: %s.\n  ... \n %s"
        self.assertTrue(actual_result == expected_result,
                          msg % (actual_result[:100],actual_result[-100:],
                                 expected_result[:100],expected_result[-100:]))

    def check_missing_and_extra(self, actual, expected):
        missing = []
        extra = []
        for item in actual:
            if not (item in expected):
                 extra.append(item)
        for item in expected:
            if not (item in actual):
                missing.append(item)
        return missing, extra

    def sort_nested_list(self, result):
        actual_result = []
        for item in result:
            curr_item = {}
            for key, value in item.iteritems():
                if isinstance(value, list) or isinstance(value, set):
                    curr_item[key] = sorted(value)
                else:
                    curr_item[key] = value
            actual_result.append(curr_item)
        return actual_result

    def configure_gomaxprocs(self):
        max_proc = self.input.param("gomaxprocs", None)
        cmd = "export GOMAXPROCS=%s" % max_proc
        for server in self.servers:
            shell_connection = RemoteMachineShellConnection(server)
            shell_connection.execute_command(cmd)

    def create_primary_index_for_3_0_and_greater(self):
        self.log.info("CREATE PRIMARY INDEX using %s" % self.primary_indx_type)
        rest = RestConnection(self.master)
        versions = rest.get_nodes_versions()
        if versions[0].startswith("4") or versions[0].startswith("3") or versions[0].startswith("5"):
            for bucket in self.buckets:
                if self.primary_indx_drop:
                    self.log.info("Dropping primary index for %s using %s ..." % (bucket.name,self.primary_indx_type))
                    self.query = "DROP PRIMARY INDEX ON %s USING %s" % (bucket.name,self.primary_indx_type)
                    #self.run_cbq_query()
                    self.sleep(3, 'Sleep for some time after index drop')
                self.query = 'select * from system:indexes where name="#primary" and keyspace_id = "%s"' % bucket.name
                res = self.run_cbq_query()
                self.sleep(10)
                if self.monitoring:
                    self.query = "delete from system:completed_requests"
                    self.run_cbq_query()
                if not self.skip_primary_index:
                    if (res['metrics']['resultCount'] == 0):
                        self.query = "CREATE PRIMARY INDEX ON %s USING %s" % (bucket.name, self.primary_indx_type)
                        self.log.info("Creating primary index for %s ..." % bucket.name)
                        try:
                            self.run_cbq_query()
                            self.primary_index_created = True
                            if self.primary_indx_type.lower() == 'gsi':
                                self._wait_for_index_online(bucket, '#primary')
                        except Exception, ex:
                            self.log.info(str(ex))
Example #52
    def test_n1ql_through_rest_with_redaction_enabled(self):
        gen_create = BlobGenerator('logredac',
                                   'logredac-',
                                   self.value_size,
                                   end=self.num_items)
        self._load_all_buckets(self.master, gen_create, "create", 0)
        shell = RemoteMachineShellConnection(self.master)
        type = shell.extract_remote_info().distribution_type
        curl_path = "curl"
        if type.lower() == 'windows':
            self.curl_path = "%scurl" % self.path

        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=create primary index on default'"
            % (curl_path, self.master.ip, self.n1ql_port))

        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=create index idx on default(fake)'"
            % (curl_path, self.master.ip, self.n1ql_port))

        shell.execute_command(
            "%s -u Administr:pasword http://%s:%s/query/service -d 'statement=select * from default'"
            % (curl_path, self.master.ip, self.n1ql_port))

        shell.execute_command(
            "%s http://Administrator:password@%s:%s/query/service -d 'statement=select * from default'"
            % (curl_path, self.master.ip, self.n1ql_port))

        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=select * from default'"
            % (curl_path, self.master.ip, self.n1ql_port))

        # Get the CAS mismatch error by double inserting a document, second one will throw desired error
        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=insert into default (KEY,VALUE) VALUES(\"test\",{\"field1\":\"test\"})'"
            % (curl_path, self.master.ip, self.n1ql_port))

        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=insert into default (KEY,VALUE) VALUES(\"test\",{\"field1\":\"test\"})'"
            % (curl_path, self.master.ip, self.n1ql_port))

        # Delete a document that does not exist
        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=DELETE FROM default USE KEYS \"fakekey\"})'"
            % (curl_path, self.master.ip, self.n1ql_port))

        #set log redaction level, collect logs, verify log files exist and verify them for redaction
        self.set_redaction_level()
        self.start_logs_collection()
        result = self.monitor_logs_collection()
        logs_path = result["perNode"]["[email protected]"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.query.log")
        shell.disconnect()
Example #53
    def setUp(self):
        super(RackzoneBaseTest, self).setUp()
        self.product = self.input.param("product", "cb")
        self.version = self.input.param("version", "2.5.1-1082")
        self.type = self.input.param('type', 'enterprise')
        self.doc_ops = self.input.param("doc_ops", None)
        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")
        self.default_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"

        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.doc_ops = self.input.param("doc_ops", "create")
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.append(self.cluster.master)
        self.bucket_util.create_default_bucket()
        self.bucket_util.add_rbac_user()
        # define the data that will be used to test
        self.blob_generator = self.input.param("blob_generator", False)
        server_info = self.servers[0]
        rest = RestConnection(server_info)
        if not rest.is_enterprise_edition():
            raise Exception("This couchbase server is not Enterprise Edition.\
                  This RZA feature requires Enterprise Edition to work")
        if self.blob_generator:
            # gen_load data is used for upload before each test
            self.gen_load = BlobGenerator('test',
                                          'test-',
                                          self.doc_size,
                                          end=self.num_items)
            # gen_update is used for doing mutation for 1/2th of uploaded data
            self.gen_update = BlobGenerator('test',
                                            'test-',
                                            self.doc_size,
                                            end=(self.num_items / 2 - 1))
            # upload data before each test
            tasks = []
            for bucket in self.bucket_util.buckets:
                tasks.append(
                    self.task.async_load_gen_docs(
                        self.cluster,
                        bucket,
                        self.gen_load,
                        "create",
                        0,
                        batch_size=20,
                        persist_to=self.persist_to,
                        replicate_to=self.replicate_to,
                        pause_secs=5,
                        timeout_secs=self.sdk_timeout,
                        retries=self.sdk_retries))
            for task in tasks:
                self.task.jython_task_manager.get_task_result(task)
        else:
            tasks = []
            age = range(5)
            first = ['james', 'sharon']
            template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
            self.gen_load = DocumentGenerator('test_docs',
                                              template,
                                              age,
                                              first,
                                              start=0,
                                              end=self.num_items)
            for bucket in self.bucket_util.buckets:
                tasks.append(
                    self.task.async_load_gen_docs(
                        self.cluster,
                        bucket,
                        self.gen_load,
                        "create",
                        0,
                        batch_size=20,
                        persist_to=self.persist_to,
                        replicate_to=self.replicate_to,
                        pause_secs=5,
                        timeout_secs=self.sdk_timeout,
                        retries=self.sdk_retries))
            for task in tasks:
                self.task.jython_task_manager.get_task_result(task)
        shell = RemoteMachineShellConnection(self.cluster.master)
        s_type = shell.extract_remote_info().distribution_type
        shell.disconnect()
        self.os_name = "linux"
        self.is_linux = True
        self.cbstat_command = "%scbstats" % LINUX_COUCHBASE_BIN_PATH
        if s_type.lower() == 'windows':
            self.is_linux = False
            self.os_name = "windows"
            self.cbstat_command = "%scbstats.exe" % WIN_COUCHBASE_BIN_PATH
        if s_type.lower() == 'mac':
            self.cbstat_command = "%scbstats" % MAC_COUCHBASE_BIN_PATH
        if self.nonroot:
            self.cbstat_command = "/home/%s%scbstats" \
                                  % (self.cluster.master.ssh_username,
                                     LINUX_COUCHBASE_BIN_PATH)
Example #54
 def run_failover_operations_with_ops(self, chosen, failover_reason):
     """ Method to run fail over operations used in the test scenario based on failover reason """
     # Perform operations related to failover
     failed_over = True
     for node in chosen:
         unreachable = False
         if failover_reason == 'stop_server':
             unreachable = True
             self.stop_server(node)
             self.log.info(
                 "10 seconds delay to wait for membase-server to shutdown")
             # wait for 5 minutes until node is down
             self.assertTrue(
                 RestHelper(self.rest).wait_for_node_status(
                     node, "unhealthy", 300),
                 msg=
                 "node status is not unhealthy even after waiting for 5 minutes"
             )
         elif failover_reason == "firewall":
             unreachable = True
             self.filter_list.append(node.ip)
             server = [srv for srv in self.servers if node.ip == srv.ip][0]
             RemoteUtilHelper.enable_firewall(
                 server, bidirectional=self.bidirectional)
             status = RestHelper(self.rest).wait_for_node_status(
                 node, "unhealthy", 300)
             if status:
                 self.log.info(
                     "node {0}:{1} is 'unhealthy' as expected".format(
                         node.ip, node.port))
             else:
                 # verify iptables on the node if something wrong
                 for server in self.servers:
                     if server.ip == node.ip:
                         shell = RemoteMachineShellConnection(server)
                         info = shell.extract_remote_info()
                         if info.type.lower() == "windows":
                             o, r = shell.execute_command(
                                 "netsh advfirewall show allprofiles")
                             shell.log_command_output(o, r)
                         else:
                             o, r = shell.execute_command(
                                 "/sbin/iptables --list")
                             shell.log_command_output(o, r)
                         shell.disconnect()
                 self.rest.print_UI_logs()
                 api = self.rest.baseUrl + 'nodeStatuses'
                 status, content, header = self.rest._http_request(api)
                 json_parsed = json.loads(content)
                 self.log.info("nodeStatuses: {0}".format(json_parsed))
                 self.fail(
                     "node status is not unhealthy even after waiting for 5 minutes"
                 )
     nodes = self.filter_servers(self.servers, chosen)
     failed_over = self.cluster.async_failover([self.master],
                                               failover_nodes=chosen,
                                               graceful=self.graceful)
     # Perform Compaction
     compact_tasks = []
     if self.compact:
         for bucket in self.buckets:
             compact_tasks.append(
                 self.cluster.async_compact_bucket(self.master, bucket))
     # Run View Operations
     if self.withViewsOps:
         self.query_and_monitor_view_tasks(nodes)
     # Run mutation operations
     if self.withMutationOps:
         self.run_mutation_operations()
     failed_over.result()
     for task in compact_tasks:
         task.result()
     msg = "rebalance failed while removing failover nodes {0}".format(
         node.id)
     self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
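
RestHelper.wait_for_node_status, used in both failover branches above, is in essence a poll of the same nodeStatuses endpoint whose raw JSON is dumped when the check fails. A rough standalone equivalent is sketched below; the exact key format and field names returned by /nodeStatuses are assumptions based on that dump, and the credentials and function name are placeholders.

import json
import subprocess
import time

def wait_for_status(master_ip, node_ip, expected="unhealthy", timeout=300,
                    user="Administrator", password="password"):
    # Poll /nodeStatuses until node_ip reports the expected status or we time out.
    cmd = ("curl -s -u %s:%s http://%s:8091/nodeStatuses"
           % (user, password, master_ip))
    end = time.time() + timeout
    while time.time() < end:
        statuses = json.loads(subprocess.check_output(cmd, shell=True))
        for host, info in statuses.items():
            if host.startswith(node_ip) and info.get("status") == expected:
                return True
        time.sleep(5)
    return False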
Example #55
class TransferBaseTest(BaseTestCase):
    def setUp(self):
        super(TransferBaseTest, self).setUp()
        self.couchbase_login_info = "%s:%s" % (
            self.input.membase_settings.rest_username,
            self.input.membase_settings.rest_password)
        self.value_size = self.input.param("value_size", 256)
        self.expire_time = self.input.param("expire_time", 60)
        self.item_flag = self.input.param("item_flag", 0)
        self.backup_location = self.input.param("backup_location",
                                                "/tmp/backup")
        self.win_data_location = self.input.param("win_data_location",
                                                  "/tmp/data")
        self.server_origin = self.servers[0]
        self.server_recovery = self.servers[1]
        self.doc_ops = self.input.param("doc_ops", None)
        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")
        self.shell = RemoteMachineShellConnection(self.server_origin)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()

    def tearDown(self):
        super(TransferBaseTest, self).tearDown()

    def load_data(self):
        gen_load = BlobGenerator('nosql',
                                 'nosql-',
                                 self.value_size,
                                 end=self.num_items)
        gen_update = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   end=(self.num_items / 2 - 1))
        gen_expire = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items / 2,
                                   end=(self.num_items * 3 / 4 - 1))
        gen_delete = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items * 3 / 4,
                                   end=self.num_items)
        self._load_all_buckets(self.server_origin,
                               gen_load,
                               "create",
                               0,
                               1,
                               self.item_flag,
                               True,
                               batch_size=20000,
                               pause_secs=5,
                               timeout_secs=180)

        if (self.doc_ops is not None):
            if ("update" in self.doc_ops):
                self._load_all_buckets(self.server_origin,
                                       gen_update,
                                       "update",
                                       0,
                                       1,
                                       self.item_flag,
                                       True,
                                       batch_size=20000,
                                       pause_secs=5,
                                       timeout_secs=180)
            if ("delete" in self.doc_ops):
                self._load_all_buckets(self.server_origin,
                                       gen_delete,
                                       "delete",
                                       0,
                                       1,
                                       self.item_flag,
                                       True,
                                       batch_size=20000,
                                       pause_secs=5,
                                       timeout_secs=180)
            if ("expire" in self.doc_ops):
                self._load_all_buckets(self.server_origin,
                                       gen_expire,
                                       "update",
                                       self.expire_time,
                                       1,
                                       self.item_flag,
                                       True,
                                       batch_size=20000,
                                       pause_secs=5,
                                       timeout_secs=180)
        self._wait_for_stats_all_buckets([self.server_origin])
        time.sleep(30)
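        # Illustration (assuming num_items = 1000 and Python 2 integer division):
        # the generators above are built with these start/end parameters:
        #   gen_load   -> end=1000             (all items created)
        #   gen_update -> end=499              (roughly the first half)
        #   gen_expire -> start=500, end=749   (roughly the third quarter)
        #   gen_delete -> start=750, end=1000  (the last quarter)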
Example #56
0
class QueryTests(BaseTestCase):
    def setUp(self):
        if not self._testMethodName == 'suite_setUp':
            self.skip_buckets_handle = True
        super(QueryTests, self).setUp()
        self.version = self.input.param("cbq_version", "git_repo")
        if self.input.tuq_client and "client" in self.input.tuq_client:
            self.shell = RemoteMachineShellConnection(
                self.input.tuq_client["client"])
        else:
            self.shell = RemoteMachineShellConnection(self.master)
        if not self._testMethodName == 'suite_setUp':
            self._start_command_line_query(self.master)
        self.use_rest = self.input.param("use_rest", True)
        self.max_verify = self.input.param("max_verify", None)
        self.buckets = RestConnection(self.master).get_buckets()
        self.docs_per_day = self.input.param("doc-per-day", 49)
        self.item_flag = self.input.param("item_flag", 4042322160)
        self.n1ql_port = self.input.param("n1ql_port", 8093)
        self.dataset = self.input.param("dataset", "default")
        self.gens_load = self.generate_docs(self.docs_per_day)
        if self.input.param("gomaxprocs", None):
            self.configure_gomaxprocs()
        self.gen_results = TuqGenerators(
            self.log, self.generate_full_docs_list(self.gens_load))
        # temporary for MB-12848
        self.create_primary_index_for_3_0_and_greater()

    def suite_setUp(self):
        try:
            self.load(self.gens_load, flag=self.item_flag)
            self.create_primary_index_for_3_0_and_greater()
            if not self.input.param("skip_build_tuq", True):
                self._build_tuq(self.master)
            self.skip_buckets_handle = True
        except:
            self.log.error('SUITE SETUP FAILED')
            self.tearDown()

    def tearDown(self):
        if self._testMethodName == 'suite_tearDown':
            self.skip_buckets_handle = False
        super(QueryTests, self).tearDown()

    def suite_tearDown(self):
        if not self.input.param("skip_build_tuq", False):
            if hasattr(self, 'shell'):
                self.shell.execute_command("killall /tmp/tuq/cbq-engine")
                self.shell.execute_command("killall tuqtng")
                self.shell.disconnect()

##############################################################################################
#
#   SIMPLE CHECKS
##############################################################################################

    def test_simple_check(self):
        for bucket in self.buckets:
            query_template = 'FROM %s select $str0, $str1 ORDER BY $str0,$str1 ASC' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_simple_negative_check(self):
        queries_errors = {
            'SELECT $str0 FROM {0} WHERE COUNT({0}.$str0)>3':
            'Aggregates not allowed in WHERE',
            'SELECT *.$str0 FROM {0}': 'syntax error',
            'SELECT *.* FROM {0} ... ERROR': 'syntax error',
            'FROM %s SELECT $str0 WHERE id=null': 'syntax error',
        }
        self.negative_common_body(queries_errors)

    def test_consistent_simple_check(self):
        queries = [self.gen_results.generate_query('SELECT $str0, $int0, $int1 FROM %s ' +\
                    'WHERE $str0 IS NOT NULL AND $int0<10 ' +\
                    'OR $int1 = 6 ORDER BY $int0, $int1'),
                   self.gen_results.generate_query('SELECT $str0, $int0, $int1 FROM %s ' +\
                    'WHERE $int1 = 6 OR $str0 IS NOT NULL AND ' +\
                    '$int0<10 ORDER BY $int0, $int1')]
        for bucket in self.buckets:
            actual_result1 = self.run_cbq_query(queries[0] % bucket.name)
            actual_result2 = self.run_cbq_query(queries[1] % bucket.name)
            self.assertTrue(
                actual_result1['results'] == actual_result2['results'],
                "Results are inconsistent.Difference: %s %s %s %s" %
                (len(actual_result1['results']), len(
                    actual_result2['results']),
                 actual_result1['results'][:100],
                 actual_result2['results'][:100]))

    def test_simple_nulls(self):
        queries = ['SELECT id FROM %s WHERE id=NULL or id="null"']
        for bucket in self.buckets:
            for query in queries:
                actual_result = self.run_cbq_query(query % (bucket.name))
                self._verify_results(actual_result['results'], [])

##############################################################################################
#
#   LIMIT OFFSET CHECKS
##############################################################################################

    def test_limit_offset(self):
        for bucket in self.buckets:
            query_template = 'SELECT DISTINCT $str0 FROM %s ORDER BY $str0 LIMIT 10' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)
            query_template = 'SELECT DISTINCT $str0 FROM %s ORDER BY $str0 LIMIT 10 OFFSET 10' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_limit_offset_zero(self):
        for bucket in self.buckets:
            query_template = 'SELECT DISTINCT $str0 FROM %s ORDER BY $str0 LIMIT 0' % (
                bucket.name)
            self.query = self.gen_results.generate_query(query_template)
            actual_result = self.run_cbq_query()
            self.assertEquals(
                actual_result['results'], [],
                "Results are incorrect.Actual %s.\n Expected: %s.\n" %
                (actual_result['results'], []))
            query_template = 'SELECT DISTINCT $str0 FROM %s ORDER BY $str0 LIMIT 10 OFFSET 0' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self.assertEquals(
                actual_result['results'], expected_result,
                "Results are incorrect.Actual %s.\n Expected: %s.\n" %
                (actual_result['results'], expected_result))

    def test_limit_offset_negative_check(self):
        queries_errors = {
            'SELECT DISTINCT $str0 FROM {0} LIMIT -1':
            'Parse Error - syntax error',
            'SELECT DISTINCT $str0 FROM {0} LIMIT 1.1':
            'Parse Error - syntax error',
            'SELECT DISTINCT $str0 FROM {0} OFFSET -1':
            'Parse Error - syntax error',
            'SELECT DISTINCT $str0 FROM {0} OFFSET 1.1':
            'Parse Error - syntax error'
        }
        self.negative_common_body(queries_errors)

##############################################################################################
#
#   ALIAS CHECKS
##############################################################################################

    def test_simple_alias(self):
        for bucket in self.buckets:
            query_template = 'SELECT COUNT($str0) AS COUNT_EMPLOYEE FROM %s' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self.assertEquals(
                actual_result['results'], expected_result,
                "Results are incorrect.Actual %s.\n Expected: %s.\n" %
                (actual_result['results'], expected_result))

            query_template = 'SELECT COUNT(*) + 1 AS COUNT_EMPLOYEE FROM %s' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            expected_result = [{
                "COUNT_EMPLOYEE":
                expected_result[0]['COUNT_EMPLOYEE'] + 1
            }]
            self.assertEquals(
                actual_result['results'], expected_result,
                "Results are incorrect.Actual %s.\n Expected: %s.\n" %
                (actual_result['results'], expected_result))

    def test_simple_negative_alias(self):
        queries_errors = {
            'SELECT $str0._last_name as *':
            'syntax error',
            'SELECT $str0._last_name as DATABASE ?':
            'syntax error',
            'SELECT $str0 AS NULL FROM {0}':
            'syntax error',
            'SELECT $str1 as $str0, $str0 FROM {0}':
            'Duplicate result alias name',
            'SELECT test.$obj0 as points FROM {0} AS TEST ' + 'GROUP BY $obj0 AS GROUP_POINT':
            'syntax error'
        }
        self.negative_common_body(queries_errors)

    def test_alias_from_clause(self):
        queries_templates = ['SELECT $obj0.$_obj0_int0 AS points FROM %s AS test ORDER BY points',
                   'SELECT $obj0.$_obj0_int0 AS points FROM %s AS test WHERE test.$int0 >0'  +\
                   ' ORDER BY points',
                   'SELECT $obj0.$_obj0_int0 AS points FROM %s AS test ' +\
                   'GROUP BY test.$obj0.$_obj0_int0 ORDER BY points']
        for bucket in self.buckets:
            for query_template in queries_templates:
                actual_result, expected_result = self.run_query_from_template(
                    query_template % (bucket.name))
                self._verify_results(actual_result['results'], expected_result)

    def test_alias_from_clause_group(self):
        for bucket in self.buckets:
            query_template = 'SELECT $obj0.$_obj0_int0 AS points FROM %s AS test ' %(bucket.name) +\
                         'GROUP BY $obj0.$_obj0_int0 ORDER BY points'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_alias_order_desc(self):
        for bucket in self.buckets:
            query_template = 'SELECT $str0 AS name_new FROM %s AS test ORDER BY name_new DESC' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_alias_order_asc(self):
        for bucket in self.buckets:
            query_template = 'SELECT $str0 AS name_new FROM %s AS test ORDER BY name_new ASC' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_alias_aggr_fn(self):
        for bucket in self.buckets:
            query_template = 'SELECT COUNT(TEST.$str0) from %s AS TEST' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_alias_unnest(self):
        for bucket in self.buckets:
            query_template = 'SELECT count(skill) FROM %s AS emp UNNEST emp.$list_str0 AS skill' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

            query_template = 'SELECT count(skill) FROM %s AS emp UNNEST emp.$list_str0 skill' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

##############################################################################################
#
#   ORDER BY CHECKS
##############################################################################################

    def test_order_by_check(self):
        for bucket in self.buckets:
            query_template = 'SELECT $str0, $str1, $obj0.$_obj0_int0 points FROM %s'  % (bucket.name) +\
            ' ORDER BY $str1, $str0, $obj0.$_obj0_int0'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)
            query_template = 'SELECT $str0, $str1 FROM %s'  % (bucket.name) +\
            ' ORDER BY $obj0.$_obj0_int0, $str0, $str1'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_order_by_alias(self):
        for bucket in self.buckets:
            query_template = 'SELECT $str1, $obj0 AS points FROM %s'  % (bucket.name) +\
            ' AS test ORDER BY $str1 DESC, points DESC'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_order_by_alias_arrays(self):
        for bucket in self.buckets:
            query_template = 'SELECT $str1, $obj0, $list_str0[0] AS SKILL FROM %s'  % (
                                                                            bucket.name) +\
            ' AS TEST ORDER BY SKILL, $str1, TEST.$obj0'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_order_by_alias_aggr_fn(self):
        for bucket in self.buckets:
            query_template = 'SELECT $int0, $int1, count(*) AS emp_per_month from %s'% (
                                                                            bucket.name) +\
            ' WHERE $int1 >7 GROUP BY $int0, $int1 ORDER BY emp_per_month, $int1, $int0'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_order_by_aggr_fn(self):
        for bucket in self.buckets:
            query_template = 'SELECT $str1 AS TITLE FROM %s GROUP'  % (bucket.name) +\
            ' BY $str1 ORDER BY MIN($int1), $str1'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_order_by_precedence(self):
        for bucket in self.buckets:
            query_template = 'SELECT $str0, $str1 FROM %s'  % (bucket.name) +\
            ' ORDER BY $str0, $str1'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

            query_template = 'SELECT $str0, $str1 FROM %s'  % (bucket.name) +\
            ' ORDER BY $str1, $str0'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_order_by_satisfy(self):
        for bucket in self.buckets:
            query_template = 'SELECT $str0, $list_obj0 FROM %s AS employee ' % (bucket.name) +\
                        'WHERE ANY vm IN employee.$list_obj0 SATISFIES vm.$_list_obj0_int0 > 5 AND' +\
                        ' vm.$_list_obj0_str0 = "ubuntu" END ORDER BY $str0, $list_obj0[0].$_list_obj0_int0'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

##############################################################################################
#
#   DISTINCT
##############################################################################################

    def test_distinct(self):
        for bucket in self.buckets:
            query_template = 'SELECT DISTINCT $str1 FROM %s ORDER BY $str1' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_distinct_nested(self):
        for bucket in self.buckets:
            query_template = 'SELECT DISTINCT $obj0.$_obj0_int0 as VAR FROM %s '  % (bucket.name) +\
                         'ORDER BY $obj0.$_obj0_int0'
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

            query_template = 'SELECT DISTINCT $list_str0[0] as skill' +\
                         ' FROM %s ORDER BY $list_str0[0]'  % (bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

            query_template = 'SELECT DISTINCT $obj0.* FROM %s' % (bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

##############################################################################################
#
#   COMPLEX PATHS
##############################################################################################

    def test_simple_complex_paths(self):
        for bucket in self.buckets:
            query_template = 'SELECT $_obj0_int0 FROM %s.$obj0' % (bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_alias_complex_paths(self):
        for bucket in self.buckets:
            query_template = 'SELECT $_obj0_int0 as new_attribute FROM %s.$obj0' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

    def test_where_complex_paths(self):
        for bucket in self.buckets:
            query_template = 'SELECT $_obj0_int0 FROM %s.$obj0 WHERE $_obj0_int0 = 1' % (
                bucket.name)
            actual_result, expected_result = self.run_query_from_template(
                query_template)
            self._verify_results(actual_result['results'], expected_result)

##############################################################################################
#
#   COMMON FUNCTIONS
##############################################################################################

    def run_query_from_template(self, query_template):
        self.query = self.gen_results.generate_query(query_template)
        expected_result = self.gen_results.generate_expected_result()
        actual_result = self.run_cbq_query()
        return actual_result, expected_result
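    # Note on how the template flow works: generate_query() expands the
    # $-placeholders (e.g. $str0, $int0, $obj0) in a template into a concrete
    # N1QL statement over the generated documents, and generate_expected_result()
    # computes the expected rows from the same in-memory documents, so each test
    # compares the engine's answer against an independently derived result. For
    # instance, a template such as 'SELECT $str0 FROM default' would have $str0
    # replaced by one of the string fields of the generated docs (the exact field
    # name depends on the chosen dataset).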

    def negative_common_body(self, queries_errors={}):
        if not queries_errors:
            self.fail("No queries to run!")
        for bucket in self.buckets:
            for query_template, error in queries_errors.iteritems():
                try:
                    query = self.gen_results.generate_query(query_template)
                    actual_result = self.run_cbq_query(
                        query.format(bucket.name))
                except CBQError as ex:
                    self.log.error(ex)
                    self.assertTrue(
                        str(ex).find(error) != -1,
                        "Error is incorrect.Actual %s.\n Expected: %s.\n" %
                        (str(ex).split(':')[-1], error))
                else:
                    self.fail("There was no errors. Error expected: %s" %
                              error)

    def run_cbq_query(self, query=None, min_output_size=10, server=None):
        if query is None:
            query = self.query
        if server is None:
            server = self.master
            if server.ip == "127.0.0.1":
                self.n1ql_port = server.n1ql_port
        else:
            if server.ip == "127.0.0.1":
                self.n1ql_port = server.n1ql_port
            if self.input.tuq_client and "client" in self.input.tuq_client:
                server = self.tuq_client
        if self.n1ql_port is None or self.n1ql_port == '':
            self.n1ql_port = self.input.param("n1ql_port", 8093)
            if not self.n1ql_port:
                self.log.info(
                    " n1ql_port is not defined, processing will not proceed further"
                )
                raise Exception(
                    "n1ql_port is not defined, processing will not proceed further"
                )
        if self.use_rest:
            result = RestConnection(server).query_tool(query, self.n1ql_port)
        else:
            if self.version == "git_repo":
                output = self.shell.execute_commands_inside("$GOPATH/src/github.com/couchbaselabs/tuqtng/" +\
                                                            "tuq_client/tuq_client " +\
                                                            "-engine=http://%s:8093/" % server.ip,
                                                       subcommands=[query,],
                                                       min_output_size=20,
                                                       end_msg='tuq_client>')
            else:
                output = self.shell.execute_commands_inside(
                    "/tmp/tuq/cbq -engine=http://%s:8093/" % server.ip,
                    subcommands=[
                        query,
                    ],
                    min_output_size=20,
                    end_msg='cbq>')
            result = self._parse_query_output(output)
        if isinstance(result, str) or 'errors' in result:
            raise CBQError(result, server.ip)
        self.log.info("TOTAL ELAPSED TIME: %s" %
                      result["metrics"]["elapsedTime"])
        return result
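    # run_cbq_query() has two execution paths: with use_rest=True (the default)
    # the query is posted to the query service on n1ql_port (8093 by default)
    # via RestConnection.query_tool(); otherwise it drives the interactive
    # cbq / tuq_client shell over the remote connection and the raw console
    # output is converted back into a dict by _parse_query_output() below.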

    def build_url(self, version):
        info = self.shell.extract_remote_info()
        type = info.distribution_type.lower()
        if type in ["ubuntu", "centos", "red hat"]:
            url = "https://s3.amazonaws.com/packages.couchbase.com/releases/couchbase-query/dp1/"
            url += "couchbase-query_%s_%s_linux.tar.gz" % (
                version, info.architecture_type)
        #TODO for windows
        return url

    def _build_tuq(self, server):
        if self.version == "git_repo":
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                goroot = testconstants.LINUX_GOROOT
                gopath = testconstants.LINUX_GOPATH
            else:
                goroot = testconstants.WINDOWS_GOROOT
                gopath = testconstants.WINDOWS_GOPATH
            if self.input.tuq_client and "gopath" in self.input.tuq_client:
                gopath = self.input.tuq_client["gopath"]
            if self.input.tuq_client and "goroot" in self.input.tuq_client:
                goroot = self.input.tuq_client["goroot"]
            cmd = "rm -rf {0}/src/github.com".format(gopath)
            self.shell.execute_command(cmd)
            cmd= 'export GOROOT={0} && export GOPATH={1} &&'.format(goroot, gopath) +\
                ' export PATH=$PATH:$GOROOT/bin && ' +\
                'go get github.com/couchbaselabs/tuqtng;' +\
                'cd $GOPATH/src/github.com/couchbaselabs/tuqtng; ' +\
                'go get -d -v ./...; cd .'
            self.shell.execute_command(cmd)
            cmd = 'export GOROOT={0} && export GOPATH={1} &&'.format(goroot, gopath) +\
                ' export PATH=$PATH:$GOROOT/bin && ' +\
                'cd $GOPATH/src/github.com/couchbaselabs/tuqtng; go build; cd .'
            self.shell.execute_command(cmd)
            cmd = 'export GOROOT={0} && export GOPATH={1} &&'.format(goroot, gopath) +\
                ' export PATH=$PATH:$GOROOT/bin && ' +\
                'cd $GOPATH/src/github.com/couchbaselabs/tuqtng/tuq_client; go build; cd .'
            self.shell.execute_command(cmd)
        else:
            cbq_url = self.build_url(self.version)
            #TODO for windows
            cmd = "cd /tmp; mkdir tuq;cd tuq; wget {0} -O tuq.tar.gz;".format(
                cbq_url)
            cmd += "tar -xvf tuq.tar.gz;rm -rf tuq.tar.gz"
            self.shell.execute_command(cmd)
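    # The download path above unpacks the release tarball into /tmp/tuq, which is
    # the directory _start_command_line_query() launches cbq-engine from and whose
    # cbq-engine process suite_tearDown() kills; the git_repo path instead builds
    # the tuqtng engine and its tuq_client under
    # $GOPATH/src/github.com/couchbaselabs/tuqtng.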

    def _start_command_line_query(self, server):
        self.shell.execute_command(
            "export NS_SERVER_CBAUTH_URL=\"http://{0}:{1}/_cbauth\"".format(
                server.ip, server.port))
        self.shell.execute_command(
            "export NS_SERVER_CBAUTH_USER=\"{0}\"".format(
                server.rest_username))
        self.shell.execute_command(
            "export NS_SERVER_CBAUTH_PWD=\"{0}\"".format(server.rest_password))
        self.shell.execute_command(
            "export NS_SERVER_CBAUTH_RPC_URL=\"http://{0}:{1}/cbauth-demo\"".
            format(server.ip, server.port))
        if self.version == "git_repo":
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                gopath = testconstants.LINUX_GOPATH
            else:
                gopath = testconstants.WINDOWS_GOPATH
            if self.input.tuq_client and "gopath" in self.input.tuq_client:
                gopath = self.input.tuq_client["gopath"]
            if os == 'windows':
                cmd = "cd %s/src/github.com/couchbaselabs/query/server/main; " % (gopath) +\
                "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            else:
                cmd = "cd %s/src/github.com/couchbaselabs/query//server/main; " % (gopath) +\
                "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %(
                                                                server.ip, server.port)
            self.shell.execute_command(cmd)
        elif self.version == "sherlock":
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                couchbase_path = testconstants.LINUX_COUCHBASE_BIN_PATH
            else:
                couchbase_path = testconstants.WIN_COUCHBASE_BIN_PATH
            if self.input.tuq_client and "sherlock_path" in self.input.tuq_client:
                couchbase_path = "%s/bin" % self.input.tuq_client[
                    "sherlock_path"]
                print "PATH TO SHERLOCK: %s" % couchbase_path
            if os == 'windows':
                cmd = "cd %s; " % (couchbase_path) +\
                "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %(
                                                                server.ip, server.port)
            else:
                cmd = "cd %s; " % (couchbase_path) +\
                "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %(
                                                                server.ip, server.port)
                n1ql_port = self.input.param("n1ql_port", None)
                if server.ip == "127.0.0.1" and server.n1ql_port:
                    n1ql_port = server.n1ql_port
                if n1ql_port:
                    cmd = "cd %s; " % (couchbase_path) +\
                './cbq-engine -datastore http://%s:%s/ -http=":%s">n1ql.log 2>&1 &' %(
                                                                server.ip, server.port, n1ql_port)
            self.shell.execute_command(cmd)
        else:
            os = self.shell.extract_remote_info().type.lower()
            if os != 'windows':
                cmd = "cd /tmp/tuq;./cbq-engine -couchbase http://%s:%s/ >/dev/null 2>&1 &" % (
                    server.ip, server.port)
            else:
                cmd = "cd /cygdrive/c/tuq;./cbq-engine.exe -couchbase http://%s:%s/ >/dev/null 2>&1 &" % (
                    server.ip, server.port)
            self.shell.execute_command(cmd)

    def _parse_query_output(self, output):
        if output.find("cbq>") == 0:
            output = output[output.find("cbq>") + 4:].strip()
        if output.find("tuq_client>") == 0:
            output = output[output.find("tuq_client>") + 11:].strip()
        if output.find("cbq>") != -1:
            output = output[:output.find("cbq>")].strip()
        if output.find("tuq_client>") != -1:
            output = output[:output.find("tuq_client>")].strip()
        return json.loads(output)
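    # Illustration (hypothetical console capture): given output such as
    #   'cbq> {"results": [], "metrics": {"elapsedTime": "1.2ms"}}\ncbq> '
    # the leading prompt, the trailing prompt and surrounding whitespace are
    # stripped, and the remaining JSON text is handed to json.loads(), so the
    # caller receives the same dict shape the REST path returns.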

    def generate_docs(self, num_items, start=0):
        try:
            return getattr(self, 'generate_docs_' + self.dataset)(num_items,
                                                                  start)
        except:
            self.fail("There is no dataset %s, please enter a valid one" %
                      self.dataset)

    def generate_docs_default(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee(docs_per_day, start)

    def generate_docs_sabre(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_sabre(docs_per_day, start)

    def generate_docs_employee(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee_data(
            docs_per_day=docs_per_day, start=start)

    def generate_docs_simple(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee_simple_data(
            docs_per_day=docs_per_day, start=start)

    def generate_docs_sales(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_employee_sales_data(
            docs_per_day=docs_per_day, start=start)

    def generate_docs_bigdata(self, docs_per_day, start=0):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_bigdata(docs_per_day=docs_per_day *
                                                    1000,
                                                    start=start,
                                                    value_size=self.value_size)

    def _verify_results(self,
                        actual_result,
                        expected_result,
                        missing_count=1,
                        extra_count=1):
        if len(actual_result) != len(expected_result):
            missing, extra = self.check_missing_and_extra(
                actual_result, expected_result)
            self.log.error("Missing items: %s.\n Extra items: %s" %
                           (missing[:missing_count], extra[:extra_count]))
            self.fail(
                "Results are incorrect.Actual num %s. Expected num: %s.\n" %
                (len(actual_result), len(expected_result)))
        if self.max_verify is not None:
            actual_result = actual_result[:self.max_verify]
            expected_result = expected_result[:self.max_verify]

        msg = "Results are incorrect.\n Actual first and last 100:  %s.\n ... \n %s" +\
        "Expected first and last 100: %s.\n  ... \n %s"
        self.assertTrue(
            actual_result == expected_result,
            msg % (actual_result[:100], actual_result[-100:],
                   expected_result[:100], expected_result[-100:]))

    def check_missing_and_extra(self, actual, expected):
        missing = []
        extra = []
        for item in actual:
            if not (item in expected):
                extra.append(item)
        for item in expected:
            if not (item in actual):
                missing.append(item)
        return missing, extra

    def sort_nested_list(self, result):
        actual_result = []
        for item in result:
            curr_item = {}
            for key, value in item.iteritems():
                if isinstance(value, list) or isinstance(value, set):
                    curr_item[key] = sorted(value)
                else:
                    curr_item[key] = value
            actual_result.append(curr_item)
        return actual_result

    def configure_gomaxprocs(self):
        max_proc = self.input.param("gomaxprocs", None)
        cmd = "export GOMAXPROCS=%s" % max_proc
        for server in self.servers:
            # connect to each server in turn rather than repeatedly to the master
            shell_connection = RemoteMachineShellConnection(server)
            shell_connection.execute_command(cmd)
            shell_connection.disconnect()

    def create_primary_index_for_3_0_and_greater(self):
        self.log.info("CHECK FOR PRIMARY INDEXES")
        rest = RestConnection(self.master)
        versions = rest.get_nodes_versions()
        ddoc_name = 'ddl_#primary'
        if versions[0].startswith("3"):
            try:
                rest.get_ddoc(self.buckets[0], ddoc_name)
            except ReadDocumentException:
                for bucket in self.buckets:
                    self.log.info("Creating primary index for %s ..." %
                                  bucket.name)
                    self.query = "CREATE PRIMARY INDEX ON %s " % (bucket.name)
                    try:
                        self.run_cbq_query()
                    except Exception, ex:
                        self.log.error('ERROR during index creation %s' %
                                       str(ex))
Example #57
0
class bidirectional(XDCRNewBaseTest):
    def setUp(self):
        super(bidirectional, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()

    def tearDown(self):
        super(bidirectional, self).tearDown()

    def __perform_ops_joint_sets(self):
        # Merging the keys as keys are actually replicated.
        temp_expires = self._expires
        self._expires = 0  # Assigning it to 0, so that merge_buckets don't wait for expiration here.
        self.merge_all_buckets()

        tasks = []
        kv_gen_src = self.src_cluster.get_kv_gen()[OPS.CREATE]
        gen_update = BlobGenerator(kv_gen_src.name,
                                   kv_gen_src.seed,
                                   kv_gen_src.value_size,
                                   start=0,
                                   end=int(kv_gen_src.end *
                                           (float)(self._perc_upd) / 100))
        gen_delete = BlobGenerator(
            kv_gen_src.name,
            kv_gen_src.seed,
            kv_gen_src.value_size,
            start=int((kv_gen_src.end) * (float)(100 - self._perc_del) / 100),
            end=kv_gen_src.end)
        if "C1" in self._upd_clusters:
            tasks += self.src_cluster.async_load_all_buckets_from_generator(
                gen_update, OPS.UPDATE, self._expires)
        if "C2" in self._upd_clusters:
            tasks += self.dest_cluster.async_load_all_buckets_from_generator(
                gen_update, OPS.UPDATE, self._expires)
        if "C1" in self._del_clusters:
            tasks += self.src_cluster.async_load_all_buckets_from_generator(
                gen_delete, OPS.DELETE, 0)
        if "C2" in self._del_clusters:
            tasks += self.dest_cluster.async_load_all_buckets_from_generator(
                gen_delete, OPS.DELETE, 0)

        for task in tasks:
            task.result()

        self._expires = temp_expires
        if (self._wait_for_expiration
                and self._expires) and ("C1" in self._upd_clusters
                                        or "C2" in self._upd_clusters):
            self.sleep(self._expires)

        self.sleep(self._wait_timeout)
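        # Illustration (assuming kv_gen_src.end = 1000): gen_update spans
        # start=0 .. end=end*perc_upd/100 and gen_delete spans
        # start=end*(100-perc_del)/100 .. end; because the key sets are merged
        # (the keys are replicated to both clusters), the same generators are
        # applied to C1 and/or C2 depending on _upd_clusters / _del_clusters.
        # With perc_upd = perc_del = 30 that means keys up to 300 are updated
        # and keys from 700 onwards are deleted.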

    """Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket."""

    def load_with_ops(self):
        self.setup_xdcr_and_load()
        self.perform_update_delete()
        self.verify_results()

    """Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket.
    Here running incremental load on both cluster1 and cluster2 as specified by the user/conf file"""

    def load_with_async_ops(self):
        self.setup_xdcr_and_load()
        self.async_perform_update_delete()
        self.verify_results()

    """Testing Bidirectional load( Loading at source/destination). Failover node at Source/Destination while
    Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_async_ops_and_joint_sets(self):
        self.setup_xdcr_and_load()
        self.async_perform_update_delete()
        self.verify_results()

    def load_with_async_ops_with_warmup(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node())
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node())

        self.sleep(self._wait_timeout)
        NodeHelper.wait_warmup_completed(warmupnodes)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout / 2)
        self.verify_results()

    def load_with_async_ops_with_warmup_master(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))

        self.sleep(self._wait_timeout)
        NodeHelper.wait_warmup_completed(warmupnodes)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout / 2)
        self.verify_results()

    def load_with_async_ops_and_joint_sets_with_warmup(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            "Test case does not apply for Ephemeral buckets"
            return
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node())
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node())

        self.sleep(self._wait_timeout)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout / 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_async_ops_and_joint_sets_with_warmup_master(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))

        self.sleep(self._wait_timeout)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout / 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_failover(self):
        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes()

        self.sleep(self._wait_timeout / 6)
        self.perform_update_delete()
        self.sleep(300)

        self.verify_results()

    def load_with_failover_then_add_back(self):

        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.src_cluster.add_back_node()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.dest_cluster.add_back_node()

        self.perform_update_delete()

        self.verify_results()

    def load_with_failover_master(self):
        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_master()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_master()

        self.sleep(self._wait_timeout / 6)
        self.perform_update_delete()

        self.verify_results()

    """Replication with compaction ddocs and view queries on both clusters.

    This test begins by loading a given number of items on both clusters.
    It creates _num_views as development/production view with default
    map view funcs(_is_dev_ddoc = True by default) on both clusters.
    Then we disabled compaction for ddoc on src cluster. While we don't reach
    expected fragmentation for ddoc on src cluster we update docs and perform
    view queries for all views. Then we start compaction when fragmentation
    was reached fragmentation_value. When compaction was completed we perform
    a full verification: wait for the disk queues to drain
    and then verify that there has been no data loss on both clusters."""

    def replication_with_ddoc_compaction(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return

        self.setup_xdcr()

        self.src_cluster.load_all_buckets(self._num_items)
        self.dest_cluster.load_all_buckets(self._num_items)

        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        fragmentation_value = self._input.param("fragmentation_value", 80)
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views,
                                               is_dev_ddoc)

        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]

        query = {"full_set": "true", "stale": "false"}

        tasks = self.src_cluster.async_create_views(ddoc_name, views,
                                                    BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views,
                                                      BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)

        self.src_cluster.disable_compaction()
        fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(
            prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
        # generate load until fragmentation reached
        while fragmentation_monitor.state != "FINISHED":
            # update docs to create fragmentation
            self.src_cluster.update_delete_data(OPS.UPDATE)
            for view in views:
                # run queries to create indexes
                self.src_cluster.query_view(prefix + ddoc_name, view.name,
                                            query)
                self.dest_cluster.query_view(prefix + ddoc_name, view.name,
                                             query)
        fragmentation_monitor.result()

        compaction_task = self.src_cluster.async_compact_view(
            prefix + ddoc_name, 'default')

        self.assertTrue(compaction_task.result())

        self.verify_results()

    def replication_with_view_queries_and_ops(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return
        tasks = []
        try:
            self.setup_xdcr()

            self.src_cluster.load_all_buckets(self._num_items)
            self.dest_cluster.load_all_buckets(self._num_items)

            num_views = self._input.param("num_views", 5)
            is_dev_ddoc = self._input.param("is_dev_ddoc", True)
            for bucket in self.src_cluster.get_buckets():
                views = Utility.make_default_views(bucket.name, num_views,
                                                   is_dev_ddoc)

            ddoc_name = "ddoc1"
            prefix = ("", "dev_")[is_dev_ddoc]

            query = {
                "full_set": "true",
                "stale": "false",
                "connection_timeout": 60000
            }

            tasks = self.src_cluster.async_create_views(
                ddoc_name, views, BUCKET_NAME.DEFAULT)
            tasks += self.dest_cluster.async_create_views(
                ddoc_name, views, BUCKET_NAME.DEFAULT)

            for task in tasks:
                task.result(self._poll_timeout)

            tasks = []
            # Setting up doc-ops at source nodes
            if "C1" in self._upd_clusters:
                tasks.extend(
                    self.src_cluster.async_update_delete(
                        OPS.UPDATE, self._perc_upd, self._expires))
            if "C1" in self._del_clusters:
                tasks.extend(
                    self.src_cluster.async_update_delete(
                        OPS.DELETE, self._perc_del))
            if "C2" in self._upd_clusters:
                tasks.extend(
                    self.dest_cluster.async_update_delete(
                        OPS.UPDATE, self._perc_upd, self._expires))
            if "C2" in self._del_clusters:
                tasks.extend(
                    self.dest_cluster.async_update_delete(
                        OPS.DELETE, self._perc_del))

            self.sleep(5)
            while True:
                for view in views:
                    self.src_cluster.query_view(prefix + ddoc_name, view.name,
                                                query)
                    self.dest_cluster.query_view(prefix + ddoc_name, view.name,
                                                 query)
                if set([task.state for task in tasks]) != set(["FINISHED"]):
                    continue
                else:
                    if self._wait_for_expiration:
                        if "C1" in self._upd_clusters or "C2" in self._upd_clusters:
                            self.sleep(self._expires)
                    break

            self.merge_all_buckets()
            self.src_cluster.verify_items_count()
            self.dest_cluster.verify_items_count()

            tasks = []
            src_buckets = self.src_cluster.get_buckets()
            dest_buckets = self.dest_cluster.get_buckets()
            for view in views:
                tasks.append(
                    self.src_cluster.async_query_view(
                        prefix + ddoc_name, view.name, query,
                        src_buckets[0].kvs[1].__len__()))
                tasks.append(
                    self.dest_cluster.async_query_view(
                        prefix + ddoc_name, view.name, query,
                        dest_buckets[0].kvs[1].__len__()))

            for task in tasks:
                task.result(self._poll_timeout)

            self.verify_results()
        finally:
            # For timeout error, all tasks to be cancelled
            # Before proceeding to next test
            for task in tasks:
                task.cancel()

    """Replication with disabled/enabled ddoc compaction on both clusters.

    This test begins by loading a given number of items on both clusters.
    Then we disabled or enabled compaction on both clusters( set via params).
    Then we mutate and delete data on clusters 3 times. After deletion we recreate
    deleted items. When data was changed 3 times we perform
    a full verification: wait for the disk queues to drain
    and then verify that there has been no data loss on both clusters."""

    def replication_with_disabled_ddoc_compaction(self):
        self.setup_xdcr()
        self.src_cluster.load_all_buckets(self._num_items)
        self.dest_cluster.load_all_buckets(self._num_items)

        if "C1" in self._disable_compaction:
            self.src_cluster.disable_compaction()
        if "C2" in self._disable_compaction:
            self.dest_cluster.disable_compaction()

        # perform doc ops 3 times to increase the rev number
        for _ in range(3):
            self.async_perform_update_delete()
            # wait till deletes have been sent to recreate
            self.sleep(60)
            # restore (re-create) deleted items
            if 'C1' in self._del_clusters:
                c1_kv_gen = self.src_cluster.get_kv_gen()

                c1_gen_delete = copy.deepcopy(c1_kv_gen[OPS.DELETE])
                if self._expires:
                    # if expiration set, recreate those keys before
                    # trying to update
                    c1_gen_update = copy.deepcopy(c1_kv_gen[OPS.UPDATE])
                    self.src_cluster.load_all_buckets_from_generator(
                        kv_gen=c1_gen_update)
                self.src_cluster.load_all_buckets_from_generator(
                    kv_gen=c1_gen_delete)
            if 'C2' in self._del_clusters:
                c2_kv_gen = self.dest_cluster.get_kv_gen()
                c2_gen_delete = copy.deepcopy(c2_kv_gen[OPS.DELETE])
                if self._expires:
                    c2_gen_update = copy.deepcopy(c2_kv_gen[OPS.UPDATE])
                    self.dest_cluster.load_all_buckets_from_generator(
                        kv_gen=c2_gen_update)
                self.dest_cluster.load_all_buckets_from_generator(
                    kv_gen=c2_gen_delete)
            # wait till we recreate deleted keys before we can delete/update
            self.sleep(300)

        self.verify_results()

    def replication_while_rebooting_a_non_master_src_dest_node(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return
        self.setup_xdcr_and_load()
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout)

        reboot_node_dest = self.dest_cluster.reboot_one_node(self)
        NodeHelper.wait_node_restarted(reboot_node_dest,
                                       self,
                                       wait_time=self._wait_timeout * 4,
                                       wait_if_warmup=True)

        reboot_node_src = self.src_cluster.reboot_one_node(self)
        NodeHelper.wait_node_restarted(reboot_node_src,
                                       self,
                                       wait_time=self._wait_timeout * 4,
                                       wait_if_warmup=True)

        self.sleep(120)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            [reboot_node_dest], self, wait_if_warmup=True)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            [reboot_node_src], self, wait_if_warmup=True)
        self.verify_results()

    def test_disk_full(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        self.sleep(self._wait_timeout)

        zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
        try:
            for node in [self.src_master, self.dest_master]:
                self.shell = RemoteMachineShellConnection(node)
                self.shell.execute_cbcollect_info(zip_file)
                if self.shell.extract_remote_info().type.lower() != "windows":
                    command = "unzip %s" % (zip_file)
                    output, error = self.shell.execute_command(command)
                    self.shell.log_command_output(output, error)
                    if len(error) > 0:
                        raise Exception(
                            "unable to unzip the files. Check unzip command output for help"
                        )
                    cmd = 'grep -R "Approaching full disk warning." cbcollect_info*/'
                    output, _ = self.shell.execute_command(cmd)
                else:
                    cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format(
                        self.src_master.ip, self.src_master.rest_username,
                        self.src_master.rest_password)
                    output, _ = self.shell.execute_command(cmd)
                self.assertNotEquals(
                    len(output), 0,
                    "Full disk warning not generated as expected in %s" %
                    node.ip)
                self.log.info("Full disk warning generated as expected in %s" %
                              node.ip)

                self.shell.delete_files(zip_file)
                self.shell.delete_files("cbcollect_info*")
        except Exception as e:
            self.log.info(e)

    def test_rollback(self):
        bucket = self.src_cluster.get_buckets()[0]
        src_nodes = self.src_cluster.get_nodes()
        dest_nodes = self.dest_cluster.get_nodes()
        nodes = src_nodes + dest_nodes

        # Stop Persistence on Node A & Node B
        for node in nodes:
            mem_client = MemcachedClientHelper.direct_client(node, bucket)
            mem_client.stop_persistence()

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'
        self.setup_xdcr()

        self.src_cluster.pause_all_replications()
        self.dest_cluster.pause_all_replications()

        gen = BlobGenerator("C1-",
                            "C1-",
                            self._value_size,
                            end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)
        gen = BlobGenerator("C2-",
                            "C2-",
                            self._value_size,
                            end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()
        self.dest_cluster.resume_all_replications()

        # Perform mutations on the bucket
        self.async_perform_update_delete()

        rest1 = RestConnection(self.src_cluster.get_master_node())
        rest2 = RestConnection(self.dest_cluster.get_master_node())

        # Fetch count of docs in src and dest cluster
        _count1 = rest1.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]

        self.log.info(
            "Before rollback src cluster count = {0} dest cluster count = {1}".
            format(_count1, _count2))

        # Kill memcached on Node A so that Node B becomes master
        shell = RemoteMachineShellConnection(
            self.src_cluster.get_master_node())
        shell.kill_memcached()
        shell = RemoteMachineShellConnection(
            self.dest_cluster.get_master_node())
        shell.kill_memcached()

        # Start persistence on Node B
        mem_client = MemcachedClientHelper.direct_client(src_nodes[1], bucket)
        mem_client.start_persistence()
        mem_client = MemcachedClientHelper.direct_client(dest_nodes[1], bucket)
        mem_client.start_persistence()

        # Failover Node B
        failover_task = self.src_cluster.async_failover()
        failover_task.result()
        failover_task = self.dest_cluster.async_failover()
        failover_task.result()

        # Wait for Failover & rollback to complete
        self.sleep(60)

        # Fetch count of docs in src and dest cluster
        _count1 = rest1.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]

        self.log.info(
            "After rollback src cluster count = {0} dest cluster count = {1}".
            format(_count1, _count2))

        self.assertTrue(
            self.src_cluster.wait_for_outbound_mutations(),
            "Mutations in source cluster not replicated to target after rollback"
        )
        self.assertTrue(
            self.dest_cluster.wait_for_outbound_mutations(),
            "Mutations in target cluster not replicated to source after rollback"
        )

        count = NodeHelper.check_goxdcr_log(
            src_nodes[0], "Received rollback from DCP stream", goxdcr_log)
        self.assertGreater(count, 0, "rollback did not happen as expected")
        self.log.info("rollback happened as expected")

        count = NodeHelper.check_goxdcr_log(
            dest_nodes[0], "Received rollback from DCP stream", goxdcr_log)
        self.assertGreater(count, 0, "rollback did not happen as expected")
        self.log.info("rollback happened as expected")
Example #58
0
class unidirectional(XDCRNewBaseTest):
    def setUp(self):
        super(unidirectional, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()

    def tearDown(self):
        super(unidirectional, self).tearDown()

    """Testing Unidirectional load( Loading only at source) Verifying whether XDCR replication is successful on
    subsequent destination clusters.Create/Update/Delete operations are performed based on doc-ops specified by the user. """

    def load_with_ops(self):
        self.setup_xdcr_and_load()
        self.perform_update_delete()
        self.verify_results()

    """Testing Unidirectional load( Loading only at source) Verifying whether XDCR replication is successful on
    subsequent destination clusters. Create/Update/Delete are performed in parallel- doc-ops specified by the user. """

    def load_with_async_ops(self):
        self.setup_xdcr_and_load()
        self.async_perform_update_delete()
        self.verify_results()

    def load_with_async_ops_diff_data_size(self):
        # Load 52 items with a 1-byte value
        # (one per letter, lowercase and uppercase)
        self.src_cluster.load_all_buckets(52, value_size=1)
        # Load 5 items with a 1 MB value
        self.src_cluster.load_all_buckets(5, value_size=1000000)
        # Load 1 item with a 10 MB value
        # (20 MB values hit memory issues on the test VMs)
        self.src_cluster.load_all_buckets(1, value_size=10000000)

        self.verify_results()

    """Testing Unidirectional load( Loading only at source). Failover node at Source/Destination while
    Create/Update/Delete are performed after based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_ops_with_warmup(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node())
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node())

        self.sleep(self._wait_timeout)
        self.perform_update_delete()
        self.sleep(self._wait_timeout / 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_ops_with_warmup_master(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))

        self.sleep(self._wait_timeout)
        self.perform_update_delete()
        self.sleep(self._wait_timeout / 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_async_ops_with_warmup(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node())
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node())

        self.sleep(self._wait_timeout)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout / 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_async_ops_with_warmup_master(self):
        self.setup_xdcr_and_load()
        warmupnodes = []
        if "C1" in self._warmup:
            warmupnodes.append(self.src_cluster.warmup_node(master=True))
        if "C2" in self._warmup:
            warmupnodes.append(self.dest_cluster.warmup_node(master=True))

        self.sleep(self._wait_timeout)
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout / 2)

        NodeHelper.wait_warmup_completed(warmupnodes)

        self.verify_results()

    def load_with_failover(self):
        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes()

        self.sleep(self._wait_timeout / 6)
        self.perform_update_delete()

        self.verify_results()

    def load_with_failover_then_add_back(self):

        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.src_cluster.add_back_node()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_nodes(rebalance=False)
            self.dest_cluster.add_back_node()

        self.perform_update_delete()

        self.verify_results()

    """Testing Unidirectional load( Loading only at source). Failover node at Source/Destination while
    Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_failover_master(self):
        self.setup_xdcr_and_load()

        if "C1" in self._failover:
            self.src_cluster.failover_and_rebalance_master()
        if "C2" in self._failover:
            self.dest_cluster.failover_and_rebalance_master()

        self.sleep(self._wait_timeout / 6)
        self.perform_update_delete()

        self.verify_results()

    """Testing Unidirectional load( Loading only at source). Failover node at Source/Destination while
    Create/Update/Delete are performed in parallel based on doc-ops specified by the user.
    Verifying whether XDCR replication is successful on subsequent destination clusters. """

    def load_with_async_failover(self):
        self.setup_xdcr_and_load()

        tasks = []
        if "C1" in self._failover:
            tasks.append(self.src_cluster.async_failover())
        if "C2" in self._failover:
            tasks.append(self.dest_cluster.async_failover())

        self.perform_update_delete()
        self.sleep(self._wait_timeout / 4)

        for task in tasks:
            task.result()

        if "C1" in self._failover:
            self.src_cluster.rebalance_failover_nodes()
        if "C2" in self._failover:
            self.dest_cluster.rebalance_failover_nodes()

        self.verify_results()

    """Replication with compaction ddocs and view queries on both clusters.

        This test begins by loading a given number of items on the source cluster.
        It creates num_views as development/production view with default
        map view funcs(_is_dev_ddoc = True by default) on both clusters.
        Then we disabled compaction for ddoc on src cluster. While we don't reach
        expected fragmentation for ddoc on src cluster we update docs and perform
        view queries for all views. Then we start compaction when fragmentation
        was reached fragmentation_value. When compaction was completed we perform
        a full verification: wait for the disk queues to drain
        and then verify that there has been any data loss on all clusters."""
    def replication_with_ddoc_compaction(self):
        self.setup_xdcr_and_load()

        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        fragmentation_value = self._input.param("fragmentation_value", 80)
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)

        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]

        query = {"full_set": "true", "stale": "false"}

        tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)

        self.src_cluster.disable_compaction()
        fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)

        # generate load until fragmentation reached
        while fragmentation_monitor.state != "FINISHED":
            # update docs to create fragmentation
            self.src_cluster.update_delete_data(OPS.UPDATE)
            for view in views:
                # run queries to create indexes
                self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
        fragmentation_monitor.result()

        compaction_task = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default')

        self.assertTrue(compaction_task.result())

        self.verify_results()

    """Replication with disabled/enabled ddoc compaction on source cluster.

        This test begins by loading a given number of items on the source cluster.
        Then we disabled or enabled compaction on both clusters( set via params).
        Then we mutate and delete data on the source cluster 3 times.
        After deletion we recreate deleted items. When data was changed 3 times
        we perform a full verification: wait for the disk queues to drain
        and then verify that there has been no data loss on both all clusters."""
    def replication_with_disabled_ddoc_compaction(self):
        self.setup_xdcr_and_load()

        if "C1" in self._disable_compaction:
            self.src_cluster.disable_compaction()
        if "C2" in self._disable_compaction:
            self.dest_cluster.disable_compaction()

        # perform doc ops 3 times to increase the rev number
        for _ in range(3):
            self.async_perform_update_delete()
            # restore (re-create) deleted items
            if 'C1' in self._del_clusters:
                c1_kv_gen = self.src_cluster.get_kv_gen()
                gen_delete = copy.deepcopy(c1_kv_gen[OPS.DELETE])
                self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_delete)
                self.sleep(5)

        self.verify_results()

    def replication_while_rebooting_a_non_master_destination_node(self):
        self.setup_xdcr_and_load()
        self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
        self.perform_update_delete()
        self.sleep(self._wait_timeout / 2)
        rebooted_node = self.dest_cluster.reboot_one_node(self)
        NodeHelper.wait_node_restarted(rebooted_node, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)

        self.verify_results()

    def replication_with_firewall_enabled(self):
        self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
        self.setup_xdcr_and_load()
        self.perform_update_delete()

        NodeHelper.enable_firewall(self.dest_master)
        self.sleep(30)
        NodeHelper.disable_firewall(self.dest_master)
        self.verify_results()

    """Testing Unidirectional append ( Loading only at source) Verifying whether XDCR replication is successful on
    subsequent destination clusters. """

    def test_append(self):
        self.setup_xdcr_and_load()
        self.verify_results()
        loop_count = self._input.param("loop_count", 20)
        for i in xrange(loop_count):
            self.log.info("Append iteration # %s" % i)
            gen_append = BlobGenerator('loadOne', 'loadOne', self._value_size, end=self._num_items)
            self.src_cluster.load_all_buckets_from_generator(gen_append, ops=OPS.APPEND, batch_size=1)
            self.sleep(self._wait_timeout)
        self.verify_results()

    '''
    This method runs the cbcollect_info tool after setting up uni-XDCR and checks
    whether the archive generated by cbcollect_info contains the xdcr log file.
    '''
    def collectinfotest_for_xdcr(self):
        self.load_with_ops()
        self.node_down = self._input.param("node_down", False)
        self.log_filename = self._input.param("file_name", "collectInfo")
        self.shell = RemoteMachineShellConnection(self.src_master)
        self.shell.execute_cbcollect_info("%s.zip" % (self.log_filename))
        from clitest import collectinfotest
        # HACK added self.buckets data member.
        self.buckets = self.src_cluster.get_buckets()
        collectinfotest.CollectinfoTests.verify_results(
            self, self.log_filename
        )

    """ Verify the fix for MB-9548"""
    def test_verify_replications_stream_delete(self):
        self.setup_xdcr_and_load()
        self.verify_results()
        rest_conn = RestConnection(self.src_master)
        replications = rest_conn.get_replications()
        self.assertTrue(replications, "Number of replication streams should not be 0")
        self.src_cluster.delete_all_buckets()

        replications = rest_conn.get_replications()
        self.assertTrue(not replications, "No replication streams should exist after deleting the buckets")

    """ Verify fix for MB-9862"""
    def test_verify_memcache_connections(self):
        allowed_memcached_conn = self._input.param("allowed_connections", 100)
        max_ops_per_second = self._input.param("max_ops_per_second", 2500)
        min_item_size = self._input.param("min_item_size", 128)
        num_docs = self._input.param("num_docs", 30000)
        # start load, max_ops_per_second is the combined limit for all buckets
        mcsodaLoad = LoadWithMcsoda(self.src_master, num_docs, prefix='')
        mcsodaLoad.cfg["max-ops"] = 0
        mcsodaLoad.cfg["max-ops-per-sec"] = max_ops_per_second
        mcsodaLoad.cfg["exit-after-creates"] = 1
        mcsodaLoad.cfg["min-value-size"] = min_item_size
        mcsodaLoad.cfg["json"] = 0
        mcsodaLoad.cfg["batch"] = 100
        loadDataThread = Thread(target=mcsodaLoad.load_data,
                                  name='mcloader_default')
        loadDataThread.daemon = True
        loadDataThread.start()

        src_remote_shell = RemoteMachineShellConnection(self.src_master)
        machine_type = src_remote_shell.extract_remote_info().type.lower()
        while (loadDataThread.isAlive() and machine_type == 'linux'):
            command = "netstat -lpnta | grep 11210 | grep TIME_WAIT | wc -l"
            output, _ = src_remote_shell.execute_command(command)
            if int(output[0]) > allowed_memcached_conn:
                # stop load
                mcsodaLoad.load_stop()
                loadDataThread.join()
                self.fail("Memcached connections {0} are increased above {1} \
                            on Source node".format(
                                                   allowed_memcached_conn,
                                                   int(output[0])))
            self.sleep(5)

        # stop load
        mcsodaLoad.load_stop()
        loadDataThread.join()

    # Test to verify MB-10116
    def verify_ssl_private_key_not_present_in_logs(self):
        zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
        try:
            self.shell = RemoteMachineShellConnection(self.src_master)
            self.load_with_ops()
            self.shell.execute_cbcollect_info(zip_file)
            if self.shell.extract_remote_info().type.lower() != "windows":
                command = "unzip %s" % (zip_file)
                output, error = self.shell.execute_command(command)
                self.shell.log_command_output(output, error)
                if len(error) > 0:
                    raise Exception("unable to unzip the files. Check unzip command output for help")
                cmd = 'grep -R "BEGIN RSA PRIVATE KEY" cbcollect_info*/'
                output, _ = self.shell.execute_command(cmd)
            else:
                cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'BEGIN RSA PRIVATE KEY'".format(
                                                    self.src_master.ip,
                                                    self.src_master.rest_username,
                                                    self.src_master.rest_password)
                output, _ = self.shell.execute_command(cmd)
            self.assertTrue(not output, "XDCR SSL private key was found in the diag logs -> %s" % output)
        finally:
            self.shell.delete_files(zip_file)
            self.shell.delete_files("cbcollect_info*")

    # Buckets States
    def delete_recreate_dest_buckets(self):
        self.setup_xdcr_and_load()

        # Remove destination buckets
        self.dest_cluster.delete_all_buckets()

        # Re-create the destination buckets
        self.create_buckets_on_cluster("C2")

        self._resetup_replication_for_recreate_buckets("C2")

        self.async_perform_update_delete()
        self.verify_results()

    def flush_dest_buckets(self):
        self.setup_xdcr_and_load()

        # flush destination buckets
        self.dest_cluster.flush_buckets()

        self.async_perform_update_delete()
        self.verify_results()

    # Nodes Crashing Scenarios
    def __kill_processes(self, crashed_nodes=[]):
        for node in crashed_nodes:
            NodeHelper.kill_erlang(node)

    def __start_cb_server(self, node):
        shell = RemoteMachineShellConnection(node)
        shell.start_couchbase()
        shell.disconnect()

    def test_node_crash_master(self):
        self.setup_xdcr_and_load()

        crashed_nodes = []
        crash = self._input.param("crash", "").split('-')
        if "C1" in crash:
            crashed_nodes.append(self.src_master)
        if "C2" in crash:
            crashed_nodes.append(self.dest_master)

        self.__kill_processes(crashed_nodes)

        for crashed_node in crashed_nodes:
            self.__start_cb_server(crashed_node)
        NodeHelper.wait_warmup_completed(crashed_nodes)

        self.async_perform_update_delete()
        self.verify_results()

    # Disaster at site:
    # 1. Crash the source cluster, then sleep n seconds.
    # 2. Crash the destination cluster.
    # 3. Wait for the source cluster to warm up; load more data and perform mutations on the source.
    # 4. Wait for the destination cluster to warm up.
    # 5. Verify data.
    def test_node_crash_cluster(self):
        self.setup_xdcr_and_load()

        crashed_nodes = []
        crash = self._input.param("crash", "").split('-')
        if "C1" in crash:
            crashed_nodes += self.src_cluster.get_nodes()
            self.__kill_processes(crashed_nodes)
            self.sleep(30)
        if "C2" in crash:
            crashed_nodes += self.dest_cluster.get_nodes()
            self.__kill_processes(crashed_nodes)

        for crashed_node in crashed_nodes:
            self.__start_cb_server(crashed_node)

        if "C1" in crash:
            NodeHelper.wait_warmup_completed(self.src_cluster.get_nodes())
            gen_create = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self._num_items)
            self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()

        if "C2" in crash:
            NodeHelper.wait_warmup_completed(self.dest_cluster.get_nodes())

        self.verify_results()

    """ Test if replication restarts 60s after idle xdcr following dest bucket flush """
    def test_idle_xdcr_dest_flush(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        bucket = self.dest_cluster.get_bucket_by_name(BUCKET_NAME.DEFAULT)
        self.dest_cluster.flush_buckets([bucket])

        self.sleep(self._wait_timeout)

        self.verify_results()

    """ Test if replication restarts 60s after idle xdcr following dest bucket recreate """
    def test_idle_xdcr_dest_recreate(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        bucket = self.dest_cluster.get_bucket_by_name(BUCKET_NAME.DEFAULT)
        self.dest_cluster.delete_bucket(BUCKET_NAME.DEFAULT)

        self.dest_cluster.create_default_bucket(bucket.bucket_size)

        self.sleep(self._wait_timeout)

        self.verify_results()

    """ Test if replication restarts 60s after idle xdcr following dest failover """
    def test_idle_xdcr_dest_failover(self):
        self.setup_xdcr_and_load()
        self.verify_results()

        self.dest_cluster.failover_and_rebalance_nodes()

        self.sleep(self._wait_timeout)

        self.verify_results()
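
# A minimal sketch of what the firewall helpers used in replication_with_firewall_enabled
# (NodeHelper.enable_firewall / disable_firewall) presumably do. This is an assumption,
# not the helpers' actual code: "enabling" the firewall here means dropping traffic to the
# Couchbase ports with iptables, and "disabling" it means flushing the rules, matching the
# "/sbin/iptables -F" command used elsewhere in this suite. The port list is also an assumption.
def block_couchbase_ports(node, ports=(8091, 8092, 11209, 11210, 11211)):
    shell = RemoteMachineShellConnection(node)
    try:
        for port in ports:
            # drop inbound TCP traffic to each Couchbase port
            o, r = shell.execute_command(
                "/sbin/iptables -A INPUT -p tcp --dport {0} -j DROP".format(port))
            shell.log_command_output(o, r)
    finally:
        shell.disconnect()

def unblock_couchbase_ports(node):
    shell = RemoteMachineShellConnection(node)
    try:
        # flush all iptables rules, restoring connectivity
        o, r = shell.execute_command("/sbin/iptables -F")
        shell.log_command_output(o, r)
    finally:
        shell.disconnect()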
Example #59
0
class BackupBaseTest(BaseTestCase):
    def setUp(self):
        self.times_teardown_called = 1
        super(BackupBaseTest, self).setUp()
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()
        self.value_size = self.input.param("value_size", 256)
        self.expire_time = self.input.param("expire_time", 60)
        self.item_flag = self.input.param("item_flag", 0)
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = self.input.param("backup_location", "/tmp/backup")
        self.command_options = self.input.param("command_options", '')
        if self.command_options != '':
            self.command_options = self.command_options.split(";")
        self.doc_ops = self.input.param("doc_ops", None)
        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")
        servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
        for bucket in self.buckets:
            bucket.kvs[2] = KVStore()
        self.cluster.rebalance(self.servers[:1], servers_in, [])

    def tearDown(self):
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1:
                if self.os == 'windows':
                    self.shell.delete_files("/cygdrive/c%s" % (self.backup_location))
                else:
                    self.shell.delete_files(self.backup_location)
                self.shell.disconnect()
                del self.buckets
                gc.collect()
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                if self.os == 'windows':
                    self.shell.delete_files("/cygdrive/c%s" % (self.backup_location))
                else:
                    self.shell.delete_files(self.backup_location)
                self.shell.disconnect()
                del self.buckets
                gc.collect()
        self.times_teardown_called += 1
        super(BackupBaseTest, self).tearDown()

    def verify_results(self, server, kv_store=1):
        """This is the verification function for test cases of backup/restore.

        Args:
          server: the master server in the cluster as self.master.
          kv_store: default value is 1. This is the key of the kv_store of each bucket.

        If the command line assigns the command options -k and/or -b and/or --single-node, then in this
        verification function key_name indicates which keys we need to verify and bucket_name indicates
        which bucket we need to verify. If the single-node flag is set, we only need to verify
        all the buckets on the master node."""

        key_name = None
        bucket_name = None
        single_node_flag = False
        if self.command_options is not None:
            for s in self.command_options:
                if s.find("-k") != -1:
                    sub = s.find(" ")
                    key_name = s[sub + 1:]
                if s.find("-b") != -1:
                    sub = s.find(" ")
                    bucket_name = s[sub + 1:]
                if "--single-node" in self.command_options:
                    single_node_flag = True

        # drop buckets whose name does not match the name assigned to -b from the verification set
        self.buckets = [bucket for bucket in self.buckets if bucket_name is None or bucket.name == bucket_name]
        for bucket in self.buckets:
            if key_name is not None:
                valid_keys, deleted_keys = bucket.kvs[kv_store].key_set()
                for key in valid_keys:
                    matchObj = re.search(key_name, key, re.M | re.S) #use regex match to find out keys we need to verify
                    if matchObj is None:
                        partition = bucket.kvs[kv_store].acquire_partition(key)
                        partition.delete(key)  #we delete keys whose prefix does not match the value assigned to -k in KVStore
                        bucket.kvs[kv_store].release_partition(key)
        if single_node_flag is False:
            self._verify_all_buckets(server, kv_store, self.wait_timeout * 50, self.max_verify, True, 1)
        else:
            self.verify_single_node(server, kv_store)

    def verify_single_node(self, server, kv_store=1):
        """This is the verification function for single node backup.

        Args:
          server: the master server in the cluster as self.master.
          kv_store: default value is 1. This is the key of the kv_store of each bucket.

        If the --single-node flag appears in the backup command line, we only back up the items
        held by a single node (the master node in this case). For each bucket, we request the vBucketMap.
        For every key in the kvstore of that bucket, we use the hash function to get the vBucketId
        corresponding to that key. Using the vBucketMap, we can tell whether that key lives on the
        master node or not. If yes, keep it; otherwise delete it from the verification set."""

        rest = RestConnection(server)
        for bucket in self.buckets:
            VBucketAware = VBucketAwareMemcached(rest, bucket.name)
            memcacheds, vBucketMap, vBucketMapReplica = VBucketAware.request_map(rest, bucket.name)
            valid_keys, deleted_keys = bucket.kvs[kv_store].key_set()
            for key in valid_keys:
                vBucketId = VBucketAware._get_vBucket_id(key)
                which_server = vBucketMap[vBucketId]
                sub = which_server.find(":")
                which_server_ip = which_server[:sub]
                if which_server_ip != server.ip:
                    partition = bucket.kvs[kv_store].acquire_partition(key)
                    partition.delete(key)
                    bucket.kvs[kv_store].release_partition(key)

        self._verify_all_buckets(server, kv_store, self.wait_timeout * 50, self.max_verify, True, 1)
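
# A minimal standalone sketch of the key-to-node mapping described in verify_single_node above.
# It assumes the standard Couchbase vBucket hashing (CRC32 of the key, upper bits, modulo the
# number of vBuckets) and that vbucket_map is a list indexed by vBucket id holding the
# "host:port" of the active node, as returned by VBucketAwareMemcached.request_map. The real
# tests use VBucketAware._get_vBucket_id; this is only an illustration of the idea.
import zlib

def key_to_vbucket_id(key, num_vbuckets=1024):
    crc = zlib.crc32(key) & 0xffffffff
    return ((crc >> 16) & 0x7fff) % num_vbuckets

def key_owner_ip(key, vbucket_map, num_vbuckets=1024):
    """Return the ip of the node that owns `key` according to `vbucket_map`."""
    active = vbucket_map[key_to_vbucket_id(key, num_vbuckets)]  # e.g. "10.1.2.3:11210"
    return active.split(":")[0]

# Hypothetical usage inside the filtering loop above:
#   if key_owner_ip(key, vBucketMap) != server.ip:
#       ...  # drop the key from the verification set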
Example #60
0
    def test_clusterOps(self):
        Audit = audit(eventID=self.eventID, host=self.master)
        ops = self.input.param('ops', None)
        servs_inout = self.servers[1:self.nodes_in + 1]
        source = 'ns_server'

        if (ops in ['addNodeKV']):
            self.cluster.rebalance(self.servers, servs_inout, [])
            print servs_inout
            print servs_inout[0].ip
            expectedResults = {
                "services": ['kv'],
                'port': 8091,
                'hostname': servs_inout[0].ip,
                'groupUUID': "0",
                'node': 'ns_1@' + servs_inout[0].ip,
                'source': source,
                'user': self.master.rest_username,
                "ip": self.ipAddress,
                "remote:port": 57457
            }

        if (ops in ['addNodeN1QL']):
            rest = RestConnection(self.master)
            rest.add_node(user=self.master.rest_username,
                          password=self.master.rest_password,
                          remoteIp=servs_inout[0].ip,
                          services=['n1ql'])
            expectedResults = {
                "services": ['n1ql'],
                'port': 8091,
                'hostname': servs_inout[0].ip,
                'groupUUID': "0",
                'node': 'ns_1@' + servs_inout[0].ip,
                'source': source,
                'user': self.master.rest_username,
                "ip": self.ipAddress,
                "remote:port": 57457
            }

        if (ops in ['addNodeIndex']):
            rest = RestConnection(self.master)
            rest.add_node(user=self.master.rest_username,
                          password=self.master.rest_password,
                          remoteIp=servs_inout[0].ip,
                          services=['index'])
            expectedResults = {
                "services": ['index'],
                'port': 8091,
                'hostname': servs_inout[0].ip,
                'groupUUID': "0",
                'node': 'ns_1@' + servs_inout[0].ip,
                'source': source,
                'user': self.master.rest_username,
                "ip": self.ipAddress,
                "remote:port": 57457
            }

        if (ops in ['removeNode']):
            self.cluster.rebalance(self.servers, [], servs_inout)
            shell = RemoteMachineShellConnection(self.master)
            os_type = shell.extract_remote_info().distribution_type
            log.info("OS type is {0}".format(os_type))
            # expected audit fields are identical on Windows and Linux
            expectedResults = {
                "delta_recovery_buckets": "all",
                'known_nodes': ["ns_1@" + servs_inout[0].ip, "ns_1@" + self.master.ip],
                'ejected_nodes': ['ns_1@' + servs_inout[0].ip],
                'source': source,
                'user': self.master.rest_username,
                "ip": self.ipAddress,
                "port": 57457
            }

        if (ops in ['rebalanceIn']):
            self.cluster.rebalance(self.servers, servs_inout, [])
            shell = RemoteMachineShellConnection(self.master)
            os_type = shell.extract_remote_info().distribution_type
            log.info("OS type is {0}".format(os_type))
            # expected audit fields are identical on Windows and Linux
            expectedResults = {
                "delta_recovery_buckets": "all",
                'known_nodes': ["ns_1@" + servs_inout[0].ip, "ns_1@" + self.master.ip],
                'ejected_nodes': [],
                'source': source,
                'user': self.master.rest_username,
                "ip": self.ipAddress,
                "port": 57457
            }

        if (ops in ['rebalanceOut']):
            self.cluster.rebalance(self.servers, [], servs_inout)
            shell = RemoteMachineShellConnection(self.master)
            os_type = shell.extract_remote_info().distribution_type
            log.info("OS type is {0}".format(os_type))
            # expected audit fields are identical on Windows and Linux
            expectedResults = {
                "delta_recovery_buckets": "all",
                'known_nodes': ["ns_1@" + servs_inout[0].ip, "ns_1@" + self.master.ip],
                'ejected_nodes': ['ns_1@' + servs_inout[0].ip],
                'source': source,
                'user': self.master.rest_username,
                "ip": self.ipAddress,
                "port": 57457
            }

        if (ops in ['failover']):
            type = self.input.param('type', None)
            self.cluster.failover(self.servers, servs_inout)
            self.cluster.rebalance(self.servers, [], [])
            expectedResults = {
                'source': source,
                'user': self.master.rest_username,
                "ip": self.ipAddress,
                "port": 57457,
                'type': type,
                'node': 'ns_1@' + servs_inout[0].ip
            }

        if (ops == 'nodeRecovery'):
            expectedResults = {
                'node': 'ns_1@' + servs_inout[0].ip,
                'type': 'delta',
                'source': source,
                'user': self.master.rest_username,
                "ip": self.ipAddress,
                "port": 57457
            }
            self.cluster.failover(self.servers, servs_inout)
            rest = RestConnection(self.master)
            rest.set_recovery_type(expectedResults['node'], 'delta')

        # Pending: soft failover case
        self.checkConfig(self.eventID, self.master, expectedResults)
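
# A minimal sketch of the kind of check self.checkConfig presumably performs: fetch the
# matching audit event for the event id and verify that every expected field is present
# with the expected value. The event-fetching side is omitted here because it depends on
# the audit helper; only the field comparison is illustrated, and it is an assumption
# about checkConfig's behaviour, not its actual implementation.
def audit_event_matches(expected_results, actual_event):
    """Return (True, {}) if every expected field matches, else (False, mismatches)."""
    mismatches = {}
    for field, expected_value in expected_results.items():
        if actual_event.get(field) != expected_value:
            mismatches[field] = (expected_value, actual_event.get(field))
    return (len(mismatches) == 0, mismatches)

# Hypothetical usage, assuming `event` is the audit record fetched for self.eventID:
#   ok, diff = audit_event_matches(expectedResults, event)
#   assert ok, "audit event mismatch: {0}".format(diff)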