Example #1
class CliBaseTest(BaseTestCase):
    vbucketId = 0

    def setUp(self):
        self.times_teardown_called = 1
        super(CliBaseTest, self).setUp()
        self.r = random.Random()
        self.vbucket_count = 1024
        self.cluster = Cluster()
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.error("**** Cluster config is setup in ini file. ****")
        self.shell = RemoteMachineShellConnection(self.master)
        if not self.skip_init_check_cbserver:
            self.rest = RestConnection(self.master)
            self.cb_version = self.rest.get_nodes_version()
            """ cli output message """
            self.cli_bucket_create_msg = "SUCCESS: Bucket created"
            self.cli_rebalance_msg = "SUCCESS: Rebalance complete"
            if self.cb_version[:3] == "4.6":
                self.cli_bucket_create_msg = "SUCCESS: bucket-create"
                self.cli_rebalance_msg = "SUCCESS: rebalanced cluster"
        self.import_back = self.input.param("import_back", False)
        if self.import_back:
            if len(self.servers) < 3:
                self.fail("This test needs minimum of 3 vms to run ")
        self.test_type = self.input.param("test_type", "import")
        self.import_file = self.input.param("import_file", None)
        self.imex_type = self.input.param("imex_type", "json")
        self.format_type = self.input.param("format_type", "lines")
        self.import_method = self.input.param("import_method", "file://")
        self.force_failover = self.input.param("force_failover", False)
        self.json_invalid_errors = self.input.param("json-invalid-errors", None)
        self.field_separator = self.input.param("field-separator", "comma")
        self.key_gen = self.input.param("key-gen", True)
        self.skip_docs = self.input.param("skip-docs", None)
        self.limit_docs = self.input.param("limit-docs", None)
        self.limit_rows = self.input.param("limit-rows", None)
        self.skip_rows = self.input.param("skip-rows", None)
        self.omit_empty = self.input.param("omit-empty", None)
        self.infer_types = self.input.param("infer-types", None)
        self.fx_generator = self.input.param("fx-generator", None)
        self.fx_gen_start = self.input.param("fx-gen-start", None)
        self.secure_conn = self.input.param("secure-conn", False)
        self.no_cacert = self.input.param("no-cacert", False)
        self.no_ssl_verify = self.input.param("no-ssl-verify", False)
        self.verify_data = self.input.param("verify-data", False)
        self.field_substitutions = self.input.param("field-substitutions", None)
        self.check_preload_keys = self.input.param("check-preload-keys", True)
        self.debug_logs = self.input.param("debug-logs", False)
        self.should_fail = self.input.param("should-fail", False)
        info = self.shell.extract_remote_info()
        self.os_version = info.distribution_version.lower()
        self.deliverable_type = info.deliverable_type.lower()
        os_type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.full_v = None
        self.short_v = None
        self.build_number = None
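        # Ask ns_server (via the /diag/eval endpoint) where the Couchbase bin
        # directory lives on the master node.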
        cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,
                                                              self.master.rest_username,
                                                              self.master.rest_password)
        cmd += '-d "path_config:component_path(bin)."'
        bin_path = subprocess.check_output(cmd, shell=True)
        if "bin" not in bin_path:
            self.fail("Check if cb server install on %s" % self.master.ip)
        else:
            self.cli_command_path = bin_path.replace('"','') + "/"
        self.root_path = LINUX_ROOT_PATH
        self.tmp_path = "/tmp/"
        self.tmp_path_raw = "/tmp/"
        self.cmd_backup_path = LINUX_BACKUP_PATH
        self.backup_path = LINUX_BACKUP_PATH
        self.cmd_ext = ""
        self.src_file = ""
        self.des_file = ""
        self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH
        self.log_path = LINUX_COUCHBASE_LOGS_PATH
        self.base_cb_path = LINUX_CB_PATH
        """ non root path """
        if self.nonroot:
            self.sample_files_path = "/home/%s%s" % (self.master.ssh_username,
                                                     LINUX_COUCHBASE_SAMPLE_PATH)
            self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_LOGS_PATH)
            self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                                LINUX_CB_PATH)
            self.root_path = "/home/%s/" % self.master.ssh_username
        if os_type == 'windows':
            self.os = 'windows'
            self.cmd_ext = ".exe"
            self.root_path = WIN_ROOT_PATH
            self.tmp_path = WIN_TMP_PATH
            self.tmp_path_raw = WIN_TMP_PATH_RAW
            self.cmd_backup_path = WIN_BACKUP_C_PATH
            self.backup_path = WIN_BACKUP_PATH
            self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH_C
            self.log_path = WIN_COUCHBASE_LOGS_PATH
            win_format = "C:/Program Files"
            cygwin_format = "/cygdrive/c/Program\ Files"
            if win_format in self.cli_command_path:
                self.cli_command_path = self.cli_command_path.replace(win_format,
                                                                      cygwin_format)
            self.base_cb_path = WIN_CB_PATH
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
        self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(os_type)
        self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
        self.couchbase_password = "******" % (self.input.membase_settings.rest_password)
        self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                        self.couchbase_password)
        self.path_type = self.input.param("path_type", None)
        if self.path_type is None:
            self.log.info("Test command with absolute path ")
        elif self.path_type == "local":
            self.log.info("Test command at %s dir " % self.cli_command_path)
            self.cli_command_path = "cd %s; ./" % self.cli_command_path
        self.cli_command = self.input.param("cli_command", None)
        self.command_options = self.input.param("command_options", None)
        if self.command_options is not None:
            self.command_options = self.command_options.split(";")
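        # For every CLI test class except CouchbaseCliTest itself, rebalance
        # the remaining initial nodes into the cluster up front.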
        if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1:
            if len(self.servers) > 1 and int(self.nodes_init) == 1:
                servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
                self.cluster.rebalance(self.servers[:1], servers_in, [])
        for bucket in self.buckets:
            testuser = [{'id': bucket.name, 'name': bucket.name, 'password': '******'}]
            rolelist = [{'id': bucket.name, 'name': bucket.name, 'roles': 'admin'}]
            self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)


    def tearDown(self):
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1:
                self.shell.disconnect()
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                self.shell.disconnect()
        self.times_teardown_called += 1
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        zones = rest.get_zone_names()
        for zone in zones:
            if zone != "Group 1":
                rest.delete_zone(zone)
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
                if self.dest_nodes and len(self.dest_nodes) > 1:
                    self.log.info("======== clean up destination cluster =======")
                    rest = RestConnection(self.dest_nodes[0])
                    rest.remove_all_remote_clusters()
                    rest.remove_all_replications()
                    BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                    ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
        super(CliBaseTest, self).tearDown()


    """ in sherlock, there is an extra value called runCmd in the 1st element """
    def del_runCmd_value(self, output):
        if "runCmd" in output[0]:
            output = output[1:]
        return output

    def verifyCommandOutput(self, output, expect_error, message):
        """Inspects each line of the output and checks to see if the expected error was found

        Options:
        output - A list of output lines
        expect_error - Whether or not the command should have succeeded or failed
        message - The success or error message

        Returns a boolean indicating whether or not the error/success message was found in the output
        """
        if expect_error:
            for line in output:
                if line == "ERROR: " + message:
                    return True
            log.info("Did not receive expected error message `ERROR: %s`", message)
            return False
        else:
            for line in output:
                if line == "SUCCESS: " + message:
                    return True
            log.info("Did not receive expected success message `SUCCESS: %s`", message)
            return False

    def verifyWarningOutput(self, output, message):
        for line in output:
            if line == "WARNING: " + message:
                return True
        log.info("Did not receive expected error message `WARNING: %s`", message)
        return False

    def verifyServices(self, server, expected_services):
        """Verifies that the services on a given node match the expected service

            Options:
            server - A TestInputServer object of the server to connect to
            expected_services - A comma separated list of services

            Returns a boolean corresponding to whether or not the expected services
            are available on the server.
        """
        rest = RestConnection(server)
        hostname = "%s:%s" % (server.ip, server.port)
        expected_services = expected_services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        nodes_services = rest.get_nodes_services()
        for node, services in nodes_services.iteritems():
            if node.encode('ascii') == hostname:
                if len(services) != len(expected_services):
                    log.info("Services on %s do not match expected services (%s vs. %s)",
                             hostname, services, expected_services)
                    return False
                for service in services:
                    if service.encode("ascii") not in expected_services:
                        log.info("Services on %s do not match expected services (%s vs. %s)",
                                 hostname, services, expected_services)
                        return False
                return True

        log.info("Services on %s not found, the server may not exist", hostname)
        return False

    def verifyRamQuotas(self, server, data, index, fts):
        """Verifies that the RAM quotas for each service are set properly

        Options:
        server - A TestInputServer object of the server to connect to
        data - An int containing the data service RAM quota, None will skip the check
        index - An int containing the index service RAM quota, None will skip the check
        fts - An int containing the FTS service RAM quota, None will skip the check

        Returns a boolean corresponding to whether or not the RAM quotas were set properly
        """
        rest = RestConnection(server)
        settings = rest.get_pools_default()
        if data:
            if "memoryQuota" not in settings:
                log.info("Unable to get data service ram quota")
                return False
            if int(settings["memoryQuota"]) != int(data):
                log.info("Data service memory quota does not match (%d vs %d)",
                         int(settings["memoryQuota"]), int(data))
                return False

        if index:
            if "indexMemoryQuota" not in settings:
                log.info("Unable to get index service ram quota")
                return False
            if int(settings["indexMemoryQuota"]) != int(index):
                log.info(
                    "Index service memory quota does not match (%d vs %d)",
                    int(settings["indexMemoryQuota"]), int(index))
                return False

        if fts:
            if "ftsMemoryQuota" not in settings:
                log.info("Unable to get fts service ram quota")
                return False
            if int(settings["ftsMemoryQuota"]) != int(fts):
                log.info("FTS service memory quota does not match (%d vs %d)",
                         int(settings["ftsMemoryQuota"]), int(fts))
                return False

        return True

    def verifyBucketSettings(self, server, bucket_name, bucket_type, memory_quota,
                             eviction_policy, replica_count, enable_index_replica,
                             priority, enable_flush):
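        """Verifies the bucket settings in the REST bucket payload; any
        argument passed as None is skipped. Returns True when all checks pass."""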
        rest = RestConnection(server)
        result = rest.get_bucket_json(bucket_name)

        if bucket_type == "couchbase":
            bucket_type = "membase"

        if bucket_type is not None and bucket_type != result["bucketType"]:
            log.info("Memory quota does not match (%s vs %s)", bucket_type,
                     result["bucketType"])
            return False

        quota = result["quota"]["rawRAM"] / 1024 / 1024
        if memory_quota is not None and memory_quota != quota:
            log.info("Bucket quota does not match (%s vs %s)", memory_quota,
                     quota)
            return False

        if eviction_policy is not None and eviction_policy != result["evictionPolicy"]:
            log.info("Eviction policy does not match (%s vs %s)",
                     eviction_policy, result["evictionPolicy"])
            return False

        if replica_count is not None and replica_count != result["replicaNumber"]:
            log.info("Replica count does not match (%s vs %s)", replica_count,
                     result["replicaNumber"])
            return False

        if enable_index_replica == 1:
            enable_index_replica = True
        elif enable_index_replica == 0:
            enable_index_replica = False

        if enable_index_replica is not None and enable_index_replica != result["replicaIndex"]:
            log.info("Replica index enabled does not match (%s vs %s)",
                     enable_index_replica, result["replicaIndex"])
            return False

        if priority == "high":
            priority = 8
        elif priority == "low":
            priority = 3

        if priority is not None and priority != result["threadsNumber"]:
            log.info("Bucket priority does not match (%s vs %s)", priority,
                     result["threadsNumber"])
            return False

        if enable_flush is not None:
            if enable_flush == 1 and "flush" not in result["controllers"]:
                log.info("Bucket flush is not enabled, but it should be")
                return False
            elif enable_flush == 0 and "flush" in result["controllers"]:
                log.info("Bucket flush is not enabled, but it should be")
                return False

        return True

    def verifyContainsBucket(self, server, name):
        rest = RestConnection(server)
        buckets = rest.get_buckets()

        for bucket in buckets:
            if bucket.name == name:
                return True
        return False

    def verifyClusterName(self, server, name):
        rest = RestConnection(server)
        settings = rest.get_pools_default("waitChange=0")

        if name is None:
            name = ""
        if name == "empty":
            name = " "

        if "clusterName" not in settings:
            log.info("Unable to get cluster name from server")
            return False
        if settings["clusterName"] != name:
            log.info("Cluster name does not match (%s vs %s)",
                     settings["clusterName"], name)
            return False

        return True

    def isClusterInitialized(self, server):
        """Checks whether or not the server is initialized

        Options:
        server - A TestInputServer object of the server to connect to

        Checks to see whether or not the default pool was created in order to
        determine whether or not the server was initialized. Returns a boolean value
        to indicate initialization.
        """
        rest = RestConnection(server)
        settings = rest.get_pools_info()
        if "pools" in settings and len(settings["pools"]) > 0:
            return True

        return False

    def verifyNotificationsEnabled(self, server):
        rest = RestConnection(server)
        return bool(rest.get_notifications())

    def verifyIndexSettings(self, server, max_rollbacks, stable_snap_interval,
                            mem_snap_interval,
                            storage_mode, threads, log_level):
        rest = RestConnection(server)
        settings = rest.get_global_index_settings()

        if storage_mode == "default":
            storage_mode = "plasma"
        elif storage_mode == "memopt":
            storage_mode = "memory_optimized"

        if max_rollbacks and str(settings["maxRollbackPoints"]) != str(
                max_rollbacks):
            log.info("Max rollbacks does not match (%s vs. %s)",
                     str(settings["maxRollbackPoints"]), str(max_rollbacks))
            return False
        if stable_snap_interval and str(
                settings["stableSnapshotInterval"]) != str(
                stable_snap_interval):
            log.info("Stable snapshot interval does not match (%s vs. %s)",
                     str(settings["stableSnapshotInterval"]),
                     str(stable_snap_interval))
            return False
        if mem_snap_interval and str(
                settings["memorySnapshotInterval"]) != str(mem_snap_interval):
            log.info("Memory snapshot interval does not match (%s vs. %s)",
                     str(settings["memorySnapshotInterval"]),
                     str(mem_snap_interval))
            return False
        if storage_mode and str(settings["storageMode"]) != str(storage_mode):
            log.info("Storage mode does not match (%s vs. %s)",
                     str(settings["storageMode"]), str(storage_mode))
            return False
        if threads and str(settings["indexerThreads"]) != str(threads):
            log.info("Threads does not match (%s vs. %s)",
                     str(settings["indexerThreads"]), str(threads))
            return False
        if log_level and str(settings["logLevel"]) != str(log_level):
            log.info("Log level does not match (%s vs. %s)",
                     str(settings["logLevel"]), str(log_level))
            return False

        return True

    def verifyAutofailoverSettings(self, server, enabled, timeout):
        rest = RestConnection(server)
        settings = rest.get_autofailover_settings()

        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
                str(enabled) == "0" and not settings.enabled)):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings.enabled))
            return False
        if timeout and str(settings.timeout) != str(timeout):
            log.info("Timeout does not match (%s vs. %s)", str(timeout),
                     str(settings.timeout))
            return False

        return True

    def verifyAutoreprovisionSettings(self, server, enabled, max_nodes):
        rest = RestConnection(server)
        settings = rest.get_autoreprovision_settings()

        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
                str(enabled) == "0" and not settings.enabled)):
            log.info("Enabled does not match (%s vs. %s)", str(max_nodes),
                     str(settings.enabled))
            return False
        if max_nodes and str(settings.max_nodes) != str(max_nodes):
            log.info("max_nodes does not match (%s vs. %s)", str(max_nodes),
                     str(settings.max_nodes))
            return False

        return True

    def verifyAuditSettings(self, server, enabled, log_path, rotate_interval):
        rest = RestConnection(server)
        settings = rest.getAuditSettings()

        if enabled and not (
            (str(enabled) == "1" and settings["auditdEnabled"]) or (
                str(enabled) == "0" and not settings["auditdEnabled"])):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings["auditdEnabled"]))
            return False
        if log_path and str(settings["logPath"]) != str(log_path):
            log.info("Log path does not match (%s vs. %s)", str(log_path),
                     str(settings["logPath"]))
            return False

        if rotate_interval and str(settings["rotateInterval"]) != str(rotate_interval):
            log.info("Rotate interval does not match (%s vs. %s)",
                     str(rotate_interval), str(settings["rotateInterval"]))
            return False

        return True

    def verifyPendingServer(self, server, server_to_add, group_name, services):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        expected_services = services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    if node["clusterMembership"] != "inactiveAdded":
                        log.info("Node `%s` not in pending status",
                                 server_to_add)
                        return False

                    if group["name"] != group_name:
                        log.info("Node `%s` not in correct group (%s vs %s)",
                                 node["hostname"], group_name,
                                 group["name"])
                        return False

                    if len(node["services"]) != len(expected_services):
                        log.info("Services do not match on %s (%s vs %s) ",
                                 node["hostname"], services,
                                 ",".join(node["services"]))
                        return False

                    for service in node["services"]:
                        if service not in expected_services:
                            log.info("Services do not match on %s (%s vs %s) ",
                                     node["hostname"], services,
                                     ",".join(node["services"]))
                            return False
                    return True

        log.info("Node `%s` not found in nodes list", server_to_add)
        return False

    def verifyPendingServerDoesNotExist(self, server, server_to_add):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    return False

        log.info("Node `%s` not found in nodes list", server_to_add)
        return True

    def verifyActiveServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "active")

    def verifyFailedServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "inactiveFailed")

    def _verifyServersByStatus(self, server, expected_num_servers, status):
        rest = RestConnection(server)
        settings = rest.get_pools_default()

        count = 0
        for node in settings["nodes"]:
            if node["clusterMembership"] == status:
                count += 1

        return count == expected_num_servers

    def verifyRecoveryType(self, server, recovery_servers, recovery_type):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        if not recovery_servers:
            return True

        num_found = 0
        recovery_servers = recovery_servers.split(",")
        for group in settings["groups"]:
            for node in group["nodes"]:
                for rs in recovery_servers:
                    if node["hostname"] == rs:
                        if node["recoveryType"] != recovery_type:
                            log.info(
                                "Node %s doesn't contain recovery type %s ",
                                rs, recovery_type)
                            return False
                        else:
                            num_found = num_found + 1

        if num_found == len(recovery_servers):
            return True

        log.info("Node `%s` not found in nodes list",
                 ",".join(recovery_servers))
        return False

    def verifyUserRoles(self, server, username, roles):
        status, content, header = rbacmain(server)._retrieve_user_roles()
        content = json.loads(content)
        return rbacmain()._parse_get_user_response(content, username, username, roles)

    def verifyLdapSettings(self, server, admins, ro_admins, default, enabled):
        rest = RestConnection(server)
        settings = rest.ldapRestOperationGetResponse()

        if admins is None:
            admins = []
        else:
            admins = admins.split(",")

        if ro_admins is None:
            ro_admins = []
        else:
            ro_admins = ro_admins.split(",")

        if str(enabled) == "0":
            admins = []
            ro_admins = []

        if default == "admins" and str(enabled) == "1":
            if settings["admins"] != "asterisk":
                log.info("Admins don't match (%s vs asterisk)",
                         settings["admins"])
                return False
        elif not self._list_compare(settings["admins"], admins):
            log.info("Admins don't match (%s vs %s)", settings["admins"],
                     admins)
            return False

        if default == "roadmins" and str(enabled) == "1":
            if settings["roAdmins"] != "asterisk":
                log.info("Read only admins don't match (%s vs asterisk)",
                         settings["roAdmins"])
                return False
        elif not self._list_compare(settings["roAdmins"], ro_admins):
            log.info("Read only admins don't match (%s vs %s)",
                     settings["roAdmins"], ro_admins)
            return False

        return True

    def verifyAlertSettings(self, server, enabled, email_recipients,
                            email_sender, email_username, email_password,
                            email_host,
                            email_port, encrypted, alert_af_node,
                            alert_af_max_reached, alert_af_node_down,
                            alert_af_small,
                            alert_af_disable, alert_ip_changed,
                            alert_disk_space, alert_meta_overhead,
                            alert_meta_oom,
                            alert_write_failed, alert_audit_dropped):
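        """Verifies the email alert settings against the REST alerts payload;
        returns True when all supplied values match."""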
        rest = RestConnection(server)
        settings = rest.get_alerts_settings()
        log.info(settings)

        if not enabled:
            if not settings["enabled"]:
                return True
            else:
                log.info("Alerts should be disabled")
                return False

        if encrypted is None or encrypted == "0":
            encrypted = False
        else:
            encrypted = True

        if email_recipients is not None and not self._list_compare(
                email_recipients.split(","), settings["recipients"]):
            log.info("Email recipients don't match (%s vs %s)",
                     email_recipients.split(","), settings["recipients"])
            return False

        if email_sender is not None and email_sender != settings["sender"]:
            log.info("Email sender does not match (%s vs %s)", email_sender,
                     settings["sender"])
            return False

        if email_username is not None and email_username != \
                settings["emailServer"]["user"]:
            log.info("Email username does not match (%s vs %s)",
                     email_username, settings["emailServer"]["user"])
            return False

        if email_host is not None and email_host != settings["emailServer"]["host"]:
            log.info("Email host does not match (%s vs %s)", email_host,
                     settings["emailServer"]["host"])
            return False

        if email_port is not None and email_port != settings["emailServer"]["port"]:
            log.info("Email port does not match (%s vs %s)", email_port,
                     settings["emailServer"]["port"])
            return False

        if encrypted is not None and encrypted != settings["emailServer"]["encrypt"]:
            log.info("Email encryption does not match (%s vs %s)", encrypted,
                     settings["emailServer"]["encrypt"])
            return False

        alerts = list()
        if alert_af_node:
            alerts.append('auto_failover_node')
        if alert_af_max_reached:
            alerts.append('auto_failover_maximum_reached')
        if alert_af_node_down:
            alerts.append('auto_failover_other_nodes_down')
        if alert_af_small:
            alerts.append('auto_failover_cluster_too_small')
        if alert_af_disable:
            alerts.append('auto_failover_disabled')
        if alert_ip_changed:
            alerts.append('ip')
        if alert_disk_space:
            alerts.append('disk')
        if alert_meta_overhead:
            alerts.append('overhead')
        if alert_meta_oom:
            alerts.append('ep_oom_errors')
        if alert_write_failed:
            alerts.append('ep_item_commit_failed')
        if alert_audit_dropped:
            alerts.append('audit_dropped_events')

        if not self._list_compare(alerts, settings["alerts"]):
            log.info("Alerts don't match (%s vs %s)", alerts,
                     settings["alerts"])
            return False

        return True

    def verify_node_settings(self, server, data_path, index_path, hostname):
        rest = RestConnection(server)
        node_settings = rest.get_nodes_self()

        if data_path != node_settings.storage[0].path:
            log.info("Data path does not match (%s vs %s)", data_path,
                     node_settings.storage[0].path)
            return False
        if index_path != node_settings.storage[0].index_path:
            log.info("Index path does not match (%s vs %s)", index_path,
                     node_settings.storage[0].index_path)
            return False
        if hostname is not None:
            if hostname != node_settings.hostname:
                log.info("Hostname does not match (%s vs %s)", hostname,
                         node_settings.hostname)
                return False
        return True

    def verifyCompactionSettings(self, server, db_frag_perc, db_frag_size,
                                 view_frag_perc, view_frag_size, from_period,
                                 to_period, abort_outside, parallel_compact,
                                 purgeInt):
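        """Verifies auto-compaction settings against the REST payload; None
        arguments are skipped. Fragmentation sizes are given in MB and compared
        against the server's byte values."""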
        rest = RestConnection(server)
        settings = rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]

        if db_frag_perc is not None and str(db_frag_perc) != str(
                ac["databaseFragmentationThreshold"]["percentage"]):
            log.info("DB frag perc does not match (%s vs %s)",
                     str(db_frag_perc),
                     str(ac["databaseFragmentationThreshold"]["percentage"]))
            return False

        if db_frag_size is not None and str(db_frag_size * 1024 ** 2) != str(
                ac["databaseFragmentationThreshold"]["size"]):
            log.info("DB frag size does not match (%s vs %s)",
                     str(db_frag_size * 1024 ** 2),
                     str(ac["databaseFragmentationThreshold"]["size"]))
            return False

        if view_frag_perc is not None and str(view_frag_perc) != str(
                ac["viewFragmentationThreshold"]["percentage"]):
            log.info("View frag perc does not match (%s vs %s)",
                     str(view_frag_perc),
                     str(ac["viewFragmentationThreshold"]["percentage"]))
            return False

        if view_frag_size is not None and str(view_frag_size * 1024 ** 2) != \
                str(ac["viewFragmentationThreshold"]["size"]):
            log.info("View frag size does not match (%s vs %s)",
                     str(view_frag_size * 1024 ** 2),
                     str(ac["viewFragmentationThreshold"]["size"]))
            return False

        log.info("from_period: %s, to_period: %s", from_period, to_period)
        if from_period is not None:
            fromHour, fromMin = from_period.split(":", 1)
            if int(fromHour) != int(ac["allowedTimePeriod"]["fromHour"]):
                log.info("From hour does not match (%s vs %s)", str(fromHour),
                         str(ac["allowedTimePeriod"]["fromHour"]))
                return False
            if int(fromMin) != int(ac["allowedTimePeriod"]["fromMinute"]):
                log.info("From minute does not match (%s vs %s)", str(fromMin),
                         str(ac["allowedTimePeriod"]["fromMinute"]))
                return False

        if to_period is not None:
            toHour, toMin = to_period.split(":", 1)
            if int(toHour) != int(ac["allowedTimePeriod"]["toHour"]):
                log.info("To hour does not match (%s vs %s)", str(toHour),
                         str(ac["allowedTimePeriod"]["toHour"]))
                return False
            if int(toMin) != int(ac["allowedTimePeriod"]["toMinute"]):
                log.info("To minute does not match (%s vs %s)", str(toMin),
                         str(ac["allowedTimePeriod"]["toMinute"]))
                return False

        if str(abort_outside) == "1":
            abort_outside = True
        elif str(abort_outside) == "0":
            abort_outside = False

        if abort_outside is not None and abort_outside != \
                ac["allowedTimePeriod"]["abortOutside"]:
            log.info("Abort outside does not match (%s vs %s)", abort_outside,
                     ac["allowedTimePeriod"]["abortOutside"])
            return False

        if str(parallel_compact) == "1":
            parallel_compact = True
        elif str(parallel_compact) == "0":
            parallel_compact = False

        if parallel_compact is not None and parallel_compact != ac["parallelDBAndViewCompaction"]:
            log.info("Parallel compact does not match (%s vs %s)",
                     str(parallel_compact),
                     str(ac["parallelDBAndViewCompaction"]))
            return False

        if purgeInt is not None and str(purgeInt) != str(
                settings["purgeInterval"]):
            log.info("Purge interval does not match (%s vs %s)", str(purgeInt),
                     str(settings["purgeInterval"]))
            return False

        return True

    def verify_gsi_compact_settings(self, compact_mode, compact_percent,
                                    compact_interval,
                                    from_period, to_period, enable_abort):
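        """Verifies GSI auto-compaction settings ("append" or "circular" mode);
        raises an Exception on mismatch instead of returning False."""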
        rest = RestConnection(self.master)
        settings = rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]["indexFragmentationThreshold"]
        cc = settings["autoCompactionSettings"]["indexCircularCompaction"]
        if compact_mode is not None:
            if compact_mode == "append":
                self.log.info("append compactino settings %s " % ac)
                if compact_percent is not None and \
                                compact_percent != ac["percentage"]:
                    raise Exception(
                        "setting percent does not match.  Set: %s vs %s :Actual"
                        % (compact_percent, ac["percentage"]))
            if compact_mode == "circular":
                self.log.info("circular compaction settings %s " % cc)
                if enable_abort and not cc["interval"]["abortOutside"]:
                    raise Exception("setting enable abort failed")
                if compact_interval is not None:
                    if compact_interval != cc["daysOfWeek"]:
                        raise Exception(
                            "Failed to set compaction on %s " % compact_interval)
                    elif from_period is None and int(
                            cc["interval"]["fromHour"]) != 0 and \
                                    int(cc["interval"]["fromMinute"]) != 0:
                        raise Exception(
                            "fromHour and fromMinute should be zero")
                if compact_interval is None:
                    from_str = "%s:%s" % (cc["interval"]["fromHour"],
                                          cc["interval"]["fromMinute"])
                    to_str = "%s:%s" % (cc["interval"]["toHour"],
                                        cc["interval"]["toMinute"])
                    if from_period != from_str and to_period != to_str:
                        raise Exception(
                            "fromHour and fromMinute are not set correctly")
        return True

    def verifyGroupExists(self, server, name):
        rest = RestConnection(server)
        groups = rest.get_zone_names()
        log.info(groups)

        for gname, _ in groups.iteritems():
            if name == gname:
                return True

        return False

    def _list_compare(self, list1, list2):
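        """Order-insensitive comparison: True if both lists have the same
        length and every element of list1 also appears in list2."""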
        if len(list1) != len(list2):
            return False
        for elem1 in list1:
            found = False
            for elem2 in list2:
                if elem1 == elem2:
                    found = True
                    break
            if not found:
                return False
        return True

    def waitForItemCount(self, server, bucket_name, count, timeout=30):
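        """Polls the bucket item count once per second until it equals `count`
        or `timeout` seconds elapse; returns True if the count was reached."""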
        rest = RestConnection(server)
        for sec in range(timeout):
            items = int(
                rest.get_bucket_json(bucket_name)["basicStats"]["itemCount"])
            if items != count:
                time.sleep(1)
            else:
                return True
        log.info("Waiting for item count to be %d timed out", count)
        return False
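
The verify helpers above are meant to be chained after a CLI invocation: run the command over SSH, assert on the SUCCESS:/ERROR: line it prints, then cross-check the result over REST. A minimal sketch of that pattern, assuming a subclass of CliBaseTest (the method name, bucket name, and quota are illustrative, not taken from the suite):

    def example_bucket_create_check(self):
        # Hypothetical illustration: build the couchbase-cli command from the
        # paths and credentials prepared in setUp().
        cmd = "%scouchbase-cli%s bucket-create -c %s:8091 -u %s -p %s " \
              "--bucket sample --bucket-ramsize 100" \
              % (self.cli_command_path, self.cmd_ext, self.master.ip,
                 self.couchbase_usrname, self.couchbase_password)
        output, error = self.shell.execute_command(cmd)
        # verifyCommandOutput prepends "SUCCESS: " to the expected message.
        self.assertTrue(self.verifyCommandOutput(output, False, "Bucket created"),
                        "bucket-create did not report success")
        self.assertTrue(self.verifyContainsBucket(self.master, "sample"),
                        "bucket missing from REST bucket list")
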
Example #2
class Capi(XDCRNewBaseTest, NewUpgradeBaseTest):

    def setUp(self):
        super(Capi, self).setUp()
        self.cluster = Cluster()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.use_hostnames = self._input.param("use_hostnames", False)
        self.src_init = self._input.param('src_init', 2)
        self.dest_init = self._input.param('dest_init', 1)
        self.product = self._input.param('product', 'couchbase-server')
        self.initial_version = self._input.param('initial_version', '2.5.1-1083')
        self.initial_vbuckets = self._input.param('initial_vbuckets', 1024)
        self.init_nodes = self._input.param('init_nodes', True)
        self.initial_build_type = self._input.param('initial_build_type', None)
        self.upgrade_build_type = self._input.param('upgrade_build_type', self.initial_build_type)
        self.master = self.src_master
        self.rest = RestConnection(self.src_master)

    def tearDown(self):
        super(Capi, self).tearDown()

    def _start_es_replication(self, bucket='default', xdcr_params=None):
        if xdcr_params is None:
            xdcr_params = {}
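        # Creates the requested source bucket, a matching ES index and a remote
        # cluster reference, then starts a CAPI replication; returns the repl id.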
        rest_conn = RestConnection(self.src_cluster.get_master_node())
        if bucket == 'default':
            self.log.info("Creating default bucket")
            rest_conn.create_bucket(bucket='default', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
                                proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='default', authType='none',
                                   saslPassword='', replicaNumber=1, proxyPort=11211, bucketType='membase',
                                   evictionPolicy='valueOnly')
        elif bucket == 'sasl':
            self.log.info("Creating sasl bucket")
            rest_conn.create_bucket(bucket='sasl', ramQuotaMB=100, authType='sasl', saslPassword='******', replicaNumber=1,
                                proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='sasl', authType='sasl',
                                   saslPassword='******', replicaNumber=1, proxyPort=11211, bucketType='membase',
                                   evictionPolicy='valueOnly')
        elif bucket == 'standard':
            self.log.info("Creating standard bucket")
            rest_conn.create_bucket(bucket='standard', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
                                proxyPort=STANDARD_BUCKET_PORT, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='standard', authType='none',
                                   saslPassword='', replicaNumber=1, proxyPort=STANDARD_BUCKET_PORT, bucketType='membase',
                                   evictionPolicy='valueOnly')
        elif bucket == 'lww':
            self.log.info("Creating lww bucket")
            rest_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
                                proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=True)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='lww', authType='none',
                                   saslPassword='', replicaNumber=1, proxyPort=11211, bucketType='membase',
                                   evictionPolicy='valueOnly')
        esrest_conn = EsRestConnection(self.dest_cluster.get_master_node())
        esrest_conn.create_index(bucket)
        rest_conn.add_remote_cluster(remoteIp=self.dest_master.ip, remotePort=9091, username='******',
                                     password='******', name='es')
        self.src_cluster.get_remote_clusters().append(XDCRRemoteClusterRef(self.src_cluster, self.dest_cluster,
                                                                       Utility.get_rc_name(self.src_cluster.get_name(),
                                                                                        self.dest_cluster.get_name())))
        repl_id = rest_conn.start_replication(replicationType='continuous', fromBucket=bucket, toCluster='es',
                                              rep_type='capi', toBucket=bucket, xdcr_params=xdcr_params)
        return repl_id

    def _verify_es_results(self, bucket='default'):
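        """Fetches every doc from ES and asserts each one matches the copy read
        from the Couchbase vbucket that owns its key."""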
        esrest_conn = EsRestConnection(self.dest_master)
        es_docs = esrest_conn.all_docs()
        self.log.info("Retrieved ES Docs")
        rest_conn = RestConnection(self.src_master)
        memcached_conn = VBucketAwareMemcached(rest_conn, bucket)
        self.log.info("Comparing CB and ES data")
        for doc in es_docs:
            es_data = doc['doc']
            mc_active = memcached_conn.memcached(str(es_data['_id']))
            cb_flags, cb_cas, cb_data = mc_active.get(str(es_data['_id']))
            self.assertDictEqual(es_data, json.loads(cb_data), "Data mismatch found - es data: {0} cb data: {1}".
                                 format(str(es_data), str(cb_data)))
        self.log.info("Data verified")

    def test_crud_ops_from_cb_to_es(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)

    def test_incr_crud_ops_from_cb_to_es(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.async_perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_pause_resume(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.async_load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        self.sleep(30)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_checkpointing(self):
        repl_id = self._start_es_replication(xdcr_params={"checkpointInterval":"60"})

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.sleep(120)

        vb0_node = None
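        # Locate the cluster node that hosts active vbucket 0 so its most
        # recent XDCR checkpoint record can be fetched over REST.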
        nodes = self.src_cluster.get_nodes()
        ip = VBucketAwareMemcached(rest_conn, 'default').vBucketMap[0].split(':')[0]
        for node in nodes:
            if ip == node.ip:
                vb0_node = node
        if not vb0_node:
            raise XDCRCheckpointException("Error determining the node containing active vb0")
        vb0_conn = RestConnection(vb0_node)
        try:
            checkpoint_record = vb0_conn.get_recent_xdcr_vb_ckpt(repl_id)
            self.log.info("Checkpoint record : {0}".format(checkpoint_record))
        except Exception as e:
            raise XDCRCheckpointException("Error retrieving last checkpoint document - {0}".format(e))

        self._verify_es_results()

    def test_capi_with_optimistic_replication(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)

        rest_conn.set_xdcr_param('default', 'default', 'optimisticReplicationThreshold', self._optimistic_threshold)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_filter(self):
        repl_id = self._start_es_replication(xdcr_params={'filterExpression':'es-5*'})

        rest_conn = RestConnection(self.src_master)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_advanced_settings(self):
        batch_count = self._input.param("batch_count", 10)
        batch_size = self._input.param("batch_size", 2048)
        source_nozzle = self._input.param("source_nozzle", 2)
        target_nozzle = self._input.param("target_nozzle", 2)
        enable_firewall = self._input.param("enable_firewall", False)

        capi_data_chan_size_multi = self._input.param("capi_data_chan_size_multi", None)
        if capi_data_chan_size_multi:
            shell = RemoteMachineShellConnection(self.src_master)
            command = "curl -X POST -u Administrator:password http://127.0.0.1:9998/xdcr/internalSettings " + \
                      "-d CapiDataChanSizeMultiplier=" + str(capi_data_chan_size_multi)
            output, error = shell.execute_command(command)
            shell.log_command_output(output, error)

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)

        rest_conn.set_xdcr_param('default', 'default', 'workerBatchSize', batch_count)
        rest_conn.set_xdcr_param('default', 'default', 'docBatchSizeKb', batch_size)
        rest_conn.set_xdcr_param('default', 'default', 'sourceNozzlePerNode', source_nozzle)
        rest_conn.set_xdcr_param('default', 'default', 'targetNozzlePerNode', target_nozzle)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        if enable_firewall:
            NodeHelper.enable_firewall(self.dest_cluster.get_master_node())
            self.sleep(120)
            NodeHelper.disable_firewall(self.dest_cluster.get_master_node())

        self._verify_es_results()

    def test_capi_with_rebalance_in(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.src_cluster.rebalance_in()

        self._wait_for_es_replication_to_catchup(timeout=900)

        self._verify_es_results()

    def test_capi_with_rebalance_out(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.src_cluster.rebalance_out()

        self._wait_for_es_replication_to_catchup(timeout=900)

        self._verify_es_results()

    def test_capi_with_swap_rebalance(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.src_cluster.swap_rebalance()

        self._wait_for_es_replication_to_catchup(timeout=600)

        self._verify_es_results()

    def test_capi_with_failover(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        graceful = self._input.param("graceful", False)
        self.recoveryType = self._input.param("recoveryType", None)
        self.src_cluster.failover(graceful=graceful)

        self.sleep(30)

        if self.recoveryType:
            server_nodes = rest_conn.node_statuses()
            for node in server_nodes:
                if node.ip == self._input.servers[1].ip:
                    rest_conn.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
                    self.sleep(30)
                    rest_conn.add_back_node(otpNode=node.id)
            rebalance = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
            rebalance.result()

        self._verify_es_results()

    def test_capi_with_malformed_http_resp(self):
        repl_id = self._start_es_replication(xdcr_params={'workerBatchSize':'2000',
                                                          'docBatchSizeKb':'8096',
                                                          'targetNozzlePerNode':'64'})

        rest_conn = RestConnection(self.src_master)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self.src_master)\
                     + '/goxdcr.log*'
        for node in self.src_cluster.get_nodes():
            count = NodeHelper.check_goxdcr_log(
                            node,
                            "malformed HTTP response",
                            goxdcr_log)
            self.assertEqual(count, 0, "malformed HTTP response error message found in " + str(node.ip))
            self.log.info("malformed HTTP response error message not found in " + str(node.ip))

        self._verify_es_results()

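    # NodeHelper.check_goxdcr_log is assumed to count occurrences of the
    # message in the node's goxdcr logs, roughly this (hypothetical) check:
    #   grep -c "malformed HTTP response" <goxdcr_log_dir>/goxdcr.log*
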
    def test_capi_with_offline_upgrade(self):
        self._install(self._input.servers[:self.src_init + self.dest_init])
        upgrade_nodes = self.src_cluster.get_nodes()
        upgrade_version = self._input.param("upgrade_version", "5.0.0-1797")

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        self._install(servers=upgrade_nodes, version=upgrade_version)

        self.log.info("######### Upgrade of CB cluster completed ##########")

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value"}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_online_upgrade(self):
        self._install(self._input.servers[:self.src_init + self.dest_init])
        upgrade_version = self._input.param("upgrade_version", "5.0.0-1797")
        upgrade_nodes = self.src_cluster.get_nodes()
        extra_nodes = self._input.servers[self.src_init + self.dest_init:]

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        find_master = False
        for new_server in extra_nodes:
            if content.find(new_server.ip) >= 0:
                find_master = True
                self.log.info("{0} Node {1} becomes the master".format(added_versions[0], new_server.ip))
                break
        if not find_master:
            raise Exception("After rebalance in {0} Nodes, one of them doesn't become the master".
                            format(added_versions[0]))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[self.src_init + self.dest_init]

        self._install(self.src_cluster.get_nodes(), version=upgrade_version)
        upgrade_nodes = self._input.servers[self.src_init + self.dest_init:]
        extra_nodes = self.src_cluster.get_nodes()

        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[0]

        self.log.info("######### Upgrade of CB cluster completed ##########")

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value"}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

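    # The online upgrade above is two swap rebalances: the spare nodes are
    # rebalanced in and the original nodes out, the freed originals are
    # reinstalled with upgrade_version and swapped back in, and the spares
    # are rebalanced out again, so the cluster stays available throughout.
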
    def test_capi_with_cb_stop_and_start(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.async_perform_update_delete()

        conn = RemoteMachineShellConnection(self.src_master)
        conn.stop_couchbase()
        conn.start_couchbase()

        self.sleep(30)

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)

    def test_capi_with_erlang_crash(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.async_perform_update_delete()

        conn = RemoteMachineShellConnection(self.src_master)
        conn.kill_erlang()
        conn.start_couchbase()

        self.sleep(30)

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)

    def test_capi_with_memcached_crash(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.async_perform_update_delete()

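        # pause_memcached/unpause_memcached are assumed to suspend and resume
        # the memcached process (SIGSTOP/SIGCONT style), simulating a
        # temporary hang rather than a full crash.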
        conn = RemoteMachineShellConnection(self.src_master)
        conn.pause_memcached()
        conn.unpause_memcached()

        self.sleep(30)

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)
Example no. 3
class BucketConfig(BaseTestCase):

    def setUp(self):
        super(BucketConfig, self).setUp()
        self.testcase = '2'
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        #self.time_synchronization = self.input.param("time_sync", "enabledWithoutDrift")
        self.lww = self.input.param("lww", True)
        self.drift = self.input.param("drift", False)
        self.bucket='bucket-1'
        self.master = self.servers[0]
        self.rest = RestConnection(self.master)
        self.cluster = Cluster()
        self.skip_rebalance = self.input.param("skip_rebalance", False)

        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)

        if not self.skip_rebalance:
            self.rest.init_cluster(self.master.rest_username,
                self.master.rest_password)
            self.rest.init_cluster_memoryQuota(self.master.rest_username,
                self.master.rest_password,
                memoryQuota=mem_quota)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    [self.master], self.testcase)
            try:
                rebalanced = ClusterOperationHelper.add_and_rebalance(
                    self.servers)

            except Exception as e:
                self.fail('cluster is not rebalanced: {0}'.format(e))

        self._create_bucket(self.lww, self.drift)

    def tearDown(self):
        super(BucketConfig, self).tearDown()
        # the cleanup below is intentionally disabled by this early return;
        # drop the return to re-enable it
        return
        if "skip_cleanup" not in TestInputSingleton.input.test_params:
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self.testcase)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                self.servers, self.testcase)

    def test_modify_bucket_params(self):
        try:
            self.log.info("Modifying timeSynchronization value after bucket creation .....")
            self._modify_bucket()
        except Exception as e:
            traceback.print_exc()
            self.fail('[ERROR] Modify testcase failed .., {0}'.format(e))

    def test_restart(self):
        try:
            self.log.info("Restarting the servers ..")
            self._restart_server(self.servers[:])
            self.log.info("Verifying bucket settings after restart ..")
            self._check_config()
        except Exception as e:
            traceback.print_exc()
            self.fail("[ERROR] Check data after restart failed with exception {0}".format(e))

    def test_failover(self):
        num_nodes = 1
        try:
            self.log.info("Failing over {0} of the servers ..".format(num_nodes))
            # servers[1:num_nodes + 1] selects num_nodes nodes after the master
            self.cluster.failover(self.servers, self.servers[1:num_nodes + 1])
            self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes + 1])
            self.log.info("Verifying bucket settings after failover ..")
            self._check_config()
        except Exception as e:
            traceback.print_exc()
            self.fail('[ERROR] Failed to failover .. , {0}'.format(e))

    def test_rebalance_in(self):
        try:
            self.log.info("Rebalancing 1 of the servers ..")
            ClusterOperationHelper.add_and_rebalance(
                self.servers)
            self.log.info("Verifying bucket settings after rebalance ..")
            self._check_config()
        except Exception as e:
            self.fail('[ERROR] Rebalance failed .. , {0}'.format(e))

    def test_backup_same_cluster(self):
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])

        finally:
            self._check_config()

    def test_backup_diff_bucket(self):
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            self._create_bucket(lww=False, name="new_bucket")
            self.buckets = RestConnection(self.master).get_buckets()
            self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, ["new_bucket"])

        finally:
            self._check_config()

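    # execute_cluster_backup/restore_backupFile are assumed to wrap the
    # legacy cbbackup/cbrestore tools; a manual run would look roughly like
    # (hypothetical paths and credentials):
    #   /opt/couchbase/bin/cbbackup http://<master>:8091 /tmp/backup -u <user> -p <password>
    #   /opt/couchbase/bin/cbrestore /tmp/backup http://<master>:8091 -b bucket-1 -u <user> -p <password>
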
    ''' Helper functions for the above test cases
    '''
    # Create the bucket if it doesn't already exist.
    # The drift parameter is currently unused.
    def _create_bucket(self, lww=True, drift=False, name=None):
        self.lww = lww
        if name:
            self.bucket = name

        helper = RestHelper(self.rest)
        if not helper.bucket_exists(self.bucket):
            self.rest.create_bucket(bucket=self.bucket,
                ramQuotaMB=512, authType='sasl', lww=self.lww)
            try:
                ready = BucketOperationHelper.wait_for_memcached(self.master,
                    self.bucket)
                self.assertTrue(ready, 'memcached not ready for bucket {0}'.format(self.bucket))
            except Exception as e:
                self.fail('unable to create bucket {0}: {1}'.format(self.bucket, e))

    # TODO (Ketaki): change this
    def _modify_bucket(self):
        status, content = self.rest.change_bucket_props(bucket=self.bucket,
            ramQuotaMB=512, authType='sasl', timeSynchronization='enabledWithOutDrift')
        # the misspelled 'TimeSyncronization' matches the server's message
        if re.search('TimeSyncronization not allowed in update bucket', content):
            self.log.info('[PASS] Expected modify bucket to disallow Time Synchronization.')
        else:
            self.fail('[ERROR] Not expected to allow modify bucket for Time Synchronization')

    def _restart_server(self, servers):
        for server in servers:
            shell = RemoteMachineShellConnection(server)
            shell.stop_couchbase()
            time.sleep(10)
            shell.start_couchbase()
            shell.disconnect()
        ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, self, wait_if_warmup=True)

    # REBOOT
    def _reboot_server(self):
        try:
            for server in self.servers[:]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warming-up servers ..")
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)
    def _check_config(self):
        rc = self.rest.get_bucket_json(self.bucket)
        if 'conflictResolutionType' in rc:
            conflict_resolution = rc['conflictResolutionType']
            expected = 'lww' if self.lww else 'seqno'
            self.assertTrue(conflict_resolution == expected,
                            'Expected conflict resolution of {0} but got {1}'.format(expected, conflict_resolution))
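        # For reference, the bucket JSON returned by
        # /pools/default/buckets/<bucket> carries the policy, e.g. (trimmed):
        #   {"name": "bucket-1", "conflictResolutionType": "lww", ...}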

        """ drift is disabled in 4.6; commenting out for now as it may come back later """
Example no. 4
class BaseTestCase(unittest.TestCase, bucket_utils, cluster_utils,
                   failover_utils, node_utils, views_utils):
    def setUp(self):
        self.failover_util = failover_utils()
        self.node_util = node_utils()
        self.views_util = views_utils()

        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.primary_index_created = False
        self.use_sdk_client = self.input.param("use_sdk_client", False)
        self.analytics = self.input.param("analytics", False)
        if self.input.param("log_level", None):
            log.setLevel(level=0)
            for hd in log.handlers:
                if str(hd.__class__).find('FileHandler') != -1:
                    hd.setLevel(level=logging.DEBUG)
                else:
                    hd.setLevel(level=getattr(
                        logging, self.input.param("log_level", None)))
        self.servers = self.input.servers
        if str(self.__class__).find('moxitests') != -1:
            self.moxi_server = self.input.moxis[0]
            self.servers = [
                server for server in self.servers
                if server.ip != self.moxi_server.ip
            ]
        self.buckets = []
        self.bucket_base_params = {}
        self.bucket_base_params['membase'] = {}
        self.master = self.servers[0]
        self.bucket_util = bucket_utils(self.master)
        self.cluster_util = cluster_utils(self.master)
        self.indexManager = self.servers[0]
        if not hasattr(self, 'cluster'):
            self.cluster = Cluster()
        self.pre_warmup_stats = {}
        self.cleanup = False
        self.nonroot = False
        shell = RemoteMachineShellConnection(self.master)
        self.os_info = shell.extract_remote_info().type.lower()
        if self.os_info != 'windows':
            if self.master.ssh_username != "root":
                self.nonroot = True
        shell.disconnect()
        """ some tests need to bypass checking cb server at set up
            to run installation """
        self.skip_init_check_cbserver = \
            self.input.param("skip_init_check_cbserver", False)
        self.data_collector = DataCollector()
        self.data_analyzer = DataAnalyzer()
        self.result_analyzer = DataAnalysisResultAnalyzer()
        #         self.set_testrunner_client()
        self.change_bucket_properties = False
        self.cbas_node = self.input.cbas
        self.cbas_servers = []
        self.kv_servers = []
        self.otpNodes = []
        for server in self.servers:
            if "cbas" in server.services:
                self.cbas_servers.append(server)
            if "kv" in server.services:
                self.kv_servers.append(server)
        if not self.cbas_node and len(self.cbas_servers) >= 1:
            self.cbas_node = self.cbas_servers[0]

        try:
            self.skip_setup_cleanup = self.input.param("skip_setup_cleanup",
                                                       False)
            self.vbuckets = self.input.param("vbuckets", 1024)
            self.upr = self.input.param("upr", None)
            self.index_quota_percent = self.input.param(
                "index_quota_percent", None)
            self.targetIndexManager = self.input.param("targetIndexManager",
                                                       False)
            self.targetMaster = self.input.param("targetMaster", False)
            self.reset_services = self.input.param("reset_services", False)
            self.auth_mech = self.input.param("auth_mech", "PLAIN")
            self.wait_timeout = self.input.param("wait_timeout", 60)
            # number of the case run by testrunner (incremented each time)
            self.case_number = self.input.param("case_number", 0)
            self.default_bucket = self.input.param("default_bucket", True)
            self.parallelism = self.input.param("parallelism", False)
            if self.default_bucket:
                self.default_bucket_name = "default"
            self.standard_buckets = self.input.param("standard_buckets", 0)
            self.sasl_buckets = self.input.param("sasl_buckets", 0)
            self.num_buckets = self.input.param("num_buckets", 0)
            self.verify_unacked_bytes = self.input.param(
                "verify_unacked_bytes", False)
            self.memcached_buckets = self.input.param("memcached_buckets", 0)
            self.enable_flow_control = self.input.param(
                "enable_flow_control", False)
            self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets + self.memcached_buckets
            self.num_servers = self.input.param("servers", len(self.servers))
            # initial number of nodes in the cluster
            self.nodes_init = self.input.param("nodes_init", 1)
            self.nodes_in = self.input.param("nodes_in", 1)
            self.nodes_out = self.input.param("nodes_out", 1)
            self.services_init = self.input.param("services_init", None)
            self.services_in = self.input.param("services_in", None)
            self.forceEject = self.input.param("forceEject", False)
            self.force_kill_memcached = TestInputSingleton.input.param(
                'force_kill_memcached', False)
            self.num_items = self.input.param("items", 1000)
            self.value_size = self.input.param("value_size", 512)
            self.dgm_run = self.input.param("dgm_run", False)
            self.active_resident_threshold = int(
                self.input.param("active_resident_threshold", 0))
            # max number of items to verify in ValidateDataTask; None verifies all
            self.max_verify = self.input.param("max_verify", None)
            # we don't change consistent_view on server by default
            self.disabled_consistent_view = self.input.param(
                "disabled_consistent_view", None)
            self.rebalanceIndexWaitingDisabled = self.input.param(
                "rebalanceIndexWaitingDisabled", None)
            self.rebalanceIndexPausingDisabled = self.input.param(
                "rebalanceIndexPausingDisabled", None)
            self.maxParallelIndexers = self.input.param(
                "maxParallelIndexers", None)
            self.maxParallelReplicaIndexers = self.input.param(
                "maxParallelReplicaIndexers", None)
            self.quota_percent = self.input.param("quota_percent", None)
            self.port = None
            self.log_message = self.input.param("log_message", None)
            self.log_info = self.input.param("log_info", None)
            self.log_location = self.input.param("log_location", None)
            self.stat_info = self.input.param("stat_info", None)
            self.port_info = self.input.param("port_info", None)
            if not hasattr(self, 'skip_buckets_handle'):
                self.skip_buckets_handle = self.input.param(
                    "skip_buckets_handle", False)
            self.nodes_out_dist = self.input.param("nodes_out_dist", None)
            self.absolute_path = self.input.param("absolute_path", True)
            self.test_timeout = self.input.param(
                "test_timeout", 3600)  # kill hang test and jump to next one.
            self.enable_bloom_filter = self.input.param(
                "enable_bloom_filter", False)
            self.enable_time_sync = self.input.param("enable_time_sync", False)
            self.gsi_type = self.input.param("gsi_type", 'plasma')
            # bucket parameters go here,
            self.bucket_size = self.input.param("bucket_size", None)
            self.bucket_type = self.input.param("bucket_type", 'membase')
            self.num_replicas = self.input.param("replicas", 1)
            self.enable_replica_index = self.input.param("index_replicas", 1)
            self.eviction_policy = self.input.param(
                "eviction_policy", 'valueOnly')  # or 'fullEviction'
            # for an ephemeral bucket it can be noEviction or nruEviction
            if self.bucket_type == 'ephemeral' and self.eviction_policy == 'valueOnly':
                # use the ephemeral bucket default
                self.eviction_policy = 'noEviction'

            self.sasl_password = self.input.param("sasl_password", 'password')
            self.lww = self.input.param(
                "lww", False
            )  # only applies to LWW but is here because the bucket is created here
            self.maxttl = self.input.param("maxttl", None)
            self.compression_mode = self.input.param("compression_mode",
                                                     'passive')
            self.sdk_compression = self.input.param("sdk_compression", True)
            self.sasl_bucket_name = "bucket"
            self.sasl_bucket_priority = self.input.param(
                "sasl_bucket_priority", None)
            self.standard_bucket_priority = self.input.param(
                "standard_bucket_priority", None)
            # end of bucket parameters (this list is ongoing)

            if self.skip_setup_cleanup:
                self.buckets = BucketHelper(self.master).get_buckets()
                return
            if not self.skip_init_check_cbserver:
                self.cb_version = None
                if RestHelper(RestConnection(
                        self.master)).is_ns_server_running():
                    """ since every new couchbase version, there will be new features
                        that test code will not work on previous release.  So we need
                        to get couchbase version to filter out those tests. """
                    self.cb_version = RestConnection(
                        self.master).get_nodes_version()
                else:
                    log.info("couchbase server does not run yet")
                self.protocol = self.get_protocol_type()
            self.services_map = None
            if self.sasl_bucket_priority is not None:
                self.sasl_bucket_priority = self.sasl_bucket_priority.split(
                    ":")
            if self.standard_bucket_priority is not None:
                self.standard_bucket_priority = self.standard_bucket_priority.split(
                    ":")

            log.info("==============  basetestcase setup was started for test #{0} {1}==============" \
                          .format(self.case_number, self._testMethodName))
            if not self.skip_buckets_handle and not self.skip_init_check_cbserver:
                self._cluster_cleanup()

            shared_params = self._create_bucket_params(
                server=self.master,
                size=self.bucket_size,
                replicas=self.num_replicas,
                enable_replica_index=self.enable_replica_index,
                eviction_policy=self.eviction_policy,
                bucket_priority=None,
                lww=self.lww,
                maxttl=self.maxttl,
                compression_mode=self.compression_mode)

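            # deepcopy so the per-type tweaks below (bucket_type, and the
            # size filled in later in setUp) do not mutate the shared dict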
            membase_params = copy.deepcopy(shared_params)
            membase_params['bucket_type'] = 'membase'
            self.bucket_base_params['membase'][
                'non_ephemeral'] = membase_params

            membase_ephemeral_params = copy.deepcopy(shared_params)
            membase_ephemeral_params['bucket_type'] = 'ephemeral'
            self.bucket_base_params['membase'][
                'ephemeral'] = membase_ephemeral_params

            memcached_params = copy.deepcopy(shared_params)
            memcached_params['bucket_type'] = 'memcached'
            self.bucket_base_params['memcached'] = memcached_params

            # avoid any cluster operations in setup for new upgrade
            #  & upgradeXDCR tests
            if str(self.__class__).find('newupgradetests') != -1 or \
                            str(self.__class__).find('upgradeXDCR') != -1 or \
                            str(self.__class__).find('Upgrade_EpTests') != -1 or \
                            hasattr(self, 'skip_buckets_handle') and \
                            self.skip_buckets_handle:
                log.info("any cluster operation in setup will be skipped")
                self.primary_index_created = True
                log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                              .format(self.case_number, self._testMethodName))
                return
            # avoid cleanup if the previous test was already torn down
            if self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    log.warn(
                        "tearDown for previous test failed. will retry..")
                    self.case_number -= 1000
                self.cleanup = True
                if not self.skip_init_check_cbserver:
                    self.tearDownEverything()
                self.cluster = Cluster()
            if not self.skip_init_check_cbserver:
                log.info("initializing cluster")
                self.reset_cluster()
                master_services = self.get_services(self.servers[:1], \
                                                    self.services_init, \
                                                    start_node=0)
                if master_services is not None:
                    master_services = master_services[0].split(",")

                self.quota = self._initialize_nodes(self.cluster, self.servers, \
                                                    self.disabled_consistent_view, \
                                                    self.rebalanceIndexWaitingDisabled, \
                                                    self.rebalanceIndexPausingDisabled, \
                                                    self.maxParallelIndexers, \
                                                    self.maxParallelReplicaIndexers, \
                                                    self.port, \
                                                    self.quota_percent, \
                                                    services=master_services)

                self.change_env_variables()
                self.change_checkpoint_params()

                # Add built-in user
                if not self.skip_init_check_cbserver:
                    self.add_built_in_server_user(node=self.master)
                log.info("done initializing cluster")
            else:
                self.quota = ""
            if self.input.param("log_info", None):
                self.change_log_info()
            if self.input.param("log_location", None):
                self.change_log_location()
            if self.input.param("stat_info", None):
                self.change_stat_info()
            if self.input.param("port_info", None):
                self.change_port_info()
            if self.input.param("port", None):
                self.port = str(self.input.param("port", None))
            try:
                if (str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1) or \
                        (str(self.__class__).find('memorysanitytests.MemorySanity') != -1) or \
                                str(self.__class__).find('negativetests.NegativeTests') != -1 or \
                                str(self.__class__).find('warmuptest.WarmUpTests') != -1 or \
                                str(self.__class__).find('failover.failovertests.FailoverTests') != -1 or \
                                str(self.__class__).find('observe.observeseqnotests.ObserveSeqNoTests') != -1 or \
                                str(self.__class__).find('epengine.lwwepengine.LWW_EP_Engine') != -1:

                    self.services = self.get_services(self.servers,
                                                      self.services_init)
                    # rebalance all nodes into the cluster before each test
                    self.cluster.rebalance(self.servers[:self.num_servers],
                                           self.servers[1:self.num_servers],
                                           [],
                                           services=self.services)
                elif self.nodes_init > 1 and not self.skip_init_check_cbserver:
                    self.services = self.get_services(
                        self.servers[:self.nodes_init], self.services_init)
                    self.cluster.rebalance(self.servers[:1], \
                                           self.servers[1:self.nodes_init], \
                                           [], services=self.services)
                elif str(self.__class__).find('ViewQueryTests') != -1 and \
                        not self.input.param("skip_rebalance", False):
                    self.services = self.get_services(self.servers,
                                                      self.services_init)
                    self.cluster.rebalance(self.servers,
                                           self.servers[1:], [],
                                           services=self.services)
                self.setDebugLevel(service_type="index")
            except BaseException as e:
                # increase case_number to retry tearDown in setup for the next test
                self.case_number += 1000
                self.fail(e)

            if self.dgm_run:
                self.quota = 256
            if self.total_buckets > 10:
                log.info("================== changing max buckets from 10 to {0} =================" \
                              .format(self.total_buckets))
                self.change_max_buckets(self.total_buckets)
            if self.total_buckets > 0 and not self.skip_init_check_cbserver:
                """ from sherlock, we have index service that could take some
                    RAM quota from total RAM quota for couchbase server.  We need
                    to get the correct RAM quota available to create bucket(s)
                    after all services were set """
                node_info = RestConnection(self.master).get_nodes_self()
                if node_info.memoryQuota and int(node_info.memoryQuota) > 0:
                    ram_available = node_info.memoryQuota
                else:
                    ram_available = self.quota
                if self.bucket_size is None:
                    if self.dgm_run:
                        """ if dgm is set,
                            we need to set bucket size to dgm setting """
                        self.bucket_size = self.quota
                    else:
                        self.bucket_size = self._get_bucket_size(ram_available, \
                                                                 self.total_buckets)

            self.bucket_base_params['membase']['non_ephemeral'][
                'size'] = self.bucket_size
            self.bucket_base_params['membase']['ephemeral'][
                'size'] = self.bucket_size
            self.bucket_base_params['memcached']['size'] = self.bucket_size

            if str(self.__class__).find('upgrade_tests') == -1 and \
                            str(self.__class__).find('newupgradetests') == -1:
                self._bucket_creation()
            log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                          .format(self.case_number, self._testMethodName))

            if not self.skip_init_check_cbserver:
                self._log_start(self)
                self.sleep(10)
        except Exception as e:
            traceback.print_exc()
            self.cluster.shutdown(force=True)
            self.fail(e)


class AltAddrBaseTest(BaseTestCase):
    vbucketId = 0

    def setUp(self):
        self.times_teardown_called = 1
        super(AltAddrBaseTest, self).setUp()
        self.r = random.Random()
        self.cluster = Cluster()
        self.clusters_dic = self.input.clusters
        self.client_os = self.input.param("client_os", "linux")
        self.alt_addr_with_xdcr = self.input.param("alt_addr_with_xdcr", False)
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
            elif len(self.clusters_dic) == 1:
                self.log.error(
                    "=== need 2 cluster to setup xdcr in ini file ===")
            if self.alt_addr_with_xdcr:
                self.des_name = "des_cluster"
                self.delete_xdcr_reference(self.clusters_dic[0][0].ip,
                                           self.clusters_dic[1][0].ip)
                if self.skip_init_check_cbserver:
                    for key in self.clusters_dic.keys():
                        servers = self.clusters_dic[key]
                        try:
                            self.backup_reset_clusters(servers)
                        except:
                            self.log.error(
                                "was not able to cleanup cluster the first time"
                            )
                            self.backup_reset_clusters(servers)
        else:
            self.log.error("**** Cluster config is setup in ini file. ****")

        self.shell = RemoteMachineShellConnection(self.master)
        if not self.skip_init_check_cbserver:
            self.rest = RestConnection(self.master)
            self.cb_version = self.rest.get_nodes_version()

        self.key_gen = self.input.param("key-gen", True)
        self.secure_conn = self.input.param("secure-conn", False)
        self.no_cacert = self.input.param("no-cacert", False)
        self.no_ssl_verify = self.input.param("no-ssl-verify", False)
        self.verify_data = self.input.param("verify-data", False)
        self.debug_logs = self.input.param("debug-logs", False)
        self.should_fail = self.input.param("should-fail", False)
        self.add_hostname_node = self.input.param("add_hostname_node", False)
        self.add_hostname_node_at_src = self.input.param(
            "add_hostname_node_at_src", False)
        self.add_hostname_node_at_des = self.input.param(
            "add_hostname_node_at_des", False)
        self.num_hostname_add = self.input.param("num_hostname_add", 1)
        self.alt_addr_services_in = self.input.param("alt_addr_services_in",
                                                     "kv")
        self.alt_addr_rebalance_out = self.input.param(
            "alt_addr_rebalance_out", False)
        self.alt_addr_rebalance_in = self.input.param("alt_addr_rebalance_in",
                                                      False)
        self.alt_addr_rebalance_in_services = self.input.param(
            "alt_addr_rebalance_in_services", "kv")
        self.alt_addr_use_public_dns = self.input.param(
            "alt_addr_use_public_dns", False)
        self.alt_addr_kv_loader = self.input.param("alt_addr_kv_loader", False)
        self.alt_addr_n1ql_query = self.input.param("alt_addr_n1ql_query",
                                                    False)
        self.alt_addr_eventing_function = self.input.param(
            "alt_addr_eventing_function", False)
        self.alt_addr_fts_loader = self.input.param("alt_addr_fts_loader",
                                                    False)
        self.run_alt_addr_loader = self.input.param("run_alt_addr_loader",
                                                    False)
        self.all_alt_addr_set = False

        info = self.shell.extract_remote_info()
        self.os_version = info.distribution_version.lower()
        self.deliverable_type = info.deliverable_type.lower()
        type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.full_v = None
        self.short_v = None
        self.build_number = None
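        # /diag/eval evaluates an Erlang snippet on the server;
        # path_config:component_path(bin) returns the Couchbase bin
        # directory, used below to locate the CLI tools.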
        cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(
            self.master.ip, self.master.rest_username,
            self.master.rest_password)
        cmd += '-d "path_config:component_path(bin)."'
        bin_path = check_output(cmd, shell=True)
        bin_path = bin_path.decode()
        if "bin" not in bin_path:
            self.fail("Check if cb server install on %s" % self.master.ip)
        else:
            self.cli_command_path = bin_path.replace('"', '') + "/"
        self.root_path = LINUX_ROOT_PATH
        self.tmp_path = "/tmp/"
        self.tmp_path_raw = "/tmp/"
        self.cmd_ext = ""
        self.src_file = ""
        self.des_file = ""
        self.log_path = LINUX_COUCHBASE_LOGS_PATH
        self.base_cb_path = LINUX_CB_PATH
        """ non root path """
        if self.nonroot:
            self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_LOGS_PATH)
            self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                                LINUX_CB_PATH)
            self.root_path = "/home/%s/" % self.master.ssh_username
        if type == 'windows':
            self.os = 'windows'
            self.cmd_ext = ".exe"
            self.root_path = WIN_ROOT_PATH
            self.tmp_path = WIN_TMP_PATH
            self.tmp_path_raw = WIN_TMP_PATH_RAW
            win_format = "C:/Program Files"
            cygwin_format = "/cygdrive/c/Program\ Files"
            if win_format in self.cli_command_path:
                self.cli_command_path = self.cli_command_path.replace(
                    win_format, cygwin_format)
            self.base_cb_path = WIN_CB_PATH
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
        self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(
            type)
        self.couchbase_usrname = "%s" % (
            self.input.membase_settings.rest_username)
        self.couchbase_password = "******" % (
            self.input.membase_settings.rest_password)
        self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                        self.couchbase_password)
        self.path_type = self.input.param("path_type", None)
        if self.path_type is None:
            self.log.info("Test command with absolute path ")
        elif self.path_type == "local":
            self.log.info("Test command at %s dir " % self.cli_command_path)
            self.cli_command_path = "cd %s; ./" % self.cli_command_path
        self.cli_command = self.input.param("cli_command", None)

        self.start_with_cluster = self.input.param("start_with_cluster", True)
        if str(self.__class__).find(
                'couchbase_clitest.CouchbaseCliTest') == -1:
            if len(self.servers) > 1 and int(
                    self.nodes_init) == 1 and self.start_with_cluster:
                servers_in = [
                    self.servers[i + 1] for i in range(self.num_servers - 1)
                ]
                self.cluster.rebalance(self.servers[:1], servers_in, [])
        for bucket in self.buckets:
            testuser = [{
                'id': bucket.name,
                'name': bucket.name,
                'password': '******'
            }]
            rolelist = [{
                'id': bucket.name,
                'name': bucket.name,
                'roles': 'admin'
            }]
            self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)

    def tearDown(self):
        self.times_teardown_called += 1
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
                if self.dest_nodes and len(self.dest_nodes) > 1:
                    self.log.info(
                        "======== clean up destination cluster =======")
                    rest = RestConnection(self.dest_nodes[0])
                    rest.remove_all_remote_clusters()
                    rest.remove_all_replications()
                    BucketOperationHelper.delete_all_buckets_or_assert(
                        self.dest_nodes, self)
                    ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
            elif len(self.clusters_dic) == 1:
                self.log.error(
                    "=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.info(
                "**** If run xdcr test, need cluster config is setup in ini file. ****"
            )
        super(AltAddrBaseTest, self).tearDown()

    def _list_compare(self, list1, list2):
        if len(list1) != len(list2):
            return False
        for elem1 in list1:
            found = False
            for elem2 in list2:
                if elem1 == elem2:
                    found = True
                    break
            if not found:
                return False
        return True

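    # _list_compare is an O(n*m) multiset-equality check that also works for
    # unhashable elements; for hashable ones, collections.Counter(list1) ==
    # collections.Counter(list2) is an equivalent one-liner.
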
    def get_internal_IP(self, server):
        shell = RemoteMachineShellConnection(server)
        internal_IP = shell.get_ip_address()
        internal_IP = [x for x in internal_IP if x != "127.0.0.1"]
        shell.disconnect()
        if internal_IP:
            return internal_IP[0]
        else:
            self.fail("Fail to get internal IP")

    def backup_reset_clusters(self, servers):
        BucketOperationHelper.delete_all_buckets_or_assert(servers, self)
        ClusterOperationHelper.cleanup_cluster(servers, master=servers[0])
        #ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, self)

    def get_external_IP(self, internal_IP):
        found = False
        external_IP = ""
        for server in self.servers:
            internalIP = self.get_internal_IP(server)
            if internal_IP == internalIP:
                found = True
                external_IP = server.ip
                break
        if not found:
            self.fail("Could not find server which matches internal IP")
        else:
            return external_IP

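    # get_internal_IP/get_external_IP assume the ini maps each node's public
    # address (server.ip) to the internal interface address reported by the
    # OS; the alternate-address tests exercise both sides of that mapping.
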
    def setup_xdcr_cluster(self):
        if not self.input.clusters[0] and not self.input.clusters[1]:
            self.fail("This test needs ini set with cluster config")
        self.log.info("Create source cluster")
        self.create_xdcr_cluster(self.input.clusters[0])
        self.log.info("Create destination cluster")
        self.create_xdcr_cluster(self.input.clusters[1])

    def create_xdcr_cluster(self, cluster_servers):
        num_hostname_add = 1
        add_host_name = False
        if self.add_hostname_node_at_src:
            add_host_name = True
        if self.add_hostname_node_at_des:
            add_host_name = True
        shell = RemoteMachineShellConnection(cluster_servers[0])
        services_in = self.alt_addr_services_in
        if "-" in services_in:
            set_services = services_in.split("-")
        else:
            set_services = services_in.split(",")

        i = 0  # index into set_services when one service is listed per node
        for server in cluster_servers[1:]:
            add_node_IP = self.get_internal_IP(server)
            node_services = "kv"
            if len(set_services) == 1:
                node_services = set_services[0]
            elif len(set_services) > 1:
                if len(set_services) == len(cluster_servers):
                    node_services = set_services[i]
                    i += 1
            if add_host_name and num_hostname_add <= self.num_hostname_add:
                add_node_IP = server.ip
                num_hostname_add += 1

            try:
                shell.alt_addr_add_node(main_server=cluster_servers[0],
                                        internal_IP=add_node_IP,
                                        server_add=server,
                                        services=node_services,
                                        cmd_ext=self.cmd_ext)
            except Exception as e:
                if e:
                    self.fail("Error: {0}".format(e))
        rest = RestConnection(cluster_servers[0])
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                       ejectedNodes=[])
        rest.monitorRebalance()

    def create_xdcr_reference(self, src_IP, des_IP):
        cmd = "curl -u Administrator:password "
        cmd += "http://{0}:8091/pools/default/remoteClusters ".format(src_IP)
        cmd += "-d username=Administrator -d password=password "
        cmd += "-d name={0} -d demandEncryption=0 ".format(self.des_name)
        cmd += "-d hostname={0}:8091 ".format(des_IP)

        mesg = "\n **** Create XDCR cluster remote reference from cluster {0} ".format(
            src_IP)
        mesg += "to cluster {0}".format(des_IP)
        self.log.info(mesg)
        print("command to run: {0}".format(cmd))
        try:
            output = check_output(cmd, shell=True, stderr=STDOUT)
        except CalledProcessError as e:
            if e.output:
                self.fail("\n Error: ".format(e.output))

    def delete_xdcr_reference(self, src_IP, des_IP):
        cmd = "curl -X DELETE -u Administrator:password "
        cmd += "http://{0}:8091/pools/default/remoteClusters/{1}".format(
            src_IP, self.des_name)
        print("command to run: {0}".format(cmd))
        try:
            output = check_output(cmd, shell=True, stderr=STDOUT)
        except CalledProcessError as e:
            if e.output:
                self.fail("Error: ".format(e.output))

    def create_xdcr_replication(self, src_IP, des_IP, bucket_name):
        cmd = "curl -X POST -u Administrator:password "
        cmd += "http://{0}:8091/controller/createReplication ".format(src_IP)
        cmd += "-d fromBucket={0} ".format(bucket_name)
        cmd += "-d toCluster={0} ".format(self.des_name)
        cmd += "-d toBucket={0} ".format(bucket_name)
        cmd += "-d replicationType=continuous -d enableCompression=1 "
        print("command to run: {0}".format(cmd))
        try:
            output = check_output(cmd, shell=True, stderr=STDOUT)
            return output
        except CalledProcessError as e:
            if e.output:
                self.fail("Error: ".format(e.output))

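    # On success, /controller/createReplication is expected to return a JSON
    # body whose id is later passed to cancelXDCR, e.g. (illustrative):
    #   {"id": "<remote-cluster-uuid>/<fromBucket>/<toBucket>"}
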
    def delete_xdcr_replication(self, src_IP, replication_id):
        replication_id = urllib.quote(replication_id, safe='')
        cmd = "curl -X DELETE -u Administrator:password "
        cmd += " http://{0}:8091/controller/cancelXDCR/{1} ".format(
            src_IP, replication_id)
        print("command to run: {0}".format(cmd))
        try:
            output = check_output(cmd, shell=True, stderr=STDOUT)
        except CalledProcessError as e:
            if e.output:
                self.fail("Error: ".format(e.output))

    def set_xdcr_checkpoint(self, src_IP, check_time):
        cmd = "curl  -u Administrator:password "
        cmd += "http://{0}:8091/settings/replications ".format(src_IP)
        cmd += "-d goMaxProcs=10 "
        cmd += "-d checkpointInterval={0} ".format(check_time)
        print("command to run: {0}".format(cmd))
        try:
            self.log.info(
                "Set xdcr checkpoint to {0} seconds".format(check_time))
            output = check_output(cmd, shell=True, stderr=STDOUT)
        except CalledProcessError as e:
            if e.output:
                self.fail("Error: ".format(e.output))

    def waitForItemCount(self, server, bucket_name, count, timeout=30):
        rest = RestConnection(server)
        for sec in range(timeout):
            items = int(
                rest.get_bucket_json(bucket_name)["basicStats"]["itemCount"])
            if items != count:
                time.sleep(1)
            else:
                return True
        log.info("Waiting for item count to be %d timed out", count)
        return False

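    # Example use (hypothetical bucket and count):
    #   self.waitForItemCount(self.master, "default", 1000, timeout=60)
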
    def _check_output(self, word_check, output):
        found = False
        if len(output) >= 1:
            if isinstance(word_check, list):
                for ele in word_check:
                    for x in output:
                        if ele.lower() in x.lower():
                            self.log.info(
                                "Found '{0}' in CLI output".format(ele))
                            found = True
                            break
            elif isinstance(word_check, str):
                for x in output:
                    if word_check.lower() in x.lower():
                        self.log.info(
                            "Found '{0}' in CLI output".format(word_check))
                        found = True
                        break
            else:
                self.log.error("invalid {0}".format(word_check))
        return found
Example no. 6
class CliBaseTest(BaseTestCase):
    vbucketId = 0

    def setUp(self):
        self.times_teardown_called = 1
        super(CliBaseTest, self).setUp()
        self.r = random.Random()
        self.vbucket_count = 1024
        self.cluster = Cluster()
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.error("**** Cluster config is setup in ini file. ****")
        self.shell = RemoteMachineShellConnection(self.master)
        if not self.skip_init_check_cbserver:
            self.rest = RestConnection(self.master)
            self.cb_version = self.rest.get_nodes_version()
            """ cli output message """
            self.cli_bucket_create_msg = "SUCCESS: Bucket created"
            self.cli_rebalance_msg = "SUCCESS: Rebalance complete"
            if self.cb_version[:3] == "4.6":
                self.cli_bucket_create_msg = "SUCCESS: bucket-create"
                self.cli_rebalance_msg = "SUCCESS: rebalanced cluster"
        self.import_back = self.input.param("import_back", False)
        if self.import_back:
            if len(self.servers) < 3:
                self.fail("This test needs minimum of 3 vms to run ")
        self.test_type = self.input.param("test_type", "import")
        self.import_file = self.input.param("import_file", None)
        self.imex_type = self.input.param("imex_type", "json")
        self.format_type = self.input.param("format_type", "lines")
        self.import_method = self.input.param("import_method", "file://")
        self.force_failover = self.input.param("force_failover", False)
        self.json_invalid_errors = self.input.param("json-invalid-errors", None)
        self.field_separator = self.input.param("field-separator", "comma")
        self.key_gen = self.input.param("key-gen", True)
        self.skip_docs = self.input.param("skip-docs", None)
        self.limit_docs = self.input.param("limit-docs", None)
        self.limit_rows = self.input.param("limit-rows", None)
        self.skip_rows = self.input.param("skip-rows", None)
        self.omit_empty = self.input.param("omit-empty", None)
        self.infer_types = self.input.param("infer-types", None)
        self.fx_generator = self.input.param("fx-generator", None)
        self.fx_gen_start = self.input.param("fx-gen-start", None)
        self.secure_conn = self.input.param("secure-conn", False)
        self.no_cacert = self.input.param("no-cacert", False)
        self.no_ssl_verify = self.input.param("no-ssl-verify", False)
        self.verify_data = self.input.param("verify-data", False)
        self.field_substitutions = self.input.param("field-substitutions", None)
        self.check_preload_keys = self.input.param("check-preload-keys", True)
        self.debug_logs = self.input.param("debug-logs", False)
        self.should_fail = self.input.param("should-fail", False)
        info = self.shell.extract_remote_info()
        self.os_version = info.distribution_version.lower()
        os_type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.full_v = None
        self.short_v = None
        self.build_number = None
        cmd = 'curl -g %s:8091/diag/eval -u Administrator:password ' % self.master.ip
        cmd += '-d "path_config:component_path(bin)."'
        bin_path = subprocess.check_output(cmd, shell=True)
        if "bin" not in bin_path:
            self.fail("Check if cb server install on %s" % self.master.ip)
        else:
            self.cli_command_path = bin_path.replace('"','') + "/"
        self.root_path = LINUX_ROOT_PATH
        self.tmp_path = "/tmp/"
        self.tmp_path_raw = "/tmp/"
        self.cmd_backup_path = LINUX_BACKUP_PATH
        self.backup_path = LINUX_BACKUP_PATH
        self.cmd_ext = ""
        self.src_file = ""
        self.des_file = ""
        self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH
        self.log_path = LINUX_COUCHBASE_LOGS_PATH
        self.base_cb_path = LINUX_CB_PATH
        """ non root path """
        if self.nonroot:
            self.sample_files_path = "/home/%s%s" % (self.master.ssh_username,
                                                     LINUX_COUCHBASE_SAMPLE_PATH)
            self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_LOGS_PATH)
            self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                                LINUX_CB_PATH)
            self.root_path = "/home/%s/" % self.master.ssh_username
        if os_type == 'windows':
            self.os = 'windows'
            self.cmd_ext = ".exe"
            self.root_path = WIN_ROOT_PATH
            self.tmp_path = WIN_TMP_PATH
            self.tmp_path_raw = WIN_TMP_PATH_RAW
            self.cmd_backup_path = WIN_BACKUP_C_PATH
            self.backup_path = WIN_BACKUP_PATH
            self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH_C
            self.log_path = WIN_COUCHBASE_LOGS_PATH
            win_format = "C:/Program Files"
            cygwin_format = "/cygdrive/c/Program\ Files"
            if win_format in self.cli_command_path:
                self.cli_command_path = self.cli_command_path.replace(win_format,
                                                                      cygwin_format)
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
        self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(os_type)
        self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
        self.couchbase_password = "%s" % (self.input.membase_settings.rest_password)
        self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                        self.couchbase_password)
        self.path_type = self.input.param("path_type", None)
        if self.path_type is None:
            self.log.info("Test command with absolute path ")
        elif self.path_type == "local":
            self.log.info("Test command at %s dir " % self.cli_command_path)
            self.cli_command_path = "cd %s; ./" % self.cli_command_path
        self.cli_command = self.input.param("cli_command", None)
        self.command_options = self.input.param("command_options", None)
        if self.command_options is not None:
            self.command_options = self.command_options.split(";")
        if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1:
            if len(self.servers) > 1 and int(self.nodes_init) == 1:
                servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
                self.cluster.rebalance(self.servers[:1], servers_in, [])
        for bucket in self.buckets:
            testuser = [{'id': bucket.name, 'name': bucket.name, 'password': '******'}]
            rolelist = [{'id': bucket.name, 'name': bucket.name, 'roles': 'admin'}]
            self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
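
    # A hypothetical testrunner invocation showing how the params read in
    # setUp above are supplied (ini/conf names and the test path are
    # illustrative only, not taken from this repo):
    #
    #   ./testrunner -i cluster.ini \
    #       -t clitest.importexporttest.ImportExportTests.test_imex,imex_type=json,format_type=lines,import_method=file://,import_file=data.json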


    def tearDown(self):
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1:
                self.shell.disconnect()
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                self.shell.disconnect()
        self.times_teardown_called += 1
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        zones = rest.get_zone_names()
        for zone in zones:
            if zone != "Group 1":
                rest.delete_zone(zone)
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
                if self.dest_nodes and len(self.dest_nodes) > 1:
                    self.log.info("======== clean up destination cluster =======")
                    rest = RestConnection(self.dest_nodes[0])
                    rest.remove_all_remote_clusters()
                    rest.remove_all_replications()
                    BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                    ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
        super(CliBaseTest, self).tearDown()


    """ in sherlock, there is an extra value called runCmd in the 1st element """
    def del_runCmd_value(self, output):
        if "runCmd" in output[0]:
            output = output[1:]
        return output
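
    # Sketch of what del_runCmd_value strips (the output shape is
    # illustrative):
    #
    #     output = ["runCmd: couchbase-cli bucket-create ...",
    #               "SUCCESS: Bucket created"]
    #     output = self.del_runCmd_value(output)
    #     # -> ["SUCCESS: Bucket created"]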

    def verifyCommandOutput(self, output, expect_error, message):
        """Inspects each line of the output and checks to see if the expected error was found

        Options:
        output - A list of output lines
        expect_error - Whether the command is expected to fail
        message - The success or error message

        Returns a boolean indicating whether or not the error/success message was found in the output
        """
        if expect_error:
            for line in output:
                if line == "ERROR: " + message:
                    return True
            log.info("Did not receive expected error message `ERROR: %s`", message)
            return False
        else:
            for line in output:
                if line == "SUCCESS: " + message:
                    return True
            log.info("Did not receive expected success message `SUCCESS: %s`", message)
            return False
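
    # Usage sketch for verifyCommandOutput (the message text is
    # hypothetical); note it matches a whole line, "SUCCESS: <message>" or
    # "ERROR: <message>", not a substring:
    #
    #     output = self.del_runCmd_value(output)
    #     self.assertTrue(self.verifyCommandOutput(output, False, "Bucket created"))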

    def verifyWarningOutput(self, output, message):
        for line in output:
            if line == "WARNING: " + message:
                return True
        log.info("Did not receive expected error message `WARNING: %s`", message)
        return False

    def verifyServices(self, server, expected_services):
        """Verifies that the services on a given node match the expected service

            Options:
            server - A TestInputServer object of the server to connect to
            expected_services - A comma separated list of services

            Returns a boolean corresponding to whether or not the expected services
            are available on the server.
        """
        rest = RestConnection(server)
        hostname = "%s:%s" % (server.ip, server.port)
        expected_services = expected_services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        nodes_services = rest.get_nodes_services()
        for node, services in nodes_services.iteritems():
            if node.encode('ascii') == hostname:
                if len(services) != len(expected_services):
                    log.info("Services on %s do not match expected services (%s vs. %s)",
                             hostname, services, expected_services)
                    return False
                for service in services:
                    if service.encode("ascii") not in expected_services:
                        log.info("Services on %s do not match expected services (%s vs. %s)",
                                 hostname, services, expected_services)
                        return False
                return True

        log.info("Services on %s not found, the server may not exist", hostname)
        return False
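
    # Usage sketch for verifyServices; the CLI-style names "data" and "query"
    # are mapped to the REST names "kv" and "n1ql" before comparing, so both
    # spellings work (the service list is hypothetical):
    #
    #     self.assertTrue(self.verifyServices(self.master, "data,index,query"))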

    def verifyRamQuotas(self, server, data, index, fts):
        """Verifies that the RAM quotas for each service are set properly

        Options:
        server - A TestInputServer object of the server to connect to
        data - An int containing the data service RAM quota, None will skip the check
        index - An int containing the index service RAM quota, None will skip the check
        fts - An int containing the FTS service RAM quota, None will skip the check

        Returns a boolean corresponding to whether or not the RAM quotas were set properly
        """
        rest = RestConnection(server)
        settings = rest.get_pools_default()
        if data:
            if "memoryQuota" not in settings:
                log.info("Unable to get data service ram quota")
                return False
            if int(settings["memoryQuota"]) != int(data):
                log.info("Data service memory quota does not match (%d vs %d)",
                         int(settings["memoryQuota"]), int(data))
                return False

        if index:
            if "indexMemoryQuota" not in settings:
                log.info("Unable to get index service ram quota")
                return False
            if int(settings["indexMemoryQuota"]) != int(index):
                log.info(
                    "Index service memory quota does not match (%d vs %d)",
                    int(settings["indexMemoryQuota"]), int(index))
                return False

        if fts:
            if "ftsMemoryQuota" not in settings:
                log.info("Unable to get fts service ram quota")
                return False
            if int(settings["ftsMemoryQuota"]) != int(fts):
                log.info("FTS service memory quota does not match (%d vs %d)",
                         int(settings["ftsMemoryQuota"]), int(fts))
                return False

        return True
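
    # Usage sketch for verifyRamQuotas (quota values, in MB, are
    # hypothetical); pass None to skip a service's check:
    #
    #     self.assertTrue(self.verifyRamQuotas(self.master, 1024, 512, None))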

    def verifyBucketSettings(self, server, bucket_name, bucket_type, memory_quota,
                             eviction_policy, replica_count, enable_index_replica,
                             priority, enable_flush):
        rest = RestConnection(server)
        result = rest.get_bucket_json(bucket_name)

        if bucket_type == "couchbase":
            bucket_type = "membase"

        if bucket_type is not None and bucket_type != result["bucketType"]:
            log.info("Memory quota does not match (%s vs %s)", bucket_type,
                     result["bucketType"])
            return False

        quota = result["quota"]["rawRAM"] / 1024 / 1024
        if memory_quota is not None and memory_quota != quota:
            log.info("Bucket quota does not match (%s vs %s)", memory_quota,
                     quota)
            return False

        if eviction_policy is not None and eviction_policy != result[
            "evictionPolicy"]:
            log.info("Eviction policy does not match (%s vs %s)",
                     eviction_policy, result["evictionPolicy"])
            return False

        if replica_count is not None and replica_count != result[
            "replicaNumber"]:
            log.info("Replica count does not match (%s vs %s)", replica_count,
                     result["replicaNumber"])
            return False

        if enable_index_replica == 1:
            enable_index_replica = True
        elif enable_index_replica == 0:
            enable_index_replica = False

        if enable_index_replica is not None and enable_index_replica != result[
            "replicaIndex"]:
            log.info("Replica index enabled does not match (%s vs %s)",
                     enable_index_replica, result["replicaIndex"])
            return False

        if priority == "high":
            priority = 8
        elif priority == "low":
            priority = 3

        if priority is not None and priority != result["threadsNumber"]:
            log.info("Bucket priority does not match (%s vs %s)", priority,
                     result["threadsNumber"])
            return False

        if enable_flush is not None:
            if enable_flush == 1 and "flush" not in result["controllers"]:
                log.info("Bucket flush is not enabled, but it should be")
                return False
            elif enable_flush == 0 and "flush" in result["controllers"]:
                log.info("Bucket flush is enabled, but it should not be")
                return False

        return True
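
    # Usage sketch for verifyBucketSettings (all values hypothetical); note
    # that a "couchbase" bucket is reported by REST under the internal type
    # name "membase":
    #
    #     self.assertTrue(self.verifyBucketSettings(
    #         self.master, "default", "couchbase", 256, "valueOnly",
    #         1, 0, "low", 1))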

    def verifyContainsBucket(self, server, name):
        rest = RestConnection(server)
        buckets = rest.get_buckets()

        for bucket in buckets:
            if bucket.name == name:
                return True
        return False

    def verifyClusterName(self, server, name):
        rest = RestConnection(server)
        settings = rest.get_pools_default("waitChange=0")

        if name is None:
            name = ""

        if "clusterName" not in settings:
            log.info("Unable to get cluster name from server")
            return False
        if settings["clusterName"] != name:
            log.info("Cluster name does not match (%s vs %s)",
                     settings["clusterName"], name)
            return False

        return True

    def isClusterInitialized(self, server):
        """Checks whether or not the server is initialized

        Options:
        server - A TestInputServer object of the server to connect to

        Checks to see whether or not the default pool was created in order to
        determine whether or not the server was initialized. Returns a boolean value
        to indicate initialization.
        """
        rest = RestConnection(server)
        settings = rest.get_pools_info()
        if "pools" in settings and len(settings["pools"]) > 0:
            return True

        return False
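
    # Usage sketch: isClusterInitialized only checks that the default pool
    # exists, e.g.
    #
    #     if not self.isClusterInitialized(self.master):
    #         self.fail("expected %s to be initialized" % self.master.ip)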

    def verifyNotificationsEnabled(self, server):
        rest = RestConnection(server)
        enabled = rest.get_notifications()
        if enabled:
            return True
        return False

    def verifyIndexSettings(self, server, max_rollbacks, stable_snap_interval,
                            mem_snap_interval,
                            storage_mode, threads, log_level):
        rest = RestConnection(server)
        settings = rest.get_global_index_settings()

        if storage_mode == "default":
            storage_mode = "plasma"
        elif storage_mode == "memopt":
            storage_mode = "memory_optimized"

        if max_rollbacks and str(settings["maxRollbackPoints"]) != str(
                max_rollbacks):
            log.info("Max rollbacks does not match (%s vs. %s)",
                     str(settings["maxRollbackPoints"]), str(max_rollbacks))
            return False
        if stable_snap_interval and str(
                settings["stableSnapshotInterval"]) != str(
                stable_snap_interval):
            log.info("Stable snapshot interval does not match (%s vs. %s)",
                     str(settings["stableSnapshotInterval"]),
                     str(stable_snap_interval))
            return False
        if mem_snap_interval and str(
                settings["memorySnapshotInterval"]) != str(mem_snap_interval):
            log.info("Memory snapshot interval does not match (%s vs. %s)",
                     str(settings["memorySnapshotInterval"]),
                     str(mem_snap_interval))
            return False
        if storage_mode and str(settings["storageMode"]) != str(storage_mode):
            log.info("Storage mode does not match (%s vs. %s)",
                     str(settings["storageMode"]), str(storage_mode))
            return False
        if threads and str(settings["indexerThreads"]) != str(threads):
            log.info("Threads does not match (%s vs. %s)",
                     str(settings["indexerThreads"]), str(threads))
            return False
        if log_level and str(settings["logLevel"]) != str(log_level):
            log.info("Log level does not match (%s vs. %s)",
                     str(settings["logLevel"]), str(log_level))
            return False

        return True

    def verifyAutofailoverSettings(self, server, enabled, timeout):
        rest = RestConnection(server)
        settings = rest.get_autofailover_settings()

        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
                str(enabled) == "0" and not settings.enabled)):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings.enabled))
            return False
        if timeout and str(settings.timeout) != str(timeout):
            log.info("Timeout does not match (%s vs. %s)", str(timeout),
                     str(settings.timeout))
            return False

        return True

    def verifyAutoreprovisionSettings(self, server, enabled, max_nodes):
        rest = RestConnection(server)
        settings = rest.get_autoreprovision_settings()

        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
                str(enabled) == "0" and not settings.enabled)):
            log.info("Enabled does not match (%s vs. %s)", str(max_nodes),
                     str(settings.enabled))
            return False
        if max_nodes and str(settings.max_nodes) != str(max_nodes):
            log.info("max_nodes does not match (%s vs. %s)", str(max_nodes),
                     str(settings.max_nodes))
            return False

        return True

    def verifyAuditSettings(self, server, enabled, log_path, rotate_interval):
        rest = RestConnection(server)
        settings = rest.getAuditSettings()

        if enabled and not (
            (str(enabled) == "1" and settings["auditdEnabled"]) or (
                str(enabled) == "0" and not settings["auditdEnabled"])):
            log.info("Enabled does not match (%s vs. %s)", str(enabled),
                     str(settings["auditdEnabled"]))
            return False
        if log_path and str(settings["logPath"]) != str(log_path):
            log.info("Log path does not match (%s vs. %s)", str(log_path),
                     str(settings["logPath"]))
            return False

        if rotate_interval and str(settings["rotateInterval"]) != str(
                rotate_interval):
            log.info("Rotate interval does not match (%s vs. %s)",
                     str(rotate_interval), str(settings["rotateInterval"]))
            return False

        return True

    def verifyPendingServer(self, server, server_to_add, group_name, services):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        expected_services = services.replace("data", "kv")
        expected_services = expected_services.replace("query", "n1ql")
        expected_services = expected_services.split(",")

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    if node["clusterMembership"] != "inactiveAdded":
                        log.info("Node `%s` not in pending status",
                                 server_to_add)
                        return False

                    if group["name"] != group_name:
                        log.info("Node `%s` not in correct group (%s vs %s)",
                                 node["hostname"], group_name,
                                 group["name"])
                        return False

                    if len(node["services"]) != len(expected_services):
                        log.info("Services do not match on %s (%s vs %s) ",
                                 node["hostname"], services,
                                 ",".join(node["services"]))
                        return False

                    for service in node["services"]:
                        if service not in expected_services:
                            log.info("Services do not match on %s (%s vs %s) ",
                                     node["hostname"], services,
                                     ",".join(node["services"]))
                            return False
                    return True

        log.info("Node `%s` not found in nodes list", server_to_add)
        return False

    def verifyPendingServerDoesNotExist(self, server, server_to_add):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        for group in settings["groups"]:
            for node in group["nodes"]:
                if node["hostname"] == server_to_add:
                    return False

        log.info("Node `%s` not found in nodes list", server_to_add)
        return True

    def verifyActiveServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "active")

    def verifyFailedServers(self, server, expected_num_servers):
        return self._verifyServersByStatus(server, expected_num_servers,
                                           "inactiveFailed")

    def _verifyServersByStatus(self, server, expected_num_servers, status):
        rest = RestConnection(server)
        settings = rest.get_pools_default()

        count = 0
        for node in settings["nodes"]:
            if node["clusterMembership"] == status:
                count += 1

        return count == expected_num_servers
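
    # _verifyServersByStatus counts nodes whose clusterMembership equals the
    # given status; the two wrappers above check "active" and "inactiveFailed"
    # (the expected count is hypothetical):
    #
    #     self.assertTrue(self.verifyActiveServers(self.master, 3))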

    def verifyRecoveryType(self, server, recovery_servers, recovery_type):
        rest = RestConnection(server)
        settings = rest.get_all_zones_info()
        if not settings or "groups" not in settings:
            log.info("Group settings payload appears to be invalid")
            return False

        if not recovery_servers:
            return True

        num_found = 0
        recovery_servers = recovery_servers.split(",")
        for group in settings["groups"]:
            for node in group["nodes"]:
                for rs in recovery_servers:
                    if node["hostname"] == rs:
                        if node["recoveryType"] != recovery_type:
                            log.info(
                                "Node %s doesn't contain recovery type %s ",
                                rs, recovery_type)
                            return False
                        else:
                            num_found = num_found + 1

        if num_found == len(recovery_servers):
            return True

        log.info("Node `%s` not found in nodes list",
                 ",".join(recovery_servers))
        return False

    def verifyUserRoles(self, server, username, roles):
        rest = RestConnection(server)
        status, content, header = rbacmain(server)._retrieve_user_roles()
        content = json.loads(content)
        temp = rbacmain()._parse_get_user_response(content, username, username, roles)
        return temp

    def verifyLdapSettings(self, server, admins, ro_admins, default, enabled):
        rest = RestConnection(server)
        settings = rest.ldapRestOperationGetResponse()

        if admins is None:
            admins = []
        else:
            admins = admins.split(",")

        if ro_admins is None:
            ro_admins = []
        else:
            ro_admins = ro_admins.split(",")

        if str(enabled) == "0":
            admins = []
            ro_admins = []

        if default == "admins" and str(enabled) == "1":
            if settings["admins"] != "asterisk":
                log.info("Admins don't match (%s vs asterisk)",
                         settings["admins"])
                return False
        elif not self._list_compare(settings["admins"], admins):
            log.info("Admins don't match (%s vs %s)", settings["admins"],
                     admins)
            return False

        if default == "roadmins" and str(enabled) == "1":
            if settings["roAdmins"] != "asterisk":
                log.info("Read only admins don't match (%s vs asterisk)",
                         settings["roAdmins"])
                return False
        elif not self._list_compare(settings["roAdmins"], ro_admins):
            log.info("Read only admins don't match (%s vs %s)",
                     settings["roAdmins"], ro_admins)
            return False

        return True

    def verifyAlertSettings(self, server, enabled, email_recipients,
                            email_sender, email_username, email_password,
                            email_host,
                            email_port, encrypted, alert_af_node,
                            alert_af_max_reached, alert_af_node_down,
                            alert_af_small,
                            alert_af_disable, alert_ip_changed,
                            alert_disk_space, alert_meta_overhead,
                            alert_meta_oom,
                            alert_write_failed, alert_audit_dropped):
        rest = RestConnection(server)
        settings = rest.get_alerts_settings()
        print settings

        if not enabled:
            if not settings["enabled"]:
                return True
            else:
                log.info("Alerts should be disabled")
                return False

        if encrypted is None or encrypted == "0":
            encrypted = False
        else:
            encrypted = True

        if email_recipients is not None and not self._list_compare(
                email_recipients.split(","), settings["recipients"]):
            log.info("Email recipients don't match (%s vs %s)",
                     email_recipients.split(","), settings["recipients"])
            return False

        if email_sender is not None and email_sender != settings["sender"]:
            log.info("Email sender does not match (%s vs %s)", email_sender,
                     settings["sender"])
            return False

        if email_username is not None and email_username != \
                settings["emailServer"]["user"]:
            log.info("Email username does not match (%s vs %s)",
                     email_username, settings["emailServer"]["user"])
            return False

        if email_host is not None and email_host != settings["emailServer"][
            "host"]:
            log.info("Email host does not match (%s vs %s)", email_host,
                     settings["emailServer"]["host"])
            return False

        if email_port is not None and email_port != settings["emailServer"][
            "port"]:
            log.info("Email port does not match (%s vs %s)", email_port,
                     settings["emailServer"]["port"])
            return False

        if encrypted is not None and encrypted != settings["emailServer"][
            "encrypt"]:
            log.info("Email encryption does not match (%s vs %s)", encrypted,
                     settings["emailServer"]["encrypt"])
            return False

        alerts = list()
        if alert_af_node:
            alerts.append('auto_failover_node')
        if alert_af_max_reached:
            alerts.append('auto_failover_maximum_reached')
        if alert_af_node_down:
            alerts.append('auto_failover_other_nodes_down')
        if alert_af_small:
            alerts.append('auto_failover_cluster_too_small')
        if alert_af_disable:
            alerts.append('auto_failover_disabled')
        if alert_ip_changed:
            alerts.append('ip')
        if alert_disk_space:
            alerts.append('disk')
        if alert_meta_overhead:
            alerts.append('overhead')
        if alert_meta_oom:
            alerts.append('ep_oom_errors')
        if alert_write_failed:
            alerts.append('ep_item_commit_failed')
        if alert_audit_dropped:
            alerts.append('audit_dropped_events')

        if not self._list_compare(alerts, settings["alerts"]):
            log.info("Alerts don't match (%s vs %s)", alerts,
                     settings["alerts"])
            return False

        return True

    def verify_node_settings(self, server, data_path, index_path, hostname):
        rest = RestConnection(server)
        node_settings = rest.get_nodes_self()

        if data_path != node_settings.storage[0].path:
            log.info("Data path does not match (%s vs %s)", data_path,
                     node_settings.storage[0].path)
            return False
        if index_path != node_settings.storage[0].index_path:
            log.info("Index path does not match (%s vs %s)", index_path,
                     node_settings.storage[0].index_path)
            return False
        if hostname is not None:
            if hostname != node_settings.hostname:
                log.info("Hostname does not match (%s vs %s)", hostname,
                         node_settings.hostname)
                return False
        return True

    def verifyCompactionSettings(self, server, db_frag_perc, db_frag_size,
                                 view_frag_perc, view_frag_size, from_period,
                                 to_period, abort_outside, parallel_compact,
                                 purgeInt):
        rest = RestConnection(server)
        settings = rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]

        if db_frag_perc is not None and str(db_frag_perc) != str(
                ac["databaseFragmentationThreshold"]["percentage"]):
            log.info("DB frag perc does not match (%s vs %s)",
                     str(db_frag_perc),
                     str(ac["databaseFragmentationThreshold"]["percentage"]))
            return False

        if db_frag_size is not None and str(db_frag_size * 1024 ** 2) != str(
                ac["databaseFragmentationThreshold"]["size"]):
            log.info("DB frag size does not match (%s vs %s)",
                     str(db_frag_size * 1024 ** 2),
                     str(ac["databaseFragmentationThreshold"]["size"]))
            return False

        if view_frag_perc is not None and str(view_frag_perc) != str(
                ac["viewFragmentationThreshold"]["percentage"]):
            log.info("View frag perc does not match (%s vs %s)",
                     str(view_frag_perc),
                     str(ac["viewFragmentationThreshold"]["percentage"]))
            return False

        if view_frag_size is not None and str(
                        view_frag_size * 1024 ** 2) != str(
                ac["viewFragmentationThreshold"]["size"]):
            log.info("View frag size does not match (%s vs %s)",
                     str(view_frag_size * 1024 ** 2),
                     str(ac["viewFragmentationThreshold"]["size"]))
            return False

        print from_period, to_period
        if from_period is not None:
            fromHour, fromMin = from_period.split(":", 1)
            if int(fromHour) != int(ac["allowedTimePeriod"]["fromHour"]):
                log.info("From hour does not match (%s vs %s)", str(fromHour),
                         str(ac["allowedTimePeriod"]["fromHour"]))
                return False
            if int(fromMin) != int(ac["allowedTimePeriod"]["fromMinute"]):
                log.info("From minute does not match (%s vs %s)", str(fromMin),
                         str(ac["allowedTimePeriod"]["fromMinute"]))
                return False

        if to_period is not None:
            toHour, toMin = to_period.split(":", 1)
            if int(toHour) != int(ac["allowedTimePeriod"]["toHour"]):
                log.info("To hour does not match (%s vs %s)", str(toHour),
                         str(ac["allowedTimePeriod"]["toHour"]))
                return False
            if int(toMin) != int(ac["allowedTimePeriod"]["toMinute"]):
                log.info("To minute does not match (%s vs %s)", str(toMin),
                         str(ac["allowedTimePeriod"]["toMinute"]))
                return False

        if str(abort_outside) == "1":
            abort_outside = True
        elif str(abort_outside) == "0":
            abort_outside = False

        if abort_outside is not None and abort_outside != \
                ac["allowedTimePeriod"]["abortOutside"]:
            log.info("Abort outside does not match (%s vs %s)", abort_outside,
                     ac["allowedTimePeriod"]["abortOutside"])
            return False

        if str(parallel_compact) == "1":
            parallel_compact = True
        elif str(parallel_compact) == "0":
            parallel_compact = False

        if parallel_compact is not None and parallel_compact != ac[
            "parallelDBAndViewCompaction"]:
            log.info("Parallel compact does not match (%s vs %s)",
                     str(parallel_compact),
                     str(ac["parallelDBAndViewCompaction"]))
            return False

        if purgeInt is not None and str(purgeInt) != str(
                settings["purgeInterval"]):
            log.info("Purge interval does not match (%s vs %s)", str(purgeInt),
                     str(settings["purgeInterval"]))
            return False

        return True

    def verify_gsi_compact_settings(self, compact_mode, compact_percent,
                                    compact_interval,
                                    from_period, to_period, enable_abort):
        rest = RestConnection(self.master)
        settings = rest.get_auto_compaction_settings()
        ac = settings["autoCompactionSettings"]["indexFragmentationThreshold"]
        cc = settings["autoCompactionSettings"]["indexCircularCompaction"]
        if compact_mode is not None:
            if compact_mode == "append":
                self.log.info("append compactino settings %s " % ac)
                if compact_percent is not None and \
                                compact_percent != ac["percentage"]:
                    raise Exception(
                        "setting percent does not match.  Set: %s vs %s :Actual"
                        % (compact_percent, ac["percentage"]))
            if compact_mode == "circular":
                self.log.info("circular compaction settings %s " % cc)
                if enable_abort and not cc["interval"]["abortOutside"]:
                    raise Exception("setting enable abort failed")
                if compact_interval is not None:
                    if compact_interval != cc["daysOfWeek"]:
                        raise Exception(
                            "Failed to set compaction on %s " % compact_interval)
                    elif from_period is None and int(
                            cc["interval"]["fromHour"]) != 0 and \
                                    int(cc["interval"]["fromMinute"]) != 0:
                        raise Exception(
                            "fromHour and fromMinute should be zero")
                if compact_interval is None:
                    if (from_period != str(cc["interval"][
                                                    "fromHour"]) + ":" + str(
                                cc["interval"]["fromMinute"])) \
                    and (to_period != str(cc["interval"]["toHour"]) + ":" + str(
                                cc["interval"]["toMinute"])):
                        raise Exception(
                            "fromHour and fromMinute do not set correctly")
        return True

    def verifyGroupExists(self, server, name):
        rest = RestConnection(server)
        groups = rest.get_zone_names()
        print groups

        for gname, _ in groups.iteritems():
            if name == gname:
                return True

        return False

    def _list_compare(self, list1, list2):
        if len(list1) != len(list2):
            return False
        for elem1 in list1:
            found = False
            for elem2 in list2:
                if elem1 == elem2:
                    found = True
                    break
            if not found:
                return False
        return True
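
    # _list_compare is an order-insensitive equality check; note it does not
    # account for duplicate counts within a list:
    #
    #     self._list_compare(["kv", "n1ql"], ["n1ql", "kv"])   # True
    #     self._list_compare(["kv", "kv"], ["kv", "n1ql"])     # True, despite duplicates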

    def waitForItemCount(self, server, bucket_name, count, timeout=30):
        rest = RestConnection(server)
        for sec in range(timeout):
            items = int(
                rest.get_bucket_json(bucket_name)["basicStats"]["itemCount"])
            if items != count:
                time.sleep(1)
            else:
                return True
        log.info("Waiting for item count to be %d timed out", count)
        return False