Example no. 1
    def set_metadata_purge_interval(self, value, buckets=None, node=None):
        self.log.info(
            "Changing the bucket properties by changing {0} to {1}".format(
                "purge_interval", value))
        if not buckets:
            buckets = self.buckets
        if node is None:
            node = self.cluster.master
        rest = RestConnection(node)

        shell = RemoteMachineShellConnection(node)
        shell.enable_diag_eval_on_non_local_hosts()
        shell.disconnect()

        for bucket in buckets:
            cmd = '{ok, BC} = ns_bucket:get_bucket(' \
                  '"%s"), BC2 = lists:keyreplace(purge_interval, ' \
                  '1, BC, {purge_interval, %f})' \
                  ', ns_bucket:set_bucket_config("%s", BC2).' \
                  % (bucket.name, value, bucket.name)
            rest.diag_eval(cmd)

        # Restart Memcached in all cluster nodes to reflect the settings
        for server in self.cluster_util.get_kv_nodes(master=node):
            shell = RemoteMachineShellConnection(server)
            shell.restart_couchbase()
            shell.disconnect()

        # Check bucket warm-up after the Couchbase restart
        retry_count = 10
        buckets_warmed_up = self.bucket_util.is_warmup_complete(
            buckets, retry_count)
        if not buckets_warmed_up:
            self.log.critical("Few bucket(s) not warmed up "
                              "within expected time")
Example no. 2
    def _induce_rebalance_test_condition(self, test_failure_condition):
        if test_failure_condition == "verify_replication":
            set_command = "testconditions:set(verify_replication, {fail, \"" + "default" + "\"})"
        elif test_failure_condition == "backfill_done":
            set_command = "testconditions:set(backfill_done, {for_vb_move, \"" + "default\", 1 , " + "fail})"
        else:
            set_command = "testconditions:set({0}, fail)".format(
                test_failure_condition)
        get_command = "testconditions:get({0})".format(test_failure_condition)
        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(set_command)
            self.log.debug("Set Command : {0} Return : {1}".format(
                set_command, content))
            shell.disconnect()

        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(get_command)
            self.log.info("Command : {0} Return : {1}".format(
                get_command, content))
Example no. 3
    def update_master(self, node_in_cluster=None):
        """
        Update the master of the cluster with the orchestrator of the
        cluster.
        :param node_in_cluster: Any node that is still part of the
        cluster. Note that we need to enable diag_eval on non local
        hosts first before running this method.
        :return:
        """
        if node_in_cluster is None:
            node_in_cluster = self.master

        # Enable diag_eval outside localhost
        shell = RemoteMachineShellConnection(node_in_cluster)
        shell.enable_diag_eval_on_non_local_hosts()
        shell.disconnect()

        rest = RestConnection(node_in_cluster)
        command = "mb_master:master_node()."
        status, content = rest.diag_eval(command)

        master_ip = content.split("@")[1].replace("\\", '').replace(
            "'", "")
        self.master = [server for server in self.servers if server.ip ==
                       master_ip][0]

        return status, content
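
For reference, the orchestrator is returned as an Erlang node name; here is a standalone sketch of the parsing above, where the sample return value is an assumption about the typical diag_eval output:

# Illustrative only: the sample value mimics a diag_eval response such as
# 'ns_1@<ip>' (possibly with escaped quotes, hence the replace calls).
content = "'ns_1@172.23.100.12'"
master_ip = content.split("@")[1].replace("\\", "").replace("'", "")
assert master_ip == "172.23.100.12"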
Example no. 4
    def test_multiple_backups_merges(self):
        self.log.info("*** start to load items to all buckets")
        self.expected_error = self.input.param("expected_error", None)
        if int(self.active_resident_threshold) > 0:
            self.log.info("Disable compaction to speed up dgm")
            RestConnection(self.master).disable_auto_compaction()
        if self.expires:
            for bucket in self.buckets:
                cb = self._get_python_sdk_client(self.master.ip, bucket,
                                                 self.backupset.cluster_host)
                for i in range(1, self.num_items + 1):
                    cb.upsert("doc" + str(i), {"key": "value"})
        else:
            self._load_all_buckets(self.master, self.initial_load_gen,
                                   "create", self.expires)
        self.log.info("*** done to load items to all buckets")
        self.backup_create_validate()
        for i in range(1, self.number_of_repeats + 1):
            self.do_backup_merge_actions()
        start = self.number_of_backups_taken
        end = self.number_of_backups_taken
        if self.backup_corrupted:
            self.log.info(
                "Stop restore due to backup files corrupted as intended")
            return
        if self.reset_restore_cluster:
            self.log.info("*** start to reset cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(),
                                       self.servers[:self.nodes_init])
            else:
                shell = RemoteMachineShellConnection(self.input.clusters[0][0])
                shell.enable_diag_eval_on_non_local_hosts()
                shell.disconnect()
                rest = RestConnection(self.input.clusters[0][0])
                rest.force_eject_node()
                master_services = self.get_services(
                    [self.backupset.cluster_host],
                    self.services_init,
                    start_node=0)
                info = rest.get_nodes_self()
                if info.memoryQuota and int(info.memoryQuota) > 0:
                    self.quota = info.memoryQuota
                rest.init_node()
            self.log.info("Done reset cluster")
        self.sleep(10)
        """ Add built-in user cbadminbucket to second cluster """
        self.add_built_in_server_user(
            node=self.input.clusters[0][:self.nodes_init][0])

        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)
Example no. 5
 def delete_rebalance_test_condition(self, test_failure_condition):
     delete_command = "testconditions:delete({0})".format(test_failure_condition)
     for server in self.servers:
         rest = RestConnection(server)
         shell = RemoteMachineShellConnection(server)
         shell.enable_diag_eval_on_non_local_hosts()
         _, content = rest.diag_eval(delete_command)
         self.log.debug("Delete Command : {0} Return : {1}".format(delete_command, content))
         shell.disconnect()
Example no. 6
 def setUp(self):
     super(CBTransferTests, self).setUp()
     self.origin_buckets = list(self.buckets)
     self.master = self.server_recovery
     self.add_built_in_server_user(node=self.master)
     shell = RemoteMachineShellConnection(self.master)
     shell.enable_diag_eval_on_non_local_hosts()
     shell.disconnect()
     self._bucket_creation()
     self.buckets = list(self.origin_buckets)
Example no. 7
    def _delete_rebalance_test_condition(self, test_failure_condition):
        if test_failure_condition.startswith("delay_"):
            test_failure_condition = test_failure_condition[6:]

        delete_command = "testconditions:delete(%s)" % test_failure_condition
        get_command = "testconditions:get(%s)" % test_failure_condition
        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(delete_command)
            self.log.info("Command: %s, Return: %s" %
                          (delete_command, content))

        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(get_command)
            self.log.info("Command: %s, Return: %s" % (get_command, content))
 def check_for_panic_and_mini_dumps(self, servers):
     panic_str = "panic"
     panic_count = 0
     for server in servers:
         shell = RemoteMachineShellConnection(server)
         output, error = shell.enable_diag_eval_on_non_local_hosts()
         if output is not None:
             if "ok" not in output:
                 self.log.error(
                     "Error in enabling diag/eval on non-local hosts on {}: Error: {}"
                     .format(server.ip, error))
             else:
                 self.log.debug(
                     "Enabled diag/eval for non-local hosts from {}".format(
                         server.ip))
         else:
             self.log.debug(
                 "Running in compatibility mode, not enabled diag/eval for non-local hosts"
             )
         _, dir_name = RestConnection(server).diag_eval(
             'filename:absname(element(2, application:get_env(ns_server,error_logger_mf_dir))).'
         )
         log = str(dir_name) + '/*'
         count, err = shell.execute_command(
             "zgrep \"{0}\" {1} | wc -l".format(panic_str, log))
         if isinstance(count, list):
             count = int(count[0])
         else:
             count = int(count)
         if count > panic_count:
             self.log.info(
                 "===== PANIC OBSERVED IN THE LOGS ON SERVER {0}=====".
                 format(server.ip))
             panic_trace, _ = shell.execute_command(
                 "zgrep \"{0}\" {1}".format(panic_str, log))
             self.log.info("\n {0}".format(panic_trace))
             panic_count = count
         os_info = shell.extract_remote_info()
         if os_info.type.lower() == "windows":
             # This is a fixed path in all windows systems inside couchbase
             dir_name_crash = 'c://CrashDumps'
         else:
             dir_name_crash = str(dir_name) + '/../crash/'
         core_dump_count, err = shell.execute_command(
             "ls {0}| wc -l".format(dir_name_crash))
         if isinstance(core_dump_count, list):
             core_dump_count = int(core_dump_count[0])
         else:
             core_dump_count = int(core_dump_count)
         if core_dump_count > 0:
             self.log.info(
                 "===== CORE DUMPS SEEN ON SERVER {0} : {1} crashes seen ====="
                 .format(server.ip, core_dump_count))
         shell.disconnect()
Example no. 9
    def _induce_rebalance_test_condition(self,
                                         test_failure_condition,
                                         bucket_name="default",
                                         vb_num=1,
                                         delay_time=60000):
        if test_failure_condition == "verify_replication":
            set_command = 'testconditions:set(verify_replication, ' \
                          '{fail, "%s"})' % bucket_name
        elif test_failure_condition == "backfill_done":
            set_command = 'testconditions:set(backfill_done, ' \
                          '{for_vb_move, "%s", %s , fail})' \
                          % (bucket_name, vb_num)
        elif test_failure_condition == "delay_rebalance_start":
            set_command = 'testconditions:set(rebalance_start, {delay, %s}).' \
                          % delay_time
        elif test_failure_condition == "delay_verify_replication":
            set_command = 'testconditions:set(verify_replication, ' \
                          '{delay, "%s", %s})' % (bucket_name, delay_time)
        elif test_failure_condition == "delay_backfill_done":
            set_command = 'testconditions:set(backfill_done, ' \
                          '{for_vb_move, "%s", %s, {delay, %s}})' \
                          % (bucket_name, vb_num, delay_time)
        else:
            set_command = "testconditions:set(%s, fail)" \
                          % test_failure_condition
        get_command = "testconditions:get(%s)" % test_failure_condition
        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(set_command)
            self.log.debug("Set Command: %s. Return: %s" %
                           (set_command, content))
            shell.disconnect()

        for server in self.servers:
            rest = RestConnection(server)
            shell = RemoteMachineShellConnection(server)
            shell.enable_diag_eval_on_non_local_hosts()
            _, content = rest.diag_eval(get_command)
            self.log.info("Command: %s, Return: %s" % (get_command, content))
Example no. 10
 def setUp(self):
     for server in TestInputSingleton.input.servers:
         remote = RemoteMachineShellConnection(server)
         remote.enable_diag_eval_on_non_local_hosts()
         remote.disconnect()
     super(AlternateAddressTests, self).setUp()
     self.remove_all_alternate_address_settings()
     self.cluster_helper = Cluster()
     self.ex_path = self.tmp_path + "export{0}/".format(self.master.ip)
     self.num_items = self.input.param("items", 1000)
     self.client_os = self.input.param("client_os", "linux")
     self.localhost = self.input.param("localhost", False)
     self.json_create_gen = JsonDocGenerator("altaddr",
                                             op_type="create",
                                             encoding="utf-8",
                                             start=0,
                                             end=self.num_items)
     self.json_delete_gen = JsonDocGenerator("imex",
                                             op_type="delete",
                                             encoding="utf-8",
                                             start=0,
                                             end=self.num_items)
Example no. 11
 def enable_diag_eval_on_non_local_hosts(self, master):
     """
     Enable diag/eval to be run on non-local hosts.
     :param master: Node information of the master node of the cluster
     :return: Nothing
     """
     remote = RemoteMachineShellConnection(master)
     output, error = remote.enable_diag_eval_on_non_local_hosts()
     if "ok" not in output:
         self.log.error("Error in enabling diag/eval on non-local hosts on {}. {}".format(master.ip, output))
         raise Exception("Error in enabling diag/eval on non-local hosts on {}".format(master.ip))
     else:
         self.log.info("Enabled diag/eval for non-local hosts from {}".format(master.ip))
Example no. 12
 def setUp(self):
     self.input = TestInputSingleton.input
     self.case_number = self.input.param("case_number", 0)
     self.use_master = self.input.param("use_master", False)
     self.skip_services = self.input.param("skip_services", True)
     self.replicas = self.input.param("replicas", 1)
     self.servers = self.input.servers
     self.log = logger.Logger().get_logger()
     self.master = self.servers[0]
     self.rest = RestConnection(self.master)
     self.timeout = 60
     self.loaded_items = dict()
     AutoReprovisionBaseTest.common_setup(self.input, self)
     self._cluster_setup()
     if self.use_master:
         self.server_fail = self.servers[0]
         self.master = self.servers[1]
     else:
         self.server_fail = self.servers[1]
     for server in self.servers:
         remote = RemoteMachineShellConnection(server)
         output, error = remote.enable_diag_eval_on_non_local_hosts()
Example no. 13
    def setUp(self):
        super(EnterpriseBKRSNewBaseTest, self).setUp()
        self.clusters = self.get_clusters()
        self.master = self.servers[0]
        self.task = self.get_task()
        self.taskmgr = self.get_task_mgr()

        self.backupset = Backupset()
        self.cmd_ext = ""
        self.should_fail = self.input.param("should-fail", False)
        self.restore_should_fail = self.input.param("restore_should_fail",
                                                    False)
        self.merge_should_fail = self.input.param("merge_should_fail", False)
        self.database_path = Linux.COUCHBASE_DATA_PATH

        cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(
            self.master.ip, self.master.rest_username,
            self.master.rest_password)
        cmd += '-d "path_config:component_path(bin)."'
        shell = RemoteMachineShellConnection(self.master)
        output, error = shell.enable_diag_eval_on_non_local_hosts()
        shell.disconnect()

        bin_path = subprocess.check_output(cmd, shell=True).decode()
        if not self.skip_init_check_cbserver:
            if "bin" not in bin_path:
                self.fail("Check if cb server install on %s" % self.master.ip)
            else:
                self.cli_command_location = bin_path.replace('"', '') + "/"

        self.debug_logs = self.input.param("debug-logs", False)
        self.backupset.directory = self.input.param("dir", "/tmp/entbackup")
        self.backupset.user_env = self.input.param("user-env", False)
        self.backupset.passwd_env = self.input.param("passwd-env", False)
        self.backupset.log_archive_env = self.input.param(
            "log-archive-env", False)
        self.backupset.log_redaction = self.input.param("log-redaction", False)
        self.backupset.redaction_salt = self.input.param(
            "redaction-salt", None)
        self.backupset.no_log_output_flag = self.input.param(
            "no-log-output-flag", False)
        self.backupset.ex_logs_path = self.input.param("ex-logs-path", None)
        self.backupset.overwrite_user_env = self.input.param(
            "overwrite-user-env", False)
        self.backupset.overwrite_passwd_env = self.input.param(
            "overwrite-passwd-env", False)
        self.backupset.disable_conf_res_restriction = self.input.param(
            "disable-conf-res-restriction", None)
        self.backupset.force_updates = self.input.param("force-updates", False)
        self.backupset.resume = self.input.param("resume", False)
        self.backupset.purge = self.input.param("purge", False)
        self.backupset.start = self.input.param("start", 1)
        self.backupset.end = self.input.param("stop", 1)
        self.backupset.number_of_backups = self.input.param(
            "number_of_backups", 1)
        self.replace_ttl_with = self.input.param("replace-ttl-with", None)
        self.backupset.backup_host = self.servers[-1]
        self.backupset.name = self.input.param("name", "backup")
        self.backupset.filter_keys = self.input.param("filter-keys", "")
        self.backupset.random_keys = self.input.param("random_keys", False)
        self.backupset.filter_values = self.input.param("filter-values", "")
        self.backupset.no_ssl_verify = self.input.param("no-ssl-verify", False)
        self.backupset.secure_conn = self.input.param("secure-conn", False)
        self.backupset.bk_no_cert = self.input.param("bk-no-cert", False)
        self.backupset.rt_no_cert = self.input.param("rt-no-cert", False)
        self.backupset.backup_list_name = self.input.param("list-names", None)
        self.backupset.backup_incr_backup = self.input.param(
            "incr-backup", None)
        self.backupset.bucket_backup = self.input.param("bucket-backup", None)

        shell = RemoteMachineShellConnection(self.servers[0])
        info = shell.extract_remote_info().type.lower()
        self.root_path = Linux.ROOT_PATH
        self.wget = "wget"
        self.os_name = "linux"
        self.tmp_path = "/tmp/"
        self.long_help_flag = "--help"
        self.short_help_flag = "-h"
        self.cygwin_bin_path = ""
        self.enable_firewal = False
        self.rfc3339_date = "date +%s --date='{0} seconds' | ".format(self.replace_ttl_with) + \
                                "xargs -I {} date --date='@{}' --rfc-3339=seconds | "\
                                "sed 's/ /T/'"
        self.seconds_with_ttl = "date +%s --date='{0} seconds'".format(
            self.replace_ttl_with)
        if info == Linux.NAME:
            if self.nonroot:
                base_path = "/home/{0}".format(self.master.ssh_username)
                self.database_path = "{0}{1}".format(base_path,
                                                     Linux.COUCHBASE_DATA_PATH)
                self.root_path = "/home/{0}/".format(self.master.ssh_username)
        elif info == Windows.NAME:
            self.os_name = Windows.NAME
            self.cmd_ext = ".exe"
            self.wget = "/cygdrive/c/automation/wget.exe"
            self.database_path = Windows.COUCHBASE_DATA_PATH_RAW
            self.root_path = Windows.ROOT_PATH
            self.tmp_path = Windows.TMP_PATH
            self.long_help_flag = "help"
            self.short_help_flag = "h"
            self.cygwin_bin_path = Windows.CYGWIN_BIN_PATH
            self.rfc3339_date = "date +%s --date='{0} seconds' | ".format(self.replace_ttl_with) + \
                            "{0}xargs -I {{}} date --date=\"@'{{}}'\" --rfc-3339=seconds | "\
                                                            .format(self.cygwin_bin_path) + \
                                                                               "sed 's/ /T/'"
            win_format = "C:/Program Files"
            cygwin_format = "/cygdrive/c/Program\ Files"
            if win_format in self.cli_command_location:
                self.cli_command_location = self.cli_command_location.replace(
                    win_format, cygwin_format)
            self.backupset.directory = self.input.param(
                "dir", Windows.TMP_PATH_RAW + "entbackup")
        elif info == 'mac':
            self.backupset.directory = self.input.param(
                "dir", "/tmp/entbackup")
        else:
            raise Exception("OS not supported.")

        self.non_master_host = self.input.param("non-master", False)
        self.value_size = self.input.param("value_size", 512)
        self.no_progress_bar = self.input.param("no-progress-bar", True)
        self.multi_threads = self.input.param("multi_threads", False)
        self.threads_count = self.input.param("threads_count", 1)
        self.bucket_delete = self.input.param("bucket_delete", False)
        self.bucket_flush = self.input.param("bucket_flush", False)
        self.commit = self.input.param("commit", True)
        self.ops_type = self.input.param("ops_type", "create")
        self.num_threads = self.input.param("num_threads", 5)
        self.bk_with_ttl = self.input.param("bk-with-ttl", None)
        self.create_fts_index = self.input.param("create-fts-index", False)
        self.reset_restore_cluster = self.input.param("reset-restore-cluster",
                                                      False)
        self.backupset.user_env_with_prompt = \
                        self.input.param("user-env-with-prompt", False)
        self.backupset.passwd_env_with_prompt = \
                        self.input.param("passwd-env-with-prompt", False)
        self.restore_compression_mode = self.input.param(
            "restore-compression-mode", None)
        self.force_version_upgrade = self.input.param("force-version-upgrade",
                                                      None)
        self.skip_buckets = self.input.param("skip_buckets", False)
        self.num_replicas = self.input.param("replicas", 2)
        self.restore_only = self.input.param("restore-only", False)

        if self.non_master_host:
            self.backupset.cluster_host = self.servers[1]
            self.backupset.cluster_host_username = self.servers[
                1].rest_username
            self.backupset.cluster_host_password = self.servers[
                1].rest_password
        else:
            self.backupset.cluster_host = self.servers[0]
            self.backupset.cluster_host_username = self.servers[
                0].rest_username
            self.backupset.cluster_host_password = self.servers[
                0].rest_password

        self.same_cluster = self.input.param("same-cluster", False)
        if self.same_cluster:
            self.backupset.restore_cluster_host = self.input.clusters[0][0]
            self.backupset.restore_cluster_host_username = self.input.clusters[
                0][0].rest_username
            self.backupset.restore_cluster_host_password = self.input.clusters[
                0][0].rest_password
        else:
            self.backupset.restore_cluster_host = self.input.clusters[1][0]
            self.backupset.restore_cluster_host_username = self.input.clusters[
                1][0].rest_username
            self.backupset.restore_cluster_host_password = self.input.clusters[
                1][0].rest_password
        """ new user to test RBAC """
        self.cluster_new_user = self.input.param("new_user", None)
        if self.cluster_new_user:
            self.backupset.cluster_host_username = self.cluster_new_user
            self.backupset.restore_cluster_host_username = self.cluster_new_user
        self.backups = []
        self.number_of_backups_taken = 0
        for cluster in self.clusters:
            self.cluster_util.add_all_nodes_then_rebalance(
                cluster, cluster.servers[1:])
Example no. 14
    def test_backup_restore_collection_sanity(self):
        """
        1. Create a default bucket on the cluster and load it with the given number of items
        2. Perform updates and create backups the specified number of times (test param number_of_backups)
        3. Perform restores the same number of times with random start and end values
        """
        self.log.info("*** create collection in all buckets")
        self.log.info("*** start to load items to all buckets")
        self.active_resident_threshold = 100
        self.load_all_buckets(self.backupset.cluster_host)
        self.log.info("*** done to load items to all buckets")
        self.ops_type = self.input.param("ops-type", "update")
        self.expected_error = self.input.param("expected_error", None)
        self.create_scope_cluster_host()
        self.create_collection_cluster_host(self.backupset.col_per_scope)
        backup_scopes = self.get_bucket_scope_cluster_host()
        backup_collections = self.get_bucket_collection_cluster_host()
        col_stats = self.get_collection_stats_cluster_host()
        for backup_scope in backup_scopes:
            bk_scope_id = self.get_scopes_id_cluster_host(backup_scope)
        if self.auto_failover:
            self.log.info("Enabling auto failover on " +
                          str(self.backupset.cluster_host))
            rest_conn = RestConnection(self.backupset.cluster_host)
            rest_conn.update_autofailover_settings(self.auto_failover,
                                                   self.auto_failover_timeout)
        self.backup_create_validate()
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.ops_type == "update":
                self.log.info("*** start to update items in all buckets")
                self.load_all_buckets(self.backupset.cluster_host, ratio=0.1)
                self.log.info("*** done update items in all buckets")
            self.sleep(10)
            self.log.info("*** start to validate backup cluster")
            self.backup_cluster_validate()
        self.targetMaster = True
        start = randrange(1, self.backupset.number_of_backups + 1)
        if start == self.backupset.number_of_backups:
            end = start
        else:
            end = randrange(start, self.backupset.number_of_backups + 1)
        self.log.info("*** start to restore cluster")
        restored = {"{0}/{1}".format(start, end): ""}
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.reset_restore_cluster:
                self.log.info("*** start to reset cluster")
                self.backup_reset_clusters(self.cluster_to_restore)
                if self.same_cluster:
                    self._initialize_nodes(Cluster(),
                                           self.servers[:self.nodes_init])
                else:
                    shell = RemoteMachineShellConnection(
                        self.backupset.restore_cluster_host)
                    shell.enable_diag_eval_on_non_local_hosts()
                    shell.disconnect()
                    rest = RestConnection(self.backupset.restore_cluster_host)
                    rest.force_eject_node()
                    rest.init_node()
                self.log.info("Done reset cluster")
            self.sleep(10)
            """ Add built-in user cbadminbucket to second cluster """
            self.add_built_in_server_user(
                node=self.input.clusters[0][:self.nodes_init][0])

            self.backupset.start = start
            self.backupset.end = end
            self.log.info("*** start restore validation")
            data_map_collection = []
            for scope in backup_scopes:
                if "default" in scope:
                    continue
                data_map_collection.append(self.buckets[0].name + "." + scope + "=" + \
                                           self.buckets[0].name + "." + scope)
            self.bucket_map_collection = ",".join(data_map_collection)
            self.backup_restore_validate(compare_uuid=False,
                                         seqno_compare_function=">=",
                                         expected_error=self.expected_error)
            if self.backupset.number_of_backups == 1:
                continue
            while "{0}/{1}".format(start, end) in restored:
                start = randrange(1, self.backupset.number_of_backups + 1)
                if start == self.backupset.number_of_backups:
                    end = start
                else:
                    end = randrange(start,
                                    self.backupset.number_of_backups + 1)
            restored["{0}/{1}".format(start, end)] = ""
        restore_scopes = self.get_bucket_scope_restore_cluster_host()
        restore_collections = self.get_bucket_collection_restore_cluster_host()
        self.verify_collections_in_restore_cluster_host()
Example no. 15
    def test_restore_with_auto_create_buckets(self):
        """
           Restore cluster with --auto-create-buckets option
        """
        self.active_resident_threshold = 100
        self.load_all_buckets(self.backupset.cluster_host)
        self.log.info("*** done to load items to all buckets")
        self.ops_type = self.input.param("ops-type", "update")
        self.expected_error = self.input.param("expected_error", None)
        if self.create_scopes and not self.buckets_only:
            self.create_scope_cluster_host()
        if self.create_collections and not self.buckets_only and not self.scopes_only:
            self.create_collection_cluster_host(self.backupset.col_per_scope)
        backup_scopes = self.get_bucket_scope_cluster_host()
        scopes_id = []
        for scope in backup_scopes:
            if scope == "_default":
                continue
            scopes_id.append(self.get_scopes_id_cluster_host(scope))
        """ remove null and empty element """
        scopes_id = [i for i in scopes_id if i]
        if isinstance(backup_scopes, tuple):
            backup_scopes = backup_scopes[0]
        for backup_scope in backup_scopes:
            backup_collections = self.get_bucket_collection_cluster_host(scope=backup_scope)
            if isinstance(backup_collections, tuple):
                backup_collections = backup_collections[0]
        col_stats = self.get_collection_stats_cluster_host()
        for backup_scope in backup_scopes:
            bk_scope_id = self.get_scopes_id_cluster_host(backup_scope)

        self.backup_create_validate()
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.ops_type == "update":
                self.log.info("*** start to update items in all buckets")
                col_cmd = ""
                if self.backupset.load_to_collection:
                    self.backupset.load_scope_id = choice(scopes_id)
                    col_cmd = " -c {0} ".format(self.backupset.load_scope_id)
                self.load_all_buckets(self.backupset.cluster_host, ratio=0.1,
                                                     command_options=col_cmd)
                self.log.info("*** done update items in all buckets")
            self.sleep(10)
            self.log.info("*** start to validate backup cluster")
            self.backup_cluster_validate()
        self.targetMaster = True
        start = randrange(1, self.backupset.number_of_backups + 1)
        if start == self.backupset.number_of_backups:
            end = start
        else:
            end = randrange(start, self.backupset.number_of_backups + 1)
        self.log.info("*** start to restore cluster")
        restored = {"{0}/{1}".format(start, end): ""}
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.reset_restore_cluster:
                self.log.info("*** start to reset cluster")
                self.backup_reset_clusters(self.cluster_to_restore)
                if self.same_cluster:
                    self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
                else:
                    shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
                    shell.enable_diag_eval_on_non_local_hosts()
                    shell.disconnect()
                    rest = RestConnection(self.backupset.restore_cluster_host)
                    rest.force_eject_node()
                    rest.init_node()
                self.log.info("Done reset cluster")
            self.sleep(10)

            """ Add built-in user cbadminbucket to second cluster """
            self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])

            rest = RestConnection(self.backupset.restore_cluster_host)
            rest.set_indexer_storage_mode(username='******',
                                      password='******',
                                      storageMode="memory_optimized")
            self.backupset.start = start
            self.backupset.end = end
            self.log.info("*** start restore validation")
            data_map_collection = []
            for scope in backup_scopes:
                if "default" in scope or scope == '':
                    continue
                data_map_collection.append(self.buckets[0].name + "." + scope + "=" + \
                                           self.buckets[0].name + "." + scope)
            self.bucket_map_collection = ",".join(data_map_collection)
            self.backup_restore_validate(compare_uuid=False,
                                         seqno_compare_function=">=",
                                         expected_error=self.expected_error)
Example no. 16
    def test_backup_restore_collection_sanity(self):
        """
        1. Create a default bucket on the cluster and load it with the given number of items
        2. Perform updates and create backups the specified number of times (test param number_of_backups)
        3. Perform restores the same number of times with random start and end values
        """
        if self.reset_backup_cluster:
            self.log.info("*** start to reset backup cluster")
            self._create_backup_cluster(self.backupset.backup_services_init)
            self.sleep(10)
            rest_conn = RestConnection(self.backupset.cluster_host)
            rest_conn.create_bucket(bucket="default", ramQuotaMB="256")
            self.buckets = RestConnection(self.master).get_buckets()
        self.log.info("*** create collection in all buckets")
        self.log.info("*** start to load items to all buckets")
        self.active_resident_threshold = 100
        self.load_all_buckets(self.backupset.cluster_host)
        self.log.info("*** done to load items to all buckets")
        self.ops_type = self.input.param("ops-type", "update")
        self.expected_error = self.input.param("expected_error", None)
        if self.create_gsi:
            self.create_indexes()
        if self.create_scopes and not self.buckets_only:
            self.create_scope_cluster_host()
        if self.create_collections and not self.buckets_only and not self.scopes_only:
            self.create_collection_cluster_host(self.backupset.col_per_scope)
        backup_scopes = self.get_bucket_scope_cluster_host()
        if len(backup_scopes) < 2:
            self.sleep(4)
            backup_scopes = self.get_bucket_scope_cluster_host()
        if backup_scopes[0][:4] == "\x1b[6n":
            backup_scopes[0] = backup_scopes[0][4:]
        self.log.info("scopes in backup cluster: {0}".format(backup_scopes))
        scopes_id = []
        for scope in backup_scopes:
            if scope == "_default":
                continue
            self.log.info("get scope id of scope: {0}".format(scope))
            scopes_id.append(self.get_scopes_id_cluster_host(scope))
        self.log.info("scope id in backup cluster: {0}".format(scopes_id))
        """ remove null and empty element """
        scopes_id = [i for i in scopes_id if i]
        col_stats = self.get_collection_stats_cluster_host()
        for backup_scope in backup_scopes:
            bk_scope_id = self.get_scopes_id_cluster_host(backup_scope)
        if self.auto_failover:
            self.log.info("Enabling auto failover on " + str(self.backupset.cluster_host))
            rest_conn = RestConnection(self.backupset.cluster_host)
            rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)
        if self.drop_scopes:
            self.delete_scope_cluster_host()
        else:
            self.delete_collection_cluster_host()
        rest_bk = RestConnection(self.backupset.cluster_host)
        cluster_srv = list(rest_bk.get_nodes_services().values())
        fts_srv = False
        index_srv = False
        for node_srv in cluster_srv:
            if "fts" in node_srv:
                fts_srv = True
            if "index" in node_srv:
                index_srv = True
        if index_srv:
            bk_storage_mode = rest_bk.get_index_settings()["indexer.settings.storage_mode"]
        else:
            bk_storage_mode = "plasma"

        self.backup_create_validate()
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.ops_type == "update":
                self.log.info("*** start to update items in all buckets")
                col_cmd = ""
                if self.backupset.load_to_collection:
                    if len(scopes_id) > 1:
                        self.backupset.load_scope_id = choice(scopes_id)
                    else:
                        if scopes_id:
                            self.backupset.load_scope_id = scopes_id[0]
                        else:
                            self.log.info("scopes Id: {0}.  Let get scopes again.".format(scopes_id))
                            bk_scopes = self.get_bucket_scope_cluster_host()
                            for scope in bk_scopes:
                                if scope == "_default" or not scope:
                                    continue
                                self.log.info("get scope id of scope: {0}".format(scope))
                                scopes_id.append(self.get_scopes_id_cluster_host(scope))
                            self.backupset.load_scope_id = scopes_id[0]
                    col_cmd = " -c {0} ".format(self.backupset.load_scope_id)
                self.sleep(10, "wait for scopes and collections created")
                self.load_all_buckets(self.backupset.cluster_host, ratio=0.1,
                                                     command_options=col_cmd)
                self.log.info("*** done update items in all buckets")
            self.sleep(10)
            self.log.info("*** start to validate backup cluster")
            self.backup_cluster_validate()
        self.targetMaster = True
        start = randrange(1, self.backupset.number_of_backups + 1)
        if start == self.backupset.number_of_backups:
            end = start
        else:
            end = randrange(start, self.backupset.number_of_backups + 1)
        self.log.info("*** start to restore cluster")
        restored = {"{0}/{1}".format(start, end): ""}
        for i in range(1, self.backupset.number_of_backups + 1):
            if self.reset_restore_cluster:
                self.log.info("*** start to reset cluster")
                self.backup_reset_clusters(self.cluster_to_restore)
                if self.same_cluster:
                    self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
                else:
                    shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
                    shell.enable_diag_eval_on_non_local_hosts()
                    shell.disconnect()
                    rest = RestConnection(self.backupset.restore_cluster_host)
                    rest.force_eject_node()
                    rest.init_node()
                    rest.set_indexer_storage_mode(username='******',
                                      password='******',
                                      storageMode=bk_storage_mode)
                self.log.info("Done reset cluster")
            self.sleep(10)

            """ Add built-in user cbadminbucket to second cluster """
            self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])

            self.backupset.start = start
            self.backupset.end = end
            self.log.info("*** start restore validation")
            data_map_collection = []
            for scope in backup_scopes:
                if "default" in scope or scope == '':
                    continue
                data_map_collection.append(self.buckets[0].name + "." + scope + "=" + \
                                           self.buckets[0].name + "." + scope)
            self.bucket_map_collection = ",".join(data_map_collection)
            self.backup_restore_validate(compare_uuid=False,
                                         seqno_compare_function=">=",
                                         expected_error=self.expected_error)
            if self.backupset.number_of_backups == 1:
                continue
            while "{0}/{1}".format(start, end) in restored:
                start = randrange(1, self.backupset.number_of_backups + 1)
                if start == self.backupset.number_of_backups:
                    end = start
                else:
                    end = randrange(start, self.backupset.number_of_backups + 1)
            restored["{0}/{1}".format(start, end)] = ""
        if not self.drop_scopes:
            restore_scopes = self.get_bucket_scope_restore_cluster_host()
            if not self.drop_collections:
                self.verify_collections_in_restore_cluster_host()
            else:
                try:
                    for scope in restore_scopes:
                        restore_collections = self.get_bucket_collection_restore_cluster_host(scope=scope)
                        if restore_collections:
                            self.fail("Restore should not restore delete collection")
                except Exception as e:
                    if e:
                        print("Exception: ", str(e))
        else:
            try:
                for scope in backup_scopes:
                    restore_scopes = self.get_bucket_scope_restore_cluster_host(scope=scope)
                    if restore_scopes:
                        self.fail("Restore should not restore delete scopes")
            except Exception as e:
                if e:
                    print("Exception: ", str(e))
Example no. 17
    def setUp(self):
        super(ConfigPurging, self).setUp()
        is_windows = False

        for node in self.cluster.servers:
            shell = RemoteMachineShellConnection(node)
            if shell.info.type.lower() == OS.WINDOWS:
                is_windows = True
            shell.enable_diag_eval_on_non_local_hosts()
            shell.disconnect()

        self.cluster_util.update_cluster_nodes_service_list(self.cluster)

        # Default purger values
        self.default_run_interval = 60
        self.default_purge_age = 300

        self.time_stamp = time()
        self.num_index = self.input.param("num_index", 0)
        self.index_type = self.input.param("index_type", CbServer.Services.FTS)
        self.index_name_len = self.input.param("index_name_len", 10)
        self.fts_index_partition = self.input.param("fts_index_partition", 1)
        self.index_replicas = self.input.param("gsi_index_replicas", 1)
        self.fts_helper = FtsHelper(self.cluster.fts_nodes[0]) \
            if self.cluster.fts_nodes else None
        self.n1ql_helper = N1QLHelper(server=self.cluster.query_nodes[0],
                                      use_rest=True, log=self.log) \
            if self.cluster.query_nodes else None
        self.spare_node = self.servers[-1]
        self.couchbase_base_dir = "/opt/couchbase"
        if is_windows:
            self.couchbase_base_dir = \
                "/cygdrive/c/Program\\ Files/Couchbase/Server"

        # Param order:
        # fts_name, bucket_name, index_partitions, scope_name, collection_name
        self.fts_param_template = '{ \
          "type": "fulltext-index", \
          "name": "%s", \
          "sourceType": "gocbcore", \
          "sourceName": "%s", \
          "sourceUUID": "%s", \
          "planParams": { \
            "maxPartitionsPerPIndex": 1024, \
            "indexPartitions": %d \
          }, \
          "params": { \
            "doc_config": { \
              "docid_prefix_delim": "", \
              "docid_regexp": "", \
              "mode": "scope.collection.type_field", \
              "type_field": "type" \
            }, \
            "mapping": { \
              "analysis": {}, \
              "default_analyzer": "standard", \
              "default_datetime_parser": "dateTimeOptional", \
              "default_field": "_all", \
              "default_mapping": { \
                "dynamic": true, \
                "enabled": false \
              }, \
              "default_type": "_default", \
              "docvalues_dynamic": false, \
              "index_dynamic": true, \
              "store_dynamic": false, \
              "type_field": "_type", \
              "types": { \
                "%s.%s": { \
                  "dynamic": true, \
                  "enabled": true \
                } \
              } \
            }, \
            "store": { \
              "indexType": "scorch", \
              "segmentVersion": 15 \
            } \
          }, \
          "sourceParams": {} \
        }'

        self.gsi_index_name_template = "%s_%s_%s_%d"
        self.gsi_create_template = "CREATE PRIMARY INDEX `%s` " \
                                   "ON `%s`.`%s`.`%s` USING GSI " \
                                   "WITH {\"num_replica\": %d}"
        self.gsi_drop_template = "DROP INDEX `%s`.`%s` USING GSI"

        self.op_create = "key_create"
        self.op_remove = "key_delete"

        self.ts_during_start = self.__get_current_timestamps_from_debug_log()
        self.initial_tombstones = \
            self.cluster_util.get_metakv_dicts(self.cluster.master)
        self.log.info(self.ts_during_start)