Example #1
0
 def test_restart_node_with_encrypted_pkeys(self):
     """
     1. Init node cluster, with encrypted node pkeys
     2. Restart a node
     3. Failover and delta recover that node
     4. Restart the node again and rebalance-out this time
     5. Repeat steps 2 to 5 until you are left with master node
     """
     self.x509.generate_multiple_x509_certs(
         servers=self.servers[:self.nodes_init])
     self.x509.upload_root_certs(self.master)
     self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
     rest = RestConnection(self.master)
     # Plain copy so removing nodes below does not mutate self.servers
     nodes_in_cluster = list(self.servers[:self.nodes_init])
     for node in self.servers[1:self.nodes_init]:
         # Step 2: hard restart of couchbase-server on the node
         shell = RemoteMachineShellConnection(node)
         shell.restart_couchbase()
         shell.disconnect()
         self.sleep(10, "Wait after restart")
         # Step 3: hard failover, then delta-recover via rebalance
         self.cluster.async_failover(nodes_in_cluster, [node],
                                     graceful=False)
         self.wait_for_failover_or_assert(1)
         rest.set_recovery_type("ns_1@" + node.ip, recoveryType="delta")
         https_val = CbServer.use_https  # so that add_node uses https
         CbServer.use_https = True
         try:
             task = self.cluster.async_rebalance(nodes_in_cluster, [], [])
             self.wait_for_rebalance_to_complete(task)
         finally:
             # Restore even on rebalance failure so later operations/tests
             # are not left forced onto https
             CbServer.use_https = https_val
         # Step 4: restart the node again and rebalance it out
         shell = RemoteMachineShellConnection(node)
         shell.restart_couchbase()
         shell.disconnect()
         https_val = CbServer.use_https  # so that add_node uses https
         CbServer.use_https = True
         try:
             task = self.cluster.async_rebalance(nodes_in_cluster, [], [node])
             self.wait_for_rebalance_to_complete(task)
         finally:
             CbServer.use_https = https_val
         nodes_in_cluster.remove(node)
Example #2
0
    def test_xdcr_with_security(self):
        """Run XDCR while toggling cluster security settings (TLS
        enforcement, node-to-node encryption, autofailover, multiple
        CAs / client certs), optionally combined with topology changes
        (rebalance in/out/swap, failover, pause/resume, reboot).

        Everything is driven by test input params; the per-setting params
        name the cluster(s) the action applies to (resolved via
        get_cluster_objects_for_input).
        """
        # Allowed values per security setting; also the pool that
        # random_setting draws from below.
        self.settings_values_map = {
            "autofailover": ["enable", None],
            "n2n": ["enable", "disable"],
            "tls": ["all", "control", "strict"]
        }

        # When True, security settings are applied BEFORE the XDCR setup
        # + initial load; otherwise after (see the two setup calls below).
        self.apply_settings_before_setup = self._input.param(
            "apply_settings_before_setup", False)
        self.disable_autofailover = self._input.param("disable_autofailover",
                                                      False)
        self.enable_n2n = self._input.param("enable_n2n", False)
        self.enforce_tls = self._input.param("enforce_tls", None)
        self.tls_level = self._input.param("tls_level", "control")
        self.enable_autofailover = self._input.param("enable_autofailover",
                                                     False)
        self.disable_n2n = self._input.param("disable_n2n", None)
        self.disable_tls = self._input.param("disable_tls", None)

        # Topology-change and cert-related params (cluster selectors/flags)
        rebalance_in = self._input.param("rebalance_in", None)
        rebalance_out = self._input.param("rebalance_out", None)
        swap_rebalance = self._input.param("swap_rebalance", None)
        failover = self._input.param("failover", None)
        graceful = self._input.param("graceful", None)
        pause = self._input.param("pause", None)
        reboot = self._input.param("reboot", None)
        # Direction of setup is randomized unless pinned by the test conf
        initial_xdcr = self._input.param("initial_xdcr",
                                         random.choice([True, False]))
        random_setting = self._input.param("random_setting", False)
        multiple_ca = self._input.param("multiple_ca", None)
        use_client_certs = self._input.param("use_client_certs", None)
        int_ca_name = self._input.param("int_ca_name", "iclient1_clientroot")
        all_node_upload = self._input.param("all_node_upload", False)
        rotate_certs = self._input.param("rotate_certs", None)
        delete_certs = self._input.param("delete_certs", None)
        restart_pkey_nodes = self._input.param("restart_pkey_nodes", None)

        # Default ordering: set up XDCR and load docs first, then apply
        # the security settings.
        if not self.apply_settings_before_setup:
            if initial_xdcr:
                self.load_and_setup_xdcr()
            else:
                self.setup_xdcr_and_load()

        if self.enforce_tls:
            for cluster in self.get_cluster_objects_for_input(
                    self.enforce_tls):
                # tls_level == "rotate" cycles through every level in the
                # map instead of setting a single one
                if self.tls_level == "rotate":
                    for level in self.settings_values_map["tls"]:
                        cluster.toggle_security_setting(
                            [cluster.get_master_node()], "tls", level)
                        time.sleep(5)
                else:
                    cluster.toggle_security_setting(
                        [cluster.get_master_node()], "tls", self.tls_level)

        # Revert to default (control) tls level
        if self.disable_tls:
            for cluster in self.get_cluster_objects_for_input(
                    self.disable_tls):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "tls")

        if self.enable_n2n:
            for cluster in self.get_cluster_objects_for_input(self.enable_n2n):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "n2n", "enable")

        # Omitting the value disables node-to-node encryption
        if self.disable_n2n:
            for cluster in self.get_cluster_objects_for_input(
                    self.disable_n2n):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "n2n")

        if self.enable_autofailover:
            for cluster in self.get_cluster_objects_for_input(
                    self.enable_autofailover):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "autofailover", "enable")

        if self.disable_autofailover:
            for cluster in self.get_cluster_objects_for_input(
                    self.disable_autofailover):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "autofailover")

        # Pick one setting and one of its allowed values at random
        if random_setting:
            for cluster in self.get_cluster_objects_for_input(random_setting):
                setting = random.choice(list(self.settings_values_map.keys()))
                value = random.choice(self.settings_values_map.get(setting))
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                setting, value)

        if multiple_ca:
            for cluster in self.get_cluster_objects_for_input(multiple_ca):
                master = cluster.get_master_node()
                # n2n must be off while replacing certs; re-enabled at
                # "strict" once the new certs are in place (see below)
                ntonencryptionBase().disable_nton_cluster([master])
                CbServer.x509 = x509main(host=master)
                for server in cluster.get_nodes():
                    CbServer.x509.delete_inbox_folder_on_server(server=server)
                CbServer.x509.generate_multiple_x509_certs(
                    servers=cluster.get_nodes())
                if all_node_upload:
                    # Upload a different root CA to each node (node_num is
                    # used to pair node i with root CA i)
                    for node_num in range(len(cluster.get_nodes())):
                        CbServer.x509.upload_root_certs(
                            server=cluster.get_nodes()[node_num],
                            root_ca_names=[
                                CbServer.x509.root_ca_names[node_num]
                            ])
                else:
                    for server in cluster.get_nodes():
                        CbServer.x509.upload_root_certs(server)
                CbServer.x509.upload_node_certs(servers=cluster.get_nodes())
                if use_client_certs:
                    CbServer.x509.upload_client_cert_settings(server=master)
                    client_cert_path, client_key_path = CbServer.x509.get_client_cert(
                        int_ca_name=int_ca_name)
                    # Copy the certs onto the test machines
                    for server in cluster.get_nodes():
                        shell = RemoteMachineShellConnection(server)
                        shell.execute_command(
                            f"mkdir -p {os.path.dirname(client_cert_path)}")
                        shell.copy_file_local_to_remote(
                            client_cert_path, client_cert_path)
                        shell.execute_command(
                            f"mkdir -p {CbServer.x509.CACERTFILEPATH}all")
                        shell.copy_file_local_to_remote(
                            f"{CbServer.x509.CACERTFILEPATH}all/all_ca.pem",
                            f"{CbServer.x509.CACERTFILEPATH}all/all_ca.pem")
                        shell.disconnect()
                    self._client_cert = self._read_from_file(client_cert_path)
                    self._client_key = self._read_from_file(client_key_path)
                    self.add_built_in_server_user(node=master)
                ntonencryptionBase().setup_nton_cluster(
                    [master], clusterEncryptionLevel="strict")
            if rotate_certs:
                for cluster in self.get_cluster_objects_for_input(
                        rotate_certs):
                    CbServer.x509.rotate_certs(cluster.get_nodes())
            if delete_certs:
                for cluster in self.get_cluster_objects_for_input(
                        delete_certs):
                    for node in cluster.get_nodes():
                        CbServer.x509.delete_trusted_CAs(node)
            if restart_pkey_nodes:
                # Restart each node, then failover/rebalance and delta
                # add-back to verify the node rejoins with its new pkey
                for cluster in self.get_cluster_objects_for_input(
                        restart_pkey_nodes):
                    for node in cluster.get_nodes():
                        shell = RemoteMachineShellConnection(node)
                        shell.restart_couchbase()
                        shell.disconnect()
                        time.sleep(10)
                        cluster.failover_and_rebalance_nodes()
                        cluster.add_back_node("delta")

        # Alternate ordering: settings were applied above, set up XDCR now
        if self.apply_settings_before_setup:
            if initial_xdcr:
                self.load_and_setup_xdcr()
            else:
                self.setup_xdcr_and_load()

        # Pause replications before the topology changes; resumed further
        # down after the rebalances/failovers
        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.pause_all_replications()
                    time.sleep(60)

        if rebalance_in:
            for cluster in self.get_cluster_objects_for_input(rebalance_in):
                cluster.rebalance_in()

        if failover:
            for cluster in self.get_cluster_objects_for_input(failover):
                cluster.failover_and_rebalance_nodes(graceful=graceful,
                                                     rebalance=True)

        if rebalance_out:
            for cluster in self.get_cluster_objects_for_input(rebalance_out):
                cluster.rebalance_out()

        if swap_rebalance:
            for cluster in self.get_cluster_objects_for_input(swap_rebalance):
                cluster.swap_rebalance()

        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.resume_all_replications()

        if reboot:
            for cluster in self.get_cluster_objects_for_input(reboot):
                cluster.warmup_node()
            time.sleep(60)

        # Final mutations + end-to-end verification of replication
        self.perform_update_delete()
        self.verify_results()
Example #3
0
    def test_auto_retry_failed_rebalance(self):
        """Fail a rebalance by restarting couchbase mid-rebalance and verify
        the auto-retry-rebalance feature completes it, while KV loading and
        concurrent CBAS queries run in the background.
        """
        # Enable auto-retry of failed rebalances for this test; reverted in
        # the finally block so other tests are unaffected
        body = {"enabled": "true", "afterTimePeriod": self.retry_time,
                "maxAttempts": self.num_retries}
        rest = RestConnection(self.master)
        rest.set_retry_rebalance_settings(body)
        result = rest.get_retry_rebalance_settings()
        self.log.info("Retry rebalance settings applied : {0}".format(result))

        self.log.info("Pick the incoming and outgoing nodes during rebalance")
        self.rebalance_type = self.input.param("rebalance_type", "in")
        nodes_to_add = [self.rebalanceServers[1]]
        nodes_to_remove = []
        reinitialize_cbas_util = False
        if self.rebalance_type == 'out':
            # Node must be in the cluster before it can be rebalanced out
            nodes_to_remove.append(self.rebalanceServers[1])
            self.add_node(self.rebalanceServers[1])
            nodes_to_add = []
        elif self.rebalance_type == 'swap':
            # Swapping out the cbas node invalidates the current cbas
            # connection; it is re-created after the rebalance below
            self.add_node(nodes_to_add[0], rebalance=False)
            nodes_to_remove.append(self.cbas_node)
            reinitialize_cbas_util = True
        self.log.info("Incoming nodes - %s, outgoing nodes - %s. For rebalance type %s " % (
        nodes_to_add, nodes_to_remove, self.rebalance_type))

        self.log.info("Creates cbas buckets and dataset")
        dataset_count_query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Perform async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(docs_per_day=self.num_items * 3 / 2, start=self.num_items)
        kv_task = self._async_load_all_buckets(self.master, generators, "create", 0, batch_size=5000)

        self.log.info("Run concurrent queries on CBAS")
        handles = self.cbas_util._run_concurrent_queries(dataset_count_query, "async", self.num_concurrent_queries)

        self.log.info("Fetch the server to restart couchbase on")
        restart_couchbase_on_incoming_or_outgoing_node = self.input.param(
            "restart_couchbase_on_incoming_or_outgoing_node", True)
        if not restart_couchbase_on_incoming_or_outgoing_node:
            node = self.cbas_node
        else:
            node = self.rebalanceServers[1]
        shell = RemoteMachineShellConnection(node)

        try:
            self.log.info("Rebalance nodes")
            self.cluster.async_rebalance(self.servers, nodes_to_add, nodes_to_remove)

            self.sleep(10, message="Restarting couchbase after 10s on node %s" % node.ip)

            # Restarting couchbase mid-rebalance makes the first rebalance
            # attempt fail; auto-retry should kick in after retry_time
            shell.restart_couchbase()
            self.sleep(30, message="Waiting for service to be back again...")

            self.sleep(self.retry_time, "Wait for retry time to complete and then check the rebalance results")

            reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
            self.log.info("Rebalance status : {0}".format(reached))
            self.sleep(20)

            self._check_retry_rebalance_succeeded()

            if reinitialize_cbas_util is True:
                self.cbas_util = cbas_utils(self.master, self.rebalanceServers[1])
                self.cbas_util.createConn("default")
                self.cbas_util.wait_for_cbas_to_recover()

            self.log.info("Get KV ops result")
            for task in kv_task:
                task.get_result()

            self.log.info("Log concurrent query status")
            self.cbas_util.log_concurrent_query_outcome(self.master, handles)

            self.log.info("Validate dataset count on CBAS")
            if not self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items * 3 / 2, 0):
                self.fail("No. of items in CBAS dataset do not match that in the CB bucket")
        except Exception as e:
            # NOTE: Exception has no .message attribute on Python 3 -- format
            # the exception object itself instead
            self.fail("Some exception occurred : {0}".format(e))
        finally:
            # Always restore the default (disabled) auto-retry setting
            body = {"enabled": "false"}
            rest.set_retry_rebalance_settings(body)
Example #4
0
    def test_to_fail_initial_rebalance_and_verify_subsequent_rebalance_succeeds(self):
        """Make the first rebalance fail by restarting couchbase on a node
        mid-rebalance, then verify a follow-up rebalance of the same
        topology succeeds, with KV loads and concurrent CBAS queries
        running throughout.
        """
        self.log.info("Pick the incoming and outgoing nodes during rebalance")
        self.rebalance_type = self.input.param("rebalance_type", "in")
        in_nodes = [self.rebalanceServers[1]]
        out_nodes = []
        needs_cbas_reinit = False
        if self.rebalance_type == 'out':
            out_nodes.append(self.rebalanceServers[1])
            self.add_node(self.rebalanceServers[1])
            in_nodes = []
        elif self.rebalance_type == 'swap':
            self.add_node(in_nodes[0], rebalance=False)
            out_nodes.append(self.cbas_node)
            needs_cbas_reinit = True
        self.log.info(
            "Incoming nodes - %s, outgoing nodes - %s. For rebalance type %s "
            % (in_nodes, out_nodes, self.rebalance_type))

        self.log.info("Creates cbas buckets and dataset")
        count_query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Perform async doc operations on KV")
        doc_gen = JsonGenerator()
        load_gens = doc_gen.generate_docs_simple(
            docs_per_day=self.num_items * 3 / 2, start=self.num_items)
        kv_tasks = self._async_load_all_buckets(
            self.master, load_gens, "create", 0, batch_size=5000)

        self.log.info("Run concurrent queries on CBAS")
        query_handles = self.cbas_util._run_concurrent_queries(
            count_query, "async", self.num_concurrent_queries)

        self.log.info("Fetch the server to restart couchbase on")
        restart_on_target = self.input.param(
            "restart_couchbase_on_incoming_or_outgoing_node", True)
        target_node = (self.rebalanceServers[1] if restart_on_target
                       else self.cbas_node)
        ssh = RemoteMachineShellConnection(target_node)

        self.log.info("Rebalance nodes")
        self.cluster.async_rebalance(self.servers, in_nodes, out_nodes)

        self.log.info("Restart Couchbase on node %s" % target_node.ip)
        ssh.restart_couchbase()
        self.sleep(30, message="Waiting for service to be back again...")

        self.log.info("Verify subsequent rebalance is successful")
        # The first rebalance already added the node; re-adding here would
        # raise, so retry with an empty incoming list
        in_nodes = []
        self.assertTrue(self.cluster.rebalance(self.servers, in_nodes, out_nodes))

        if needs_cbas_reinit is True:
            self.cbas_util = cbas_utils(self.master, self.rebalanceServers[1])
            self.cbas_util.createConn("default")
            self.cbas_util.wait_for_cbas_to_recover()

        self.log.info("Get KV ops result")
        for task in kv_tasks:
            task.get_result()

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, query_handles)

        self.log.info("Validate dataset count on CBAS")
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 3 / 2, 0):
            self.fail("No. of items in CBAS dataset do not match that in the CB bucket")