Ejemplo n.º 1
0
 def tearDown(self):
     """Reset HTTPS mode, drop the test RBAC user and wipe x509 certs."""
     CbServer.use_https = False
     self.x509 = x509main(host=self.cluster.master)
     # Remove the builtin user created during setUp
     RestConnection(self.cluster.master).delete_builtin_user("cbadminbucket")
     self.x509.teardown_certs(servers=self.cluster.servers)
     super(MultipleCA, self).tearDown()
Ejemplo n.º 2
0
    def test_tls_min_version(self):
        """Verify the cluster honours the configured minimum TLS version.

        Steps:
            1. Create multiple x509 certs
            2. Enforce TLS
            3. Set the TLS floor to 1.3; verify (via curl) that TLS 1.2
               requests fail and TLS 1.3 requests pass
            4. Restore the TLS floor to 1.2; verify TLS 1.2 requests pass
        """
        master = self.cluster.master
        servers = self.cluster.servers

        # Multi-CA cert setup on every node
        self.x509 = x509main(host=master)
        self.x509.generate_multiple_x509_certs(servers=servers)
        for node in servers:
            self.x509.upload_root_certs(node)
        self.x509.upload_node_certs(servers=servers)
        self.x509.delete_unused_out_of_the_box_CAs(master)
        self.x509.upload_client_cert_settings(server=servers[0])

        self.enable_tls_encryption_cli_on_nodes(nodes=[master])

        # Raise the TLS floor to 1.3
        status, content = RestConnection(master).set_min_tls_version(
            version='tlsv1.3')
        if not status:
            self.fail("Setting tls min version to 1.3 failed with content {0}".
                      format(content))

        # 1.2 must now be rejected, 1.3 accepted
        for tls_version, expectation in (("1.2", "fail"), ("1.3", "pass")):
            self.validate_tls_min_version(node=master,
                                          version=tls_version,
                                          expect=expectation)

        # Lower the floor back to 1.2 and verify 1.2 works again
        self.disable_n2n_encryption_cli_on_nodes(nodes=servers)
        CbServer.use_https = False
        status, content = RestConnection(master).set_min_tls_version(
            version='tlsv1.2')
        if not status:
            self.fail("Setting tls min version to 1.2 failed with content {0}".
                      format(content))
        self.enable_tls_encryption_cli_on_nodes(nodes=[master])
        self.validate_tls_min_version(node=master,
                                      version="1.2",
                                      expect="pass")

        self.x509 = x509main(host=master)
        self.x509.teardown_certs(servers=servers)
Ejemplo n.º 3
0
 def tearDown(self):
     """Tear down the x509 certs used by this test, then run base teardown."""
     stage = self.tearDown.__name__
     self.log_setup_status(self.__class__.__name__, "Started", stage=stage)
     cert_util = x509main(host=self.cluster.master,
                          standard=self.standard,
                          encryption_type=self.encryption_type,
                          passphrase_type=self.passphrase_type)
     cert_util.teardown_certs(self.servers)
     super(CBASBugAutomation, self).tearDown()
     self.log_setup_status(self.__class__.__name__, "Finished", stage=stage)
Ejemplo n.º 4
0
    def tearDown(self):
        """Base-test teardown.

        Runs, in order: system event log validation, IP-family and TLS
        cleanup, multi-CA cert teardown, SDK pool shutdown, pcap/coredump
        collection, the main cluster cleanup, and finally task-manager
        shutdown. Failures recorded along the way are raised at the end so
        cleanup always completes first.
        """
        # Perform system event log validation and get failures (if any)
        sys_event_validation_failure = None
        if self.validate_system_event_logs:
            sys_event_validation_failure = \
                self.system_events.validate(self.cluster.master)

        # Revert any IPv4-only / IPv6-only address-family restriction
        if self.ipv4_only or self.ipv6_only:
            for _, cluster in self.cb_clusters.items():
                self.cluster_util.enable_disable_ip_address_family_type(
                    cluster, False, self.ipv4_only, self.ipv6_only)

        # Disable n2n encryption on nodes of all clusters
        if self.use_https and self.enforce_tls:
            for _, cluster in self.cb_clusters.items():
                tasks = []
                for node in cluster.servers:
                    task = self.node_utils.async_disable_tls(node)
                    tasks.append(task)
                for task in tasks:
                    self.task_manager.get_task_result(task)
        # Remove multi-CA certs and the builtin user created for them
        if self.multiple_ca:
            CbServer.use_https = False
            for _, cluster in self.cb_clusters.items():
                rest = RestConnection(cluster.master)
                rest.delete_builtin_user("cbadminbucket")
                x509 = x509main(host=cluster.master)
                x509.teardown_certs(servers=cluster.servers)
        if self.sdk_client_pool:
            self.sdk_client_pool.shutdown()
        if self.collect_pcaps:
            self.log.info("Starting Pcaps collection!!")
            self.start_fetch_pcaps()
        # Coredump/cb_log check runs even when teardown cleanup is skipped
        result = self.check_coredump_exist(self.servers, force_collect=True)
        if self.skip_teardown_cleanup:
            self.log.debug("Skipping tearDownEverything")
        else:
            self.tearDownEverything()
        if not self.crash_warning:
            self.assertFalse(result, msg="Cb_log file validation failed")
        if self.crash_warning and result:
            self.log.warn("CRASH | CRITICAL | WARN messages found in cb_logs")

        # Fail test in case of sys_event_logging failure
        if (not self.is_test_failed()) and sys_event_validation_failure:
            self.fail(sys_event_validation_failure)
        elif sys_event_validation_failure:
            # Test already failed for another reason; just record this one
            self.log.critical("System event log validation failed: %s" %
                              sys_event_validation_failure)

        self.shutdown_task_manager()
Ejemplo n.º 5
0
    def setUp(self):
        """Prepare the cluster for multi-CA tests: read cert parameters,
        clean node inbox folders, create the builtin admin user and a
        default bucket."""
        super(MultipleCA, self).setUp()
        master = self.cluster.master
        self.standard = self.input.param("standard", "pkcs8")
        self.passphrase_type = self.input.param("passphrase_type", "script")
        self.encryption_type = self.input.param("encryption_type", "aes256")
        self.x509 = x509main(host=master,
                             standard=self.standard,
                             encryption_type=self.encryption_type,
                             passphrase_type=self.passphrase_type)
        # Start from a clean cert inbox on every node
        for node in self.cluster.servers:
            self.x509.delete_inbox_folder_on_server(server=node)
        self.basic_url = ("https://" + self.cluster.servers[0].ip +
                          ":18091/pools/default/")

        RestConnection(master).add_set_builtin_user(
            "cbadminbucket", "name=cbadminbucket&roles=admin")
        self.create_bucket(self.cluster, bucket_name='default')
Ejemplo n.º 6
0
    def test_cbas_with_n2n_encryption_and_client_cert_auth(self):
        """End-to-end check of CBAS under n2n encryption and client cert auth.

        Cycles the node-to-node encryption level (strict/all/control —
        "strict" only where supported) while loading docs, creating and
        dropping datasets and optionally rebalancing KV/CBAS nodes in and
        out, validating doc ingestion after every phase. While multi-CA
        certs are installed, certs are re-uploaded to freed nodes after
        every rebalance-out. Mid-test the certs are torn down and set up
        again to exercise cert rotation.
        """
        step_count = 1
        step_count = self._log_step(
            step_count, "Initial Data loading in KV bucket is Complete")

        step_count = self._log_step(step_count, "Creating CBAS infra")
        update_spec = {
            "no_of_dataverses": self.num_dataverses,
            "no_of_datasets_per_dataverse": self.ds_per_dv,
            "no_of_synonyms": 0,
            "no_of_indexes": 0,
            "max_thread_count": 1,
            "dataset": {
                "creation_methods": ["cbas_collection", "cbas_dataset"]
            }
        }
        if self.cbas_spec_name:
            self.cbas_spec = self.cbas_util.get_cbas_spec(self.cbas_spec_name)
            self.cbas_util.update_cbas_spec(self.cbas_spec, update_spec)
            cbas_infra_result = self.cbas_util.create_cbas_infra_from_spec(
                self.cluster,
                self.cbas_spec,
                self.bucket_util,
                wait_for_ingestion=True)
            if not cbas_infra_result[0]:
                self.fail("Error while creating infra from CBAS spec -- " +
                          cbas_infra_result[1])

        x509 = x509main(host=self.cluster.master,
                        standard=self.standard,
                        encryption_type=self.encryption_type,
                        passphrase_type=self.passphrase_type)
        step_count = self._setup_certs(step_count, x509)

        # Mixed-mode clusters cannot be tested reliably
        node_versions = self.cluster.rest.get_nodes_versions()
        if len(set(node_versions)) > 1:
            self.fail("Cluster is in mixed mode.")
        cluster_version = node_versions[0].split("-")[0]
        # "strict" n2n encryption level is unavailable on 7.0.0 / 7.0.1
        strict_mode_supported = cluster_version not in ["7.0.0", "7.0.1"]
        max_level = "strict" if strict_mode_supported else "all"

        step_count = self._set_n2n_level(step_count, max_level)
        step_count = self._load_docs(step_count)
        step_count = self._create_new_dataset(step_count)
        step_count = self._rebalance_in(step_count)
        self._validate_doc_ingestion()

        step_count = self._set_n2n_level(step_count, "control")
        step_count = self._load_docs(step_count)
        step_count = self._create_new_dataset(step_count)
        step_count = self._rebalance_out(step_count, x509=x509)
        self._validate_doc_ingestion()

        step_count = self._set_n2n_level(step_count, "all")
        step_count = self._load_docs(step_count)
        step_count = self._create_new_dataset(step_count)
        step_count = self._rebalance_in(step_count)
        self._validate_doc_ingestion()

        if strict_mode_supported:
            step_count = self._set_n2n_level(step_count, "strict")
            step_count = self._drop_random_dataset(step_count)
            step_count = self._load_docs(step_count)
            step_count = self._rebalance_out(step_count, x509=x509)

            step_count = self._set_n2n_level(step_count, "all")
            step_count = self._drop_random_dataset(step_count)
            step_count = self._load_docs(step_count)
            step_count = self._rebalance_in(step_count)
            self._validate_doc_ingestion()

            step_count = self._set_n2n_level(step_count, "strict")
            step_count = self._load_docs(step_count)
            step_count = self._create_new_dataset(step_count)
            step_count = self._rebalance_out(step_count, x509=x509)
            self._validate_doc_ingestion()
        else:
            step_count = self._set_n2n_level(step_count, "control")
            step_count = self._drop_random_dataset(step_count)
            step_count = self._load_docs(step_count)
            step_count = self._rebalance_out(step_count, x509=x509)
            self._validate_doc_ingestion()

        step_count = self._log_step(
            step_count,
            "Disabling node-to-node encryption and client cert auth")
        self.security_util.disable_n2n_encryption_cli_on_nodes(self.servers)
        if not self.cbas_util.wait_for_cbas_to_recover(self.cluster, 300):
            self.fail("Analytics service Failed to recover")

        step_count = self._log_step(step_count, "Tearing down Certs")
        x509.teardown_certs(self.servers)

        step_count = self._load_docs(step_count)
        step_count = self._create_new_dataset(step_count)
        step_count = self._rebalance_in(step_count)
        self._validate_doc_ingestion()

        step_count = self._set_n2n_level(step_count, max_level)
        step_count = self._load_docs(step_count)
        step_count = self._create_new_dataset(step_count)
        # Certs are torn down here, so no cert re-upload after rebalance
        step_count = self._rebalance_out(step_count)
        self._validate_doc_ingestion()

        # Rotate: set up a fresh set of multi-CA certs
        x509 = x509main(host=self.cluster.master,
                        standard=self.standard,
                        encryption_type=self.encryption_type,
                        passphrase_type=self.passphrase_type)
        step_count = self._setup_certs(step_count, x509)
        step_count = self._load_docs(step_count)
        step_count = self._drop_random_dataset(step_count)
        step_count = self._rebalance_in(step_count)
        self._validate_doc_ingestion()

        step_count = self._set_n2n_level(step_count, "control")
        step_count = self._load_docs(step_count)
        step_count = self._create_new_dataset(step_count)
        step_count = self._rebalance_out(step_count)
        self._validate_doc_ingestion()

    def _log_step(self, step_count, message):
        # Log a numbered test step and advance the step counter.
        self.log.info("Step {0}: {1}".format(step_count, message))
        return step_count + 1

    def _setup_certs(self, step_count, x509):
        # Generate/upload multi-CA certs on all nodes and wait for CBAS.
        step_count = self._log_step(step_count, "Setting up certificates")
        self.generate_and_upload_cert(self.servers,
                                      x509,
                                      upload_root_certs=True,
                                      upload_node_certs=True,
                                      upload_client_certs=True)
        if not self.cbas_util.wait_for_cbas_to_recover(self.cluster, 300):
            self.fail("Analytics service failed to come up after enabling "
                      "Multiple CA certificate")
        return step_count

    def _set_n2n_level(self, step_count, level):
        # Switch node-to-node encryption level and wait for CBAS recovery.
        step_count = self._log_step(
            step_count,
            "Setting node to node encryption level to {0}".format(level))
        self.security_util.set_n2n_encryption_level_on_nodes(
            self.cluster.nodes_in_cluster, level=level)
        if not self.cbas_util.wait_for_cbas_to_recover(self.cluster, 300):
            self.fail("Analytics service Failed to recover")
        return step_count

    def _load_docs(self, step_count):
        # Load an additional batch of docs into the KV bucket.
        step_count = self._log_step(step_count, "Loading more docs")
        self.load_data_into_bucket()
        return step_count

    def _create_new_dataset(self, step_count):
        # Create one more dataset on top of the loaded data.
        step_count = self._log_step(step_count, "Creating dataset")
        self.create_dataset()
        return step_count

    def _drop_random_dataset(self, step_count):
        # Drop a randomly chosen dataset and unregister it from the util.
        step_count = self._log_step(step_count, "Dropping Dataset")
        dataset_to_be_dropped = random.choice(
            self.cbas_util.list_all_dataset_objs())
        if not self.cbas_util.drop_dataset(
                self.cluster, dataset_to_be_dropped.full_name):
            self.fail("Error while dropping dataset")
        del self.cbas_util.dataverses[
            dataset_to_be_dropped.dataverse_name].datasets[
                dataset_to_be_dropped.name]
        return step_count

    def _rebalance_in(self, step_count):
        # Rebalance one KV and one CBAS node IN (no-op if rebalancing is
        # disabled for this run).
        if not self.do_rebalance:
            return step_count
        step_count = self._log_step(step_count,
                                    "Rebalancing IN KV and CBAS nodes")
        rebalance_task, self.available_servers = \
            self.rebalance_util.rebalance(
                self.cluster,
                kv_nodes_in=1,
                kv_nodes_out=0,
                cbas_nodes_in=1,
                cbas_nodes_out=0,
                available_servers=self.available_servers,
                exclude_nodes=[])
        if not self.rebalance_util.wait_for_rebalance_task_to_complete(
                rebalance_task, self.cluster):
            self.fail("Rebalancing IN KV and CBAS nodes Failed")
        return step_count

    def _rebalance_out(self, step_count, x509=None):
        # Rebalance one KV and one CBAS node OUT, never touching the CBAS
        # CC node or the master. When x509 is given, re-upload certs to the
        # freed nodes so they remain usable while multi-CA is in effect.
        if not self.do_rebalance:
            return step_count
        step_count = self._log_step(step_count,
                                    "Rebalancing OUT KV and CBAS nodes")
        rebalance_task, self.available_servers = \
            self.rebalance_util.rebalance(
                self.cluster,
                kv_nodes_in=0,
                kv_nodes_out=1,
                cbas_nodes_in=0,
                cbas_nodes_out=1,
                available_servers=self.available_servers,
                exclude_nodes=[self.cluster.cbas_cc_node,
                               self.cluster.master])
        if not self.rebalance_util.wait_for_rebalance_task_to_complete(
                rebalance_task, self.cluster):
            self.fail("Rebalancing OUT KV and CBAS nodes Failed")
        if x509 is not None:
            self.generate_and_upload_cert(self.available_servers,
                                          x509,
                                          generate_certs=False,
                                          delete_inbox_folder=False,
                                          upload_root_certs=True,
                                          upload_node_certs=True,
                                          delete_out_of_the_box_CAs=False,
                                          upload_client_certs=False)
        return step_count

    def _validate_doc_ingestion(self):
        # Assert every dataset has ingested all docs from the buckets.
        if not self.cbas_util.validate_docs_in_all_datasets(
                self.cluster, self.bucket_util):
            self.fail("Data ingestion into datasets after data reloading "
                      "failed")
Ejemplo n.º 7
0
    def setUp(self):
        """Bring up the on-prem cluster(s) for the test run.

        Reads framework/cluster/bucket parameters from self.input, builds
        the CBCluster objects (single- or multi-cluster), optionally cleans
        and re-initializes each cluster, then applies optional settings:
        developer-preview mode, enforced TLS, IPv4/IPv6-only address
        families and multiple-CA x509 certificates.

        Side effects: mutates many attributes on self, talks to the remote
        cluster nodes over REST/SSH, and may fail the test on any setup
        error (after force-shutting the task manager).
        """
        super(OnPremBaseTest, self).setUp()

        # Framework specific parameters (Extension from cb_basetest)
        self.skip_cluster_reset = self.input.param("skip_cluster_reset", False)
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
        # End of framework parameters

        # Cluster level info settings
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port = self.input.param("port", None)
        self.port_info = self.input.param("port_info", None)
        self.servers = self.input.servers
        self.num_servers = self.input.param("servers", len(self.servers))
        self.vbuckets = self.input.param("vbuckets", CbServer.total_vbuckets)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        # Memory quota settings
        # Max memory quota to utilize per node
        self.quota_percent = self.input.param("quota_percent", 100)
        # Services' RAM quota to set on cluster
        self.kv_mem_quota_percent = self.input.param("kv_quota_percent", None)
        self.index_mem_quota_percent = \
            self.input.param("index_quota_percent", None)
        self.fts_mem_quota_percent = \
            self.input.param("fts_quota_percent", None)
        self.cbas_mem_quota_percent = \
            self.input.param("cbas_quota_percent", None)
        self.eventing_mem_quota_percent = \
            self.input.param("eventing_quota_percent", None)
        # CBAS setting
        self.jre_path = self.input.param("jre_path", None)
        self.enable_dp = self.input.param("enable_dp", False)
        # End of cluster info parameters

        # Bucket specific params
        # Note: Over riding bucket_eviction_policy from CouchbaseBaseTest
        self.bucket_eviction_policy = \
            self.input.param("bucket_eviction_policy",
                             Bucket.EvictionPolicy.VALUE_ONLY)
        self.bucket_replica_index = self.input.param("bucket_replica_index", 1)
        # Magma storage backend only supports full eviction
        if self.bucket_storage == Bucket.StorageBackend.magma:
            self.bucket_eviction_policy = Bucket.EvictionPolicy.FULL_EVICTION
        # End of bucket parameters

        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.wait_timeout = self.input.param("wait_timeout", 120)
        self.verify_unacked_bytes = \
            self.input.param("verify_unacked_bytes", False)
        self.disabled_consistent_view = \
            self.input.param("disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = \
            self.input.param("rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = \
            self.input.param("rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = \
            self.input.param("maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = \
            self.input.param("maxParallelReplicaIndexers", None)
        self.use_https = self.input.param("use_https", False)
        self.enforce_tls = self.input.param("enforce_tls", False)
        self.ipv4_only = self.input.param("ipv4_only", False)
        self.ipv6_only = self.input.param("ipv6_only", False)
        self.multiple_ca = self.input.param("multiple_ca", False)
        if self.use_https:
            # Global switch: all REST calls go over https for this run
            CbServer.use_https = True
            trust_all_certs()

        self.node_utils.cleanup_pcaps(self.servers)
        self.collect_pcaps = self.input.param("collect_pcaps", False)
        if self.collect_pcaps:
            self.node_utils.start_collect_pcaps(self.servers)
        '''
        Be careful while using this flag.
        This is only and only for stand-alone tests.
        During bugs reproductions, when a crash is seen
        stop_server_on_crash will stop the server
        so that we can collect data/logs/dumps at the right time
        '''
        self.stop_server_on_crash = self.input.param("stop_server_on_crash",
                                                     False)
        self.collect_data = self.input.param("collect_data", False)
        self.validate_system_event_logs = \
            self.input.param("validate_sys_event_logs", False)

        self.nonroot = False
        self.crash_warning = self.input.param("crash_warning", False)

        # Populate memcached_port in case of cluster_run
        cluster_run_base_port = ClusterRun.port
        if int(self.input.servers[0].port) == ClusterRun.port:
            for server in self.input.servers:
                server.port = cluster_run_base_port
                cluster_run_base_port += 1
                # If not defined in node.ini under 'memcached_port' section
                # NOTE(review): 'is' (identity, not equality) appears
                # deliberate here - it detects the shared default object
                # assigned from CbServer.memcached_port; confirm before
                # changing to '=='
                if server.memcached_port is CbServer.memcached_port:
                    server.memcached_port = \
                        ClusterRun.memcached_port \
                        + (2 * (int(server.port) - ClusterRun.port))

        self.log_setup_status(self.__class__.__name__, "started")
        cluster_name_format = "C%s"
        default_cluster_index = counter_index = 1
        if len(self.input.clusters) > 1:
            # Multi cluster setup
            for _, nodes in self.input.clusters.iteritems():
                cluster_name = cluster_name_format % counter_index
                tem_cluster = CBCluster(name=cluster_name,
                                        servers=nodes,
                                        vbuckets=self.vbuckets)
                self.cb_clusters[cluster_name] = tem_cluster
                counter_index += 1
        else:
            # Single cluster
            cluster_name = cluster_name_format % counter_index
            self.cb_clusters[cluster_name] = CBCluster(name=cluster_name,
                                                       servers=self.servers,
                                                       vbuckets=self.vbuckets)

        # Initialize self.cluster with first available cluster as default
        self.cluster = self.cb_clusters[cluster_name_format %
                                        default_cluster_index]
        self.cluster_util = ClusterUtils(self.task_manager)
        self.bucket_util = BucketUtils(self.cluster_util, self.task)

        CbServer.enterprise_edition = \
            self.cluster_util.is_enterprise_edition(self.cluster)
        if CbServer.enterprise_edition:
            self.cluster.edition = "enterprise"
        else:
            self.cluster.edition = "community"

        if self.standard_buckets > 10:
            self.bucket_util.change_max_buckets(self.cluster.master,
                                                self.standard_buckets)

        for cluster_name, cluster in self.cb_clusters.items():
            # Append initial master node to the nodes_in_cluster list
            cluster.nodes_in_cluster.append(cluster.master)

            # Detect OS / non-root user; any non-root master flags the
            # whole run as nonroot and we stop probing further clusters
            shell = RemoteMachineShellConnection(cluster.master)
            self.os_info = shell.extract_remote_info().type.lower()
            if self.os_info != 'windows':
                if cluster.master.ssh_username != "root":
                    self.nonroot = True
                    shell.disconnect()
                    break
            shell.disconnect()

        self.log_setup_status("OnPremBaseTest", "started")
        try:
            # Construct dict of mem. quota percent / mb per service
            mem_quota_percent = dict()
            # Construct dict of mem. quota percent per service
            if self.kv_mem_quota_percent:
                mem_quota_percent[CbServer.Services.KV] = \
                    self.kv_mem_quota_percent
            if self.index_mem_quota_percent:
                mem_quota_percent[CbServer.Services.INDEX] = \
                    self.index_mem_quota_percent
            if self.cbas_mem_quota_percent:
                mem_quota_percent[CbServer.Services.CBAS] = \
                    self.cbas_mem_quota_percent
            if self.fts_mem_quota_percent:
                mem_quota_percent[CbServer.Services.FTS] = \
                    self.fts_mem_quota_percent
            if self.eventing_mem_quota_percent:
                mem_quota_percent[CbServer.Services.EVENTING] = \
                    self.eventing_mem_quota_percent

            if not mem_quota_percent:
                mem_quota_percent = None

            if self.skip_setup_cleanup:
                # Update current server/service map and buckets for the cluster
                # (early return - the finally block below still runs)
                for _, cluster in self.cb_clusters.items():
                    self.cluster_util.update_cluster_nodes_service_list(
                        cluster)
                    cluster.buckets = self.bucket_util.get_all_buckets(cluster)
                return
            else:
                for cluster_name, cluster in self.cb_clusters.items():
                    self.log.info("Delete all buckets and rebalance out "
                                  "other nodes from '%s'" % cluster_name)
                    self.cluster_util.cluster_cleanup(cluster,
                                                      self.bucket_util)

            reload(Cb_constants)

            # avoid clean up if the previous test has been tear down
            if self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    self.log.warn("TearDown for prev test failed. Will retry")
                    self.case_number -= 1000
                self.tearDownEverything(reset_cluster_env_vars=False)

            for cluster_name, cluster in self.cb_clusters.items():
                if not self.skip_cluster_reset:
                    self.initialize_cluster(
                        cluster_name,
                        cluster,
                        services=None,
                        services_mem_quota_percent=mem_quota_percent)

                # Update initial service map for the master node
                self.cluster_util.update_cluster_nodes_service_list(cluster)

                # Set this unconditionally
                RestConnection(cluster.master).set_internalSetting(
                    "magmaMinMemoryQuota", 256)

            # Enable dp_version since we need collections enabled
            if self.enable_dp:
                tasks = []
                for server in self.cluster.servers:
                    task = self.node_utils.async_enable_dp(server)
                    tasks.append(task)
                for task in tasks:
                    self.task_manager.get_task_result(task)

            # Enforce tls on nodes of all clusters
            if self.use_https and self.enforce_tls:
                for _, cluster in self.cb_clusters.items():
                    tasks = []
                    for node in cluster.servers:
                        task = self.node_utils.async_enable_tls(node)
                        tasks.append(task)
                    for task in tasks:
                        self.task_manager.get_task_result(task)
                    self.log.info(
                        "Validating if services obey tls only on servers {0}".
                        format(cluster.servers))
                    status = self.cluster_util.check_if_services_obey_tls(
                        cluster.servers)
                    if not status:
                        self.fail("Services did not honor enforce tls")

            # Enforce IPv4 or IPv6 or both
            if self.ipv4_only or self.ipv6_only:
                for _, cluster in self.cb_clusters.items():
                    status, msg = self.cluster_util.enable_disable_ip_address_family_type(
                        cluster, True, self.ipv4_only, self.ipv6_only)
                    if not status:
                        self.fail(msg)

            self.standard = self.input.param("standard", "pkcs8")
            self.passphrase_type = self.input.param("passphrase_type",
                                                    "script")
            self.encryption_type = self.input.param("encryption_type",
                                                    "aes256")
            if self.multiple_ca:
                # Generate/upload x509 certs per cluster and create the
                # built-in admin user used by the certificate tests
                for _, cluster in self.cb_clusters.items():
                    cluster.x509 = x509main(
                        host=cluster.master,
                        standard=self.standard,
                        encryption_type=self.encryption_type,
                        passphrase_type=self.passphrase_type)
                    self.generate_and_upload_cert(cluster.servers,
                                                  cluster.x509,
                                                  upload_root_certs=True,
                                                  upload_node_certs=True,
                                                  upload_client_certs=True)
                    payload = "name=cbadminbucket&roles=admin&password=password"
                    rest = RestConnection(cluster.master)
                    rest.add_set_builtin_user("cbadminbucket", payload)

            for cluster_name, cluster in self.cb_clusters.items():
                self.modify_cluster_settings(cluster)

            # Success-only marker; the shared finalization (test start
            # time + "finished" status) is done once in 'finally' below,
            # which also covers the skip_setup_cleanup early return and
            # the exception path. (Previously these calls were duplicated
            # here, so they ran twice on a normal run.)
            self.__log("started")
        except Exception as e:
            traceback.print_exc()
            self.task.shutdown(force=True)
            self.fail(e)
        finally:
            # Track test start time only if we need system log validation
            if self.validate_system_event_logs:
                self.system_events.set_test_start_time()

            self.log_setup_status("OnPremBaseTest", "finished")