def tearDown(self):
    """Undo the MultipleCA fixture: drop n2n encryption if this run
    enabled it, remove all x509 certs from the nodes, then delegate to
    the parent teardown."""
    enc_level = self.input.param("n2n_encryption_level_multiple_CA", None)
    if enc_level:
        # Encryption was turned on in setUp; switch it off and go back
        # to plain http for subsequent REST traffic.
        ntonencryptionBase().disable_nton_cluster([self.master])
        CbServer.use_https = False
    cert_manager = x509main(host=self.master)
    self.x509 = cert_manager
    cert_manager.teardown_certs(servers=self.servers)
    super(MultipleCA, self).tearDown()
def setUp(self):
    """Set up the MultipleCA fixture: read cert-related test params,
    prepare an x509 helper, load a sample bucket, set replica count,
    rebalance the initial nodes, and optionally enable node-to-node
    encryption."""
    super(MultipleCA, self).setUp()
    # Certificate generation knobs, all overridable via test params.
    self.standard = self.input.param("standard", "pkcs8")
    self.passphrase_type = self.input.param("passphrase_type", "script")
    self.encryption_type = self.input.param("encryption_type", "aes256")
    self.wildcard_dns = self.input.param("wildcard_dns", None)
    self.passphrase_url = self.input.param(
        "rest_url", "https://testingsomething.free.beeceptor.com/")
    # NOTE(review): "passhprase_url" is misspelled but matches the
    # keyword in x509main's signature — do not "fix" it here alone.
    self.x509 = x509main(host=self.master, standard=self.standard,
                         encryption_type=self.encryption_type,
                         passphrase_type=self.passphrase_type,
                         wildcard_dns=self.wildcard_dns,
                         passhprase_url=self.passphrase_url)
    # Start from a clean inbox (cert staging folder) on every node.
    for server in self.servers:
        self.x509.delete_inbox_folder_on_server(server=server)
    sample_bucket = self.input.param("sample_bucket", "travel-sample")
    if sample_bucket is not None:
        self.load_sample_bucket(self.master, sample_bucket)
    self.buckets = RestConnection(self.master).get_buckets()
    rest = RestConnection(self.master)
    for bucket in self.buckets:
        rest.change_bucket_props(bucket, replicaNumber=self.num_replicas)
    # Rebalance (no adds/removes) so the replica change takes effect.
    task = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                        [], [])
    self.wait_for_rebalance_to_complete(task)
    self.n2n_encryption_level_multiple_CA = self.input.param(
        "n2n_encryption_level_multiple_CA", None)
    if self.n2n_encryption_level_multiple_CA:
        ntonencryptionBase().setup_nton_cluster(
            [self.master],
            clusterEncryptionLevel=self.n2n_encryption_level_multiple_CA)
        # Encryption implies TLS-only client traffic from here on.
        CbServer.use_https = True
def setup_master(self, state=None, paths=None, prefixs=None,
                 delimeters=None, mode='rest', user='******',
                 password='******', non_local_CA_upload=True):
    """Install the cluster CA and node certificates on the master node.

    :param state: client-cert auth state; when not None, client cert
        settings are also written and uploaded.
    :param paths/prefixs/delimeters: client-cert JSON fields (the
        misspelled names match callers elsewhere — keep them).
    :param mode: 'rest' or 'cli' — transport used to push CA settings.
    :param user/password: credentials for the upload calls (defaults
        redacted in this copy of the source).
    :param non_local_CA_upload: temporarily allow uploading a CA from a
        non-local source, then revoke the allowance at the end.

    NOTE(review): n2n encryption is disabled for the duration of the
    cert upload and restored afterwards at the same level.
    """
    level = ntonencryptionBase().get_encryption_level_cli(self.host)
    if level:
        # Cert swaps cannot happen while n2n encryption is active.
        ntonencryptionBase().disable_nton_cluster([self.host])
    # Work on a copy so mutations to the host object don't leak out.
    copy_host = copy.deepcopy(self.host)
    if non_local_CA_upload:
        self.non_local_CA_upload(server=copy_host, allow=True)
    x509main(copy_host)._upload_cluster_ca_certificate(user, password)
    x509main(copy_host)._setup_node_certificates()
    if state is not None:
        self.write_client_cert_json_new(state, paths, prefixs, delimeters)
        if mode == 'rest':
            x509main(copy_host)._upload_cluster_ca_settings(user, password)
        elif mode == 'cli':
            x509main(copy_host)._upload_cert_file_via_cli(user, password)
    if level:
        # Restore the encryption level we found at entry.
        ntonencryptionBase().setup_nton_cluster(
            [self.host], clusterEncryptionLevel=level)
    if non_local_CA_upload:
        # Disable it back as uploading is done
        self.non_local_CA_upload(server=copy_host, allow=False)
def test_enforcing_tls_during_handler_lifecycle_operation(self):
    """Enforce strict TLS while an eventing handler is mid lifecycle
    transition (resuming/bootstrapping) and verify the handler still
    reaches 'deployed', processes deletes, and services bind ports
    correctly."""
    ntonencryptionBase().setup_nton_cluster(
        self.servers, clusterEncryptionLevel=self.ntonencrypt_level)
    body = self.create_save_function_body(
        self.function_name, "handler_code/ABO/insert_rebalance.js")
    self.load(self.gens_load, buckets=self.src_bucket,
              flag=self.item_flag, verify_data=False,
              batch_size=self.batch_size)
    self.deploy_function(body)
    self.verify_doc_count_collections("dst_bucket._default._default",
                                      self.docs_per_day * self.num_docs)
    # Suspend the handler (pause or undeploy depending on the mode).
    if self.pause_resume:
        self.pause_function(body)
    else:
        self.undeploy_function(body)
    # Restart it WITHOUT waiting, so the TLS change below races the
    # handler's resume/bootstrap — that race is the point of the test.
    if self.pause_resume:
        self.resume_function(body, wait_for_resume=False)
    else:
        self.deploy_function(body, wait_for_bootstrap=False)
    ntonencryptionBase().setup_nton_cluster(
        [self.master], clusterEncryptionLevel="strict")
    self.wait_for_handler_state(body['appname'], "deployed")
    self.load(self.gens_load, buckets=self.src_bucket,
              flag=self.item_flag, verify_data=False,
              batch_size=self.batch_size, op_type='delete')
    self.verify_doc_count_collections("dst_bucket._default._default", 0)
    assert ClusterOperationHelper.check_if_services_obey_tls(
        servers=[self.master
                 ]), "Port binding after enforcing TLS incorrect"
    self.undeploy_and_delete_function(body)
def test_eventing_rebalance_with_n2n_encryption_and_enforce_tls(self):
    """For each n2n encryption level (control/all/strict): rebalance an
    eventing node in, verify the handler processes deletes, rebalance
    it back out, and verify inserts again."""
    ntonencryptionBase().disable_nton_cluster([self.master])
    body = self.create_save_function_body(
        self.function_name, "handler_code/ABO/insert_rebalance.js")
    self.load(self.gens_load, buckets=self.src_bucket,
              flag=self.item_flag, verify_data=False,
              batch_size=self.batch_size)
    self.deploy_function(body)
    self.verify_doc_count_collections("dst_bucket._default._default",
                                      self.docs_per_day * self.num_docs)
    for level in ["control", "all", "strict"]:
        # Suspend the handler while the encryption level changes.
        if self.pause_resume:
            self.pause_function(body)
        else:
            self.undeploy_function(body)
        ntonencryptionBase().setup_nton_cluster(
            [self.master], clusterEncryptionLevel=level)
        if self.x509enable:
            # Incoming node needs certs before it can join.
            self.upload_x509_certs(self.servers[self.nodes_init])
        services_in = ["eventing"]
        # Rebalance-in an extra eventing node at this encryption level.
        rebalance = self.cluster.async_rebalance(
            self.servers[:self.nodes_init],
            [self.servers[self.nodes_init]], [], services=services_in)
        reached = RestHelper(self.rest).rebalance_reached(retry_count=150)
        self.assertTrue(reached,
                        "rebalance failed, stuck or did not complete")
        rebalance.result()
        if self.pause_resume:
            self.resume_function(body)
        else:
            self.deploy_function(body)
        self.load(self.gens_load, buckets=self.src_bucket,
                  flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size, op_type='delete')
        self.verify_doc_count_collections("dst_bucket._default._default",
                                          0)
        # Rebalance the extra node back out.
        rebalance = self.cluster.async_rebalance(
            self.servers[:self.nodes_init], [],
            [self.servers[self.nodes_init]])
        reached = RestHelper(self.rest).rebalance_reached(retry_count=150)
        self.assertTrue(reached,
                        "rebalance failed, stuck or did not complete")
        rebalance.result()
        self.load(self.gens_load, buckets=self.src_bucket,
                  flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        self.verify_doc_count_collections(
            "dst_bucket._default._default",
            self.docs_per_day * self.num_docs)
    self.undeploy_and_delete_function(body)
def test_port_security_with_encryption(self):
    """Port-scan the cluster for vulnerabilities with node-to-node
    encryption enabled — first at level "all", then at "strict" with
    HTTPS forced for client traffic."""
    for enc_level in ("all", "strict"):
        if enc_level == "strict":
            # Strict TLS means all further client calls go over HTTPS.
            CbServer.use_https = True
        ntonencryptionBase().setup_nton_cluster(
            servers=self.servers, ntonStatus="enable",
            clusterEncryptionLevel=enc_level)
        self.test_port_security()
def tearDown(self):
    """MultipleCAUpgrade teardown: drop n2n encryption, reset the HTTPS
    flag, reinstall the original build if setup completed, then chain
    to the parent teardown."""
    log.info(
        "============== Multiple CA Upgrade teardown has started =============="
    )
    ntonencryptionBase().disable_nton_cluster(self.servers)
    CbServer.use_https = False
    if self.test_setup_finished:
        # Reinstall to undo the upgrade performed by the test body.
        self._install(self.servers)
    super(MultipleCAUpgrade, self).tearDown()
    log.info(
        "============== Multiple CA Upgrade teardown finished =============="
    )
def test_n2n_encryption_enabled_rebalance_in_with_ddoc_ops(self):
    """Run design-doc create/update/delete while a rebalance-in is in
    flight, then enable strict n2n encryption and verify data, stats
    and ddocs; finally check strict-TLS port bindings."""
    self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
                    "ERROR: Not enough nodes to do rebalance in and out")
    ntonencryptionBase().disable_nton_cluster([self.master])
    self._load_doc_data_all_buckets()
    servs_in = self.servers[1:self.nodes_in + 1]
    # Kick off the rebalance and run ddoc ops concurrently with it.
    rebalance = self.cluster.async_rebalance(self.servers[:1], servs_in,
                                             [])
    for bucket in self.buckets:
        self._execute_ddoc_ops("create", self.test_with_view,
                               self.num_ddocs, self.num_views_per_ddoc,
                               bucket=bucket)
        self._execute_ddoc_ops("update", self.test_with_view,
                               self.num_ddocs // 2,
                               self.num_views_per_ddoc // 2,
                               bucket=bucket)
        self._execute_ddoc_ops("delete", self.test_with_view,
                               self.num_ddocs // 2,
                               self.num_views_per_ddoc // 2,
                               bucket=bucket)
    rebalance.result()
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_in + 1])
    # Cap verification effort on very large datasets.
    max_verify = None
    if self.num_items > 500000:
        max_verify = 100000
    ntonencryptionBase().setup_nton_cluster(
        [self.master], clusterEncryptionLevel="strict")
    self._verify_all_buckets(server=self.master,
                             timeout=self.wait_timeout *
                             15 if not self.dgm_run else None,
                             max_verify=max_verify)
    self._verify_stats_all_buckets(
        self.servers[:self.nodes_in + 1],
        timeout=self.wait_timeout if not self.dgm_run else None)
    self._verify_ddoc_ops_all_buckets()
    if self.test_with_view:
        self._verify_ddoc_data_all_buckets()
    assert ClusterOperationHelper.check_if_services_obey_tls(
        servers=[self.master
                 ]), "Port binding after enforcing TLS incorrect"
def test_view_ops_n2n_encryption_enabled(self):
    """Exercise ddoc create/update/delete while toggling n2n encryption
    (strict -> custom level -> disabled -> strict) with x509 certs
    installed, verifying ddocs and strict-TLS port bindings."""
    ntonencryptionBase().disable_nton_cluster([self.master])
    self.log.info("###### Generating x509 certificate#####")
    self.generate_x509_certs(self.servers)
    self.log.info("###### uploading x509 certificate#####")
    self.upload_x509_certs(self.servers)
    self._load_doc_data_all_buckets()
    for bucket in self.buckets:
        self._execute_ddoc_ops("create", self.test_with_view,
                               self.num_ddocs, self.num_views_per_ddoc,
                               bucket=bucket)
    self._wait_for_stats_all_buckets([self.master])
    ntonencryptionBase().setup_nton_cluster(
        [self.master], clusterEncryptionLevel="strict")
    self.x509enable = True
    encryption_result = ntonencryptionBase().setup_nton_cluster(
        self.servers, 'enable', self.ntonencrypt_level)
    self.assertTrue(encryption_result,
                    "Retries Exceeded. Cannot enable n2n encryption")
    self._verify_ddoc_ops_all_buckets()
    self._verify_ddoc_data_all_buckets()
    # NOTE(review): 'bucket' below is the loop variable leaked from the
    # create loop above — update/delete only hit the LAST bucket while
    # the verifiers check all buckets. Confirm whether this is intended.
    self._execute_ddoc_ops("update", self.test_with_view,
                           self.num_ddocs // 2,
                           self.num_views_per_ddoc // 2, bucket=bucket)
    self._wait_for_stats_all_buckets([self.master])
    ntonencryptionBase().disable_nton_cluster([self.master])
    self._verify_ddoc_ops_all_buckets()
    self._verify_ddoc_data_all_buckets()
    self._execute_ddoc_ops("delete", self.test_with_view,
                           self.num_ddocs // 2,
                           self.num_views_per_ddoc // 2, bucket=bucket)
    self._wait_for_stats_all_buckets([self.master])
    ntonencryptionBase().setup_nton_cluster(
        [self.master], clusterEncryptionLevel="strict")
    self._verify_ddoc_ops_all_buckets()
    self._verify_ddoc_data_all_buckets()
    assert ClusterOperationHelper.check_if_services_obey_tls(
        servers=[self.master
                 ]), "Port binding after enforcing TLS incorrect"
def test_port_security_with_certificates(self):
    """Port-scan the cluster for vulnerabilities after provisioning
    multiple root CAs, per-node certificates and client-cert settings,
    with strict node-to-node encryption active."""
    cert_manager = x509main(host=self.master)
    self.x509 = cert_manager
    cert_manager.generate_multiple_x509_certs(servers=self.servers)
    for node in self.servers:
        cert_manager.upload_root_certs(node)
    cert_manager.upload_node_certs(servers=self.servers)
    cert_manager.delete_unused_out_of_the_box_CAs(server=self.master)
    cert_manager.upload_client_cert_settings(server=self.servers[0])
    # Strict encryption forces all client traffic onto HTTPS.
    CbServer.use_https = True
    ntonencryptionBase().setup_nton_cluster(
        servers=self.servers, clusterEncryptionLevel="strict")
    self.test_port_security()
def test_multiple_CAs_offline(self):
    """
    1. Init Pre-neo cluster with x509 (single CA)
    2. Offline upgrade node one by one
    3. Optionally convert pkcs#1 key to encrypted pkcs#8 key in
       mixed mode cluster
    4. After cluster is fully upgraded, rotate certs to use
       pkcs#8 keys and multiple CAs
    """
    self.log.info("------------- test started-------------")
    self.generate_x509_certs(self.servers)
    self.upload_x509_certs(self.servers)
    ntonencryptionBase().setup_nton_cluster(
        servers=self.servers, clusterEncryptionLevel="all")
    for node in self.servers:
        self.log.info(
            "-------------Performing upgrade on node {0} to version------------- {1}"
            .format(node, self.upgrade_version))
        upgrade_threads = self._async_update(
            upgrade_version=self.upgrade_version, servers=[node])
        for threads in upgrade_threads:
            threads.join()
        self.log.info("Upgrade finished")
        if self.enc_key_mixed_mode:
            # Mixed-key mode: convert this node's key to encrypted
            # pkcs#8 and reload its cert.
            # NOTE(review): nesting of the reload under this condition
            # is reconstructed from a whitespace-mangled source —
            # confirm against the original file.
            self.convert_to_pkcs8(node)
            self.reload_node_cert_with_plain_password(node)
    CbServer.use_https = True
    ntonencryptionBase().setup_nton_cluster(
        servers=self.servers, clusterEncryptionLevel="strict")
    self._reset_original(self.servers)
    # Rotate to pkcs#8 keys and multiple CAs on the upgraded cluster.
    self.x509_new = x509main(host=self.master, standard="pkcs8",
                             encryption_type="aes256",
                             passphrase_type="script")
    self.x509_new.generate_multiple_x509_certs(servers=self.servers)
    for server in self.servers:
        _ = self.x509_new.upload_root_certs(server)
    self.x509_new.upload_node_certs(servers=self.servers)
    self.x509_new.delete_unused_out_of_the_box_CAs(server=self.master)
    self.x509_new.upload_client_cert_settings(server=self.servers[0])
    task = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                        self.servers[self.nodes_init:],
                                        [])
    self.wait_for_rebalance_to_complete(task)
    self.auth(servers=self.servers)
    self.validate_rbac_users_post_upgrade(self.master)
    self.validate_user_roles_sdk(self.master)
def test_teardown_with_n2n_encryption(self):
    """Check that regenerating and deleting trusted CAs succeeds while
    node-to-node encryption is active, at every encryption level."""
    CbServer.use_https = True
    cluster_nodes = self.servers[:self.nodes_init]
    for enc_level in ("strict", "control", "all"):
        cert_manager = x509main(host=self.master,
                                standard=self.standard,
                                encryption_type=self.encryption_type,
                                passphrase_type=self.passphrase_type,
                                wildcard_dns=self.wildcard_dns)
        self.x509 = cert_manager
        cert_manager.generate_multiple_x509_certs(servers=cluster_nodes)
        cert_manager.upload_root_certs(self.master)
        cert_manager.upload_node_certs(servers=cluster_nodes)
        ntonencryptionBase().setup_nton_cluster(
            [self.master], clusterEncryptionLevel=enc_level)
        # Teardown targets every configured server, not only the
        # initial nodes, so stray certs are cleaned up too.
        cert_manager.teardown_certs(servers=self.servers)
        ntonencryptionBase().disable_nton_cluster([self.master])
def test_eventing_with_n2n_encryption_enabled(self):
    """Cycle an eventing handler across node-to-node encryption
    transitions (off -> configured level -> "all" -> off), verifying
    after each transition that mutations still flow from the source to
    the destination collection.

    Decomposed: the pause/undeploy vs resume/deploy boilerplate was
    repeated four times and is now factored into local helpers.
    """
    def _suspend_handler():
        # Take the handler out of service: pause in pause/resume mode,
        # otherwise undeploy.
        if self.pause_resume:
            self.pause_function(body)
        else:
            self.undeploy_function(body)

    def _restore_handler():
        # Bring the handler back: resume in pause/resume mode,
        # otherwise redeploy.
        if self.pause_resume:
            self.resume_function(body)
        else:
            self.deploy_function(body)

    def _mutate_and_verify(expected, delete=False):
        # Load (or delete) docs in the source collection and check the
        # destination doc count matches `expected`.
        if delete:
            self.load_data_to_collection(
                self.docs_per_day * self.num_docs,
                "src_bucket._default._default", is_delete=True)
        else:
            self.load_data_to_collection(
                self.docs_per_day * self.num_docs,
                "src_bucket._default._default")
        self.verify_doc_count_collections("dst_bucket._default._default",
                                          expected)

    ntonencryptionBase().disable_nton_cluster([self.master])
    body = self.create_save_function_body(
        self.function_name, "handler_code/ABO/insert_rebalance.js")
    self.load_data_to_collection(self.docs_per_day * self.num_docs,
                                 "src_bucket._default._default")
    self.deploy_function(body)
    self.verify_doc_count_collections("dst_bucket._default._default",
                                      self.docs_per_day * self.num_docs)
    # 1) Enable encryption at the configured level.
    _suspend_handler()
    ntonencryptionBase().setup_nton_cluster(
        self.servers, clusterEncryptionLevel=self.ntonencrypt_level)
    _restore_handler()
    _mutate_and_verify(0, delete=True)
    # 2) Raise the level to "all".
    _suspend_handler()
    ntonencryptionBase().setup_nton_cluster(self.servers,
                                            clusterEncryptionLevel="all")
    _restore_handler()
    _mutate_and_verify(self.docs_per_day * self.num_docs)
    # 3) Disable encryption again.
    _suspend_handler()
    ntonencryptionBase().disable_nton_cluster([self.master])
    _restore_handler()
    _mutate_and_verify(0, delete=True)
    self.undeploy_and_delete_function(body)
def test_view_system_events_settings_change(self):
    """Create design docs, verify them, flip n2n encryption
    off -> on -> off, and assert the DDoc settings-change system event
    was recorded."""
    self._load_doc_data_all_buckets()
    for target_bucket in self.buckets:
        self._execute_ddoc_ops("create", self.test_with_view,
                               self.num_ddocs,
                               self.num_views_per_ddoc,
                               bucket=target_bucket)
    self._wait_for_stats_all_buckets([self.master])
    self._verify_ddoc_ops_all_buckets()
    self._verify_ddoc_data_all_buckets()
    # Toggle encryption so the views service re-reads its settings.
    ntonencryptionBase().disable_nton_cluster([self.master])
    ntonencryptionBase().setup_nton_cluster([self.master])
    ntonencryptionBase().disable_nton_cluster([self.master])
    global_vars.system_event_logs.add_event(
        ViewsServiceEvents.DDoc_Settings_change(self.master))
    self.system_events.validate(server=self.master)
def test_multiple_CAs_online(self):
    """
    1. Init Pre-neo cluster with x509 (single CA)
    2. Online upgrade by rebalance-in and rebalance-out
    3. Optionally convert pkcs#1 key to encrypted pkcs#8 key in
       mixed mode cluster
    4. After cluster is fully upgraded, rotate certs to use
       pkcs#8 keys and multiple CAs
    """
    self.log.info("------------- test started-------------")
    self.generate_x509_certs(self.servers)
    self.upload_x509_certs(self.servers)
    ntonencryptionBase().setup_nton_cluster(
        servers=self.servers, clusterEncryptionLevel="all")
    # Spare nodes are pre-upgraded, then swapped in one at a time.
    nodes_to_upgrade = self.servers[self.nodes_init:]
    nodes_to_upgrade = nodes_to_upgrade[:self.nodes_init]
    self.log.info(
        "-------------Performing upgrade on node {0} to version------------- {1}"
        .format(nodes_to_upgrade, self.upgrade_version))
    upgrade_threads = self._async_update(
        upgrade_version=self.upgrade_version, servers=nodes_to_upgrade)
    for threads in upgrade_threads:
        threads.join()
    nodes_in_cluster = self.servers[:self.nodes_init]
    old_nodes = copy.deepcopy(nodes_in_cluster)
    for node in nodes_to_upgrade:
        self.log.info("Rebalance-in a Neo node {0}".format(node))
        # NOTE(review): services is a single comma-joined string in a
        # one-element list — presumably the format async_rebalance
        # expects; confirm against its signature.
        task = self.cluster.async_rebalance(
            nodes_in_cluster, [node], [],
            services=["kv,n1ql,cbas,eventing,fts,index"])
        self.wait_for_rebalance_to_complete(task)
        nodes_in_cluster.append(node)
        node_out = old_nodes[-1]
        self.log.info("Rebalance-out a pre-Neo node {0}".format(node_out))
        task = self.cluster.async_rebalance(nodes_in_cluster, [],
                                            [node_out])
        self.wait_for_rebalance_to_complete(task)
        old_nodes.remove(node_out)
        # NOTE(review): removes from nodes_in_cluster while iterating
        # it — works when a single node matches, but fragile.
        for enode in nodes_in_cluster:
            if enode.ip == node_out.ip:
                nodes_in_cluster.remove(enode)
        # The swapped-in upgraded node becomes the new master.
        self.master = copy.deepcopy(node)
        if self.enc_key_mixed_mode:
            # Mixed-key mode: convert this node's key to encrypted
            # pkcs#8 and reload its cert.
            # NOTE(review): nesting reconstructed from a
            # whitespace-mangled source — confirm against the original.
            self.convert_to_pkcs8(node)
            self.reload_node_cert_with_plain_password(node)
    CbServer.use_https = True
    ntonencryptionBase().setup_nton_cluster(
        servers=nodes_in_cluster, clusterEncryptionLevel="strict")
    self._reset_original(nodes_in_cluster)
    # Rotate to pkcs#8 keys and multiple CAs on the upgraded cluster.
    self.x509_new = x509main(host=self.master, standard="pkcs8",
                             encryption_type="aes256",
                             passphrase_type="script")
    self.x509_new.generate_multiple_x509_certs(servers=nodes_in_cluster)
    for server in nodes_in_cluster:
        _ = self.x509_new.upload_root_certs(server)
    self.x509_new.upload_node_certs(servers=nodes_in_cluster)
    self.x509_new.delete_unused_out_of_the_box_CAs(server=self.master)
    self.x509_new.upload_client_cert_settings(server=nodes_in_cluster[0])
    self.auth(servers=nodes_in_cluster)
    self.validate_rbac_users_post_upgrade(self.master)
    self.validate_user_roles_sdk(self.master)
def validate_user_roles_sdk(self, server):
    """
    1. Create SDK connection
    2. Check whether users created before upgrade are able to
       write/read docs into the bucket post upgrade depending on their
       roles

    Written in accordance with python sdk version - 2.4

    BUG FIX: the original reader-role check in the write path contained
    a bare generator expression — ("data_dcp_reader" in role for role
    in data_roles) — which is always truthy, so that branch always
    logged success. All role checks now go through any() via the
    _has_role helper.
    """
    ntonencryptionBase().disable_nton_cluster(self.servers)
    CbServer.use_https = False
    for user in self.user_roles:
        # select users with data roles
        data_roles = [
            role for role in self.user_roles[user] if "data_" in role
        ]
        if not data_roles:
            continue

        def _has_role(fragment):
            # True when any of this user's data roles contains fragment.
            return any(fragment in role for role in data_roles)

        self.log.info("USERNAME -- {0}".format(user))
        # create sdk connection
        bucket_name = "travel-sample"
        url = 'couchbase://{ip}/{name}'.format(ip=server.ip,
                                               name=bucket_name)
        if user == "bjones":
            url = 'couchbase://{ip}/{name}?sasl_mech_force=PLAIN'.format(
                ip=server.ip, name=bucket_name)
        bucket = Bucket(url, username=user, password="******")
        try:
            # write/upsert into the doc
            bucket.upsert('doc123', {'key123': 'value123'})
        except CouchbaseInputError:
            if _has_role("data_reader") or _has_role(
                    "data_dcp_reader") or _has_role("data_monitoring"):
                self.log.info("Write permission not granted as expected")
            else:
                self.fail(
                    "Validation failed. The user should have permission to "
                    "write to the given bucket")
        except CouchbaseError:
            if _has_role("data_writer") or _has_role("data_backup"):
                self.log.info("Write permission granted as expected")
            else:
                self.fail(
                    "Validation failed. The user should not have permission to "
                    "write to the given bucket")
        else:
            if _has_role("data_writer") or _has_role("data_backup"):
                self.log.info("Write permission granted as expected")
            else:
                self.fail(
                    "Validation failed. The user should not have permission to "
                    "write to the given bucket")
        try:
            # read the doc
            bucket.get('airline_10')
        except CouchbaseInputError:
            if _has_role("data_writer") or _has_role("data_monitoring"):
                self.log.info("Read permission not granted as expected")
            else:
                self.fail(
                    "Validation failed. The user should have permission to "
                    "read the given bucket")
        except CouchbaseError:
            if _has_role("data_reader") or _has_role(
                    "data_dcp_reader") or _has_role("data_backup"):
                self.log.info("Read permission granted as expected")
            else:
                self.fail(
                    "Validation failed. The user should not have permission to "
                    "read the given bucket")
        else:
            if _has_role("data_reader") or _has_role(
                    "data_dcp_reader") or _has_role("data_backup"):
                self.log.info("Read permission granted as expected")
            else:
                self.fail(
                    "Validation failed. The user should not have permission to "
                    "read the given bucket")
def test_upgrade_with_ldap_root_cert(self):
    """
    1. Setup a cluster with x509 certs with n2n encryption enabled.
    2. Start an ldap container with a root certificate.
    3. Setup an ldap client connection from CB cluster to ldap server.
    4. Create an ldap user.
    5. Upgrade the CB cluster offline.
    6. Add ldap's root cert to cluster's trusted CAs and make ldap
       client connection from CB to ldap server using cluster's
       trusted CAs
    7. Validate ldap user authentication works
    """
    self.log.info("------------- test started-------------")
    self.generate_x509_certs(self.servers)
    self.upload_x509_certs(self.servers)
    ntonencryptionBase().setup_nton_cluster(
        servers=self.servers, clusterEncryptionLevel="all")
    # setup and start ldap container
    self.log.info("Setting up ldap container")
    self.docker = LdapContainer()
    shell = RemoteMachineShellConnection(self.master)
    self.docker.start_docker(shell)
    # Ensure a clean docker state before starting the ldap container.
    self.docker.stop_all_containers(shell)
    self.docker.remove_all_containers(shell)
    self.docker.start_ldap_container(shell)
    shell.disconnect()
    # Setup ldap config and add an ldap user
    self.log.info("Setting up ldap config and creating ldap user")
    param = self.get_ldap_params(
        hosts='ldap.example.org', port='636', encryption='TLS',
        bindDN='cn=admin,dc=example,dc=org', bindPass="******",
        serverCertValidation='false',
        userDNMapping='{"template":"cn=%u,dc=example,dc=org"}')
    self.setup_ldap_config(server=self.master, param=param)
    self.add_ldap_user(server=self.master,
                       user_name=self.docker.ldap_user)
    for node in self.servers:
        self.log.info(
            "-------------Performing upgrade on node {0} to version------------- {1}"
            .format(node, self.upgrade_version))
        upgrade_threads = self._async_update(
            upgrade_version=self.upgrade_version, servers=[node])
        for threads in upgrade_threads:
            threads.join()
        self.log.info("Upgrade finished")
    # Add multiple CAs with strict level of n2n encryption
    CbServer.use_https = True
    ntonencryptionBase().setup_nton_cluster(
        servers=self.servers, clusterEncryptionLevel="strict")
    self._reset_original(self.servers)
    self.x509_new = x509main(host=self.master, standard="pkcs8",
                             encryption_type="aes256",
                             passphrase_type="script")
    self.x509_new.generate_multiple_x509_certs(servers=self.servers)
    for server in self.servers:
        _ = self.x509_new.upload_root_certs(server)
    self.x509_new.upload_node_certs(servers=self.servers)
    self.x509_new.delete_unused_out_of_the_box_CAs(server=self.master)
    # Upload ldap's root CA to cluster's trusted CAs
    self.log.info("Copying ldap CA to inbox/CA folder")
    # TODO: Change the hardcoded path (for now it's okay since the VM on which container is running is fixed )
    self.copy_file_from_slave_to_server(
        server=self.master, src=self.docker.ldap_ca,
        dst=
        "/opt/couchbase/var/lib/couchbase/inbox/CA/ldap_container_ca.crt")
    self.log.info("Uploading ldap CA to CB")
    self.x509_new.upload_root_certs(self.master)
    self.log.info("Changing ldap config to use CB trusted CAs")
    # Re-point ldap config at the container, now validating its cert
    # against the cluster's trusted CAs.
    param = self.get_ldap_params(
        hosts=self.docker.hostname, port=str(self.docker.ssl_port),
        encryption='TLS', bindDN=self.docker.bindDN,
        bindPass=self.docker.bindPass, serverCertValidation='true',
        userDNMapping='{"template":"cn=%u,dc=example,dc=org"}')
    self.setup_ldap_config(server=self.master, param=param)
    # Validate ldap user post upgrade
    ldap_rest = RestConnection(self.master)
    ldap_rest.username = self.docker.ldap_user
    ldap_rest.password = self.docker.bindPass
    status, content, response = self.make_rest_call_with_ldap_user(
        ldap_rest=ldap_rest)
    if not status:
        self.fail("Rest call with ldap user credentials failed with content "
                  "{0}, response{1}".format(content, response))
def test_eventing_with_enforce_tls_feature(self):
    """Cycle an eventing handler across node-to-node encryption
    transitions (off -> configured level -> strict -> configured level
    -> off), verifying after each transition that mutations flow from
    the source to the destination collection, and that services bind
    ports correctly once strict TLS is enforced.

    Decomposed: the pause/undeploy vs resume/deploy boilerplate was
    repeated four times and the load+verify pairs were duplicated;
    both are now factored into local helpers.
    """
    def _suspend_handler():
        # Take the handler out of service: pause in pause/resume mode,
        # otherwise undeploy.
        if self.pause_resume:
            self.pause_function(body)
        else:
            self.undeploy_function(body)

    def _restore_handler():
        # Bring the handler back: resume in pause/resume mode,
        # otherwise redeploy.
        if self.pause_resume:
            self.resume_function(body)
        else:
            self.deploy_function(body)

    def _mutate_and_verify(expected, delete=False):
        # Load (or delete) docs in the source bucket and check the
        # destination collection doc count matches `expected`.
        if delete:
            self.load(self.gens_load, buckets=self.src_bucket,
                      flag=self.item_flag, verify_data=False,
                      batch_size=self.batch_size, op_type='delete')
        else:
            self.load(self.gens_load, buckets=self.src_bucket,
                      flag=self.item_flag, verify_data=False,
                      batch_size=self.batch_size)
        self.verify_doc_count_collections("dst_bucket._default._default",
                                          expected)

    ntonencryptionBase().disable_nton_cluster([self.master])
    body = self.create_save_function_body(self.function_name,
                                          self.handler_code)
    self.load(self.gens_load, buckets=self.src_bucket,
              flag=self.item_flag, verify_data=False,
              batch_size=self.batch_size)
    self.deploy_function(body)
    self.verify_doc_count_collections("dst_bucket._default._default",
                                      self.docs_per_day * self.num_docs)
    # 1) Enable encryption at the configured level.
    _suspend_handler()
    ntonencryptionBase().setup_nton_cluster(
        self.servers, clusterEncryptionLevel=self.ntonencrypt_level)
    _restore_handler()
    _mutate_and_verify(0, delete=True)
    # 2) Enforce strict TLS cluster-wide.
    _suspend_handler()
    ntonencryptionBase().setup_nton_cluster(
        [self.master], clusterEncryptionLevel="strict")
    _restore_handler()
    _mutate_and_verify(self.docs_per_day * self.num_docs)
    assert ClusterOperationHelper.check_if_services_obey_tls(
        servers=[self.master
                 ]), "Port binding after enforcing TLS incorrect"
    # 3) Drop back to the configured level.
    _suspend_handler()
    ntonencryptionBase().setup_nton_cluster(
        self.servers, clusterEncryptionLevel=self.ntonencrypt_level)
    _restore_handler()
    _mutate_and_verify(0, delete=True)
    # 4) Disable encryption entirely.
    _suspend_handler()
    ntonencryptionBase().disable_nton_cluster([self.master])
    _restore_handler()
    _mutate_and_verify(self.docs_per_day * self.num_docs)
    self.undeploy_and_delete_function(body)
def test_n2n_encryption_enabled_rebalance_in_and_out_with_ddoc_ops(self):
    """Run design-doc ops concurrently with a rebalance in/out, then
    enable strict n2n encryption, start a second swap rebalance, and
    verify data, stats, ddocs and strict-TLS port bindings."""
    #assert if number of nodes_in and nodes_out are not sufficient
    self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
                    "ERROR: Not enough nodes to do rebalance in and out")
    ntonencryptionBase().disable_nton_cluster([self.master])
    servs_in = self.servers[self.num_servers - self.nodes_in:]
    #subtract the servs_in from the list of servers
    servs_for_rebal = [
        serv for serv in self.servers if serv not in servs_in
    ]
    servs_out = servs_for_rebal[self.num_servers - self.nodes_in -
                                self.nodes_out:]
    # Outgoing nodes get SSL set up before they leave the cluster.
    x509main().setup_cluster_nodes_ssl(servs_out)
    #list of server which will be available after the in/out operation
    servs_after_rebal = [
        serv for serv in self.servers if serv not in servs_out
    ]
    self.log.info(
        "create a cluster of all the available servers except nodes_in")
    self.cluster.rebalance(servs_for_rebal[:1], servs_for_rebal[1:], [])
    #ntonencryptionBase().disable_nton_cluster([self.master])
    # load initial documents
    self._load_doc_data_all_buckets()
    #start the rebalance in/out operation
    rebalance = self.cluster.async_rebalance(servs_for_rebal, servs_in,
                                             servs_out)
    for bucket in self.buckets:
        self._execute_ddoc_ops("create", self.test_with_view,
                               self.num_ddocs, self.num_views_per_ddoc,
                               bucket=bucket)
        if self.ddoc_ops in ["update", "delete"]:
            self._execute_ddoc_ops(self.ddoc_ops, self.test_with_view,
                                   self.num_ddocs // 2,
                                   self.num_views_per_ddoc // 2,
                                   bucket=bucket)
    rebalance.result()
    self._wait_for_stats_all_buckets(servs_after_rebal)
    # Cap verification effort on very large datasets.
    max_verify = None
    if self.num_items > 500000:
        max_verify = 100000
    encryption_result = ntonencryptionBase().setup_nton_cluster(
        [self.master], clusterEncryptionLevel="strict")
    self.assertTrue(encryption_result,
                    "Retries Exceeded. Cannot enable n2n encryption")
    # NOTE(review): this second rebalance is never awaited — the
    # verification below runs while it is (possibly) still in flight.
    # Confirm that racing verification with the rebalance is intended.
    self.cluster.async_rebalance(servs_for_rebal, servs_in, servs_out)
    self._verify_all_buckets(server=self.master,
                             timeout=self.wait_timeout *
                             15 if not self.dgm_run else None,
                             max_verify=max_verify)
    self._verify_stats_all_buckets(
        servs_after_rebal,
        timeout=self.wait_timeout if not self.dgm_run else None)
    self._verify_ddoc_ops_all_buckets()
    if self.test_with_view:
        self._verify_ddoc_data_all_buckets()
    assert ClusterOperationHelper.check_if_services_obey_tls(
        servers=[self.master
                 ]), "Port binding after enforcing TLS incorrect"