def test_compression_with_optimistic_threshold_change(self):
    """Verify XDCR compression still works after lowering the
    optimisticReplicationThreshold on both replications.

    NOTE(review): a later definition with this exact name exists in this
    class and shadows this one — confirm and rename/deduplicate.
    """
    self.setup_xdcr()
    start_ts = int(time.time())
    self.sleep(60)
    compr_type = self._input.param("compression_type", "Snappy")
    # Compress standard_bucket_1 only; standard_bucket_2 is left at the
    # default and serves as the uncompressed control for _verify_compression().
    self._set_compression_type(self.src_cluster, "standard_bucket_1", compr_type)
    self._set_compression_type(self.src_cluster, "standard_bucket_2")
    rest = RestConnection(self.src_cluster.get_master_node())
    # Apply the threshold to both replications (bucket_1 first, then bucket_2).
    for bucket in ("standard_bucket_1", "standard_bucket_2"):
        rest.set_xdcr_param(bucket, bucket,
                            'optimisticReplicationThreshold',
                            self._optimistic_threshold)
    self.src_cluster.pause_all_replications()
    load_gen = BlobGenerator('comprOne-', 'comprOne-', self._value_size,
                             end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(kv_gen=load_gen)
    self.src_cluster.resume_all_replications()
    self.async_perform_update_delete()
    self._wait_for_replication_to_catchup()
    self._verify_compression(cluster=self.src_cluster,
                             compr_bucket_name="standard_bucket_1",
                             uncompr_bucket_name="standard_bucket_2",
                             compression_type=compr_type,
                             repl_time=start_ts)
    self.verify_results()
def test_compression_with_replication_delete_and_create(self):
    """Load data, tear down all replications and remote-cluster references,
    recreate them, enable compression, and verify replication catches up.

    Fix: removed the unused local ``repl_time`` — unlike the sibling tests
    this one never calls ``_verify_compression``, so the timestamp was dead.
    """
    self.setup_xdcr()
    self.sleep(60)
    gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size,
                               end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)
    self.async_perform_update_delete()
    # Remove every replication and remote-cluster reference via REST, then
    # clear the framework's own bookkeeping so setup_xdcr() starts clean.
    rest_conn = RestConnection(self.src_master)
    rest_conn.remove_all_replications()
    rest_conn.remove_all_remote_clusters()
    self.src_cluster.get_remote_clusters()[0].clear_all_replications()
    self.src_cluster.clear_all_remote_clusters()
    # Recreate the XDCR topology from scratch, now with compression on
    # standard_bucket_1 (standard_bucket_2 reset to its default type).
    self.setup_xdcr()
    compression_type = self._input.param("compression_type", "Snappy")
    self._set_compression_type(self.src_cluster, "standard_bucket_1",
                               compression_type)
    self._set_compression_type(self.src_cluster, "standard_bucket_2")
    self._wait_for_replication_to_catchup()
    self.verify_results()
def test_compression_with_unixdcr_backfill_load(self):
    """Backfill load over unidirectional XDCR with compression enabled on
    standard_bucket_1; standard_bucket_2 has bucket compression mode set
    to "off" so its data is guaranteed uncompressed (the control bucket).
    """
    self.setup_xdcr()
    # Disable bucket-level compression on standard_bucket_2 so the data it
    # stores is uncompressed.
    master_rest = RestConnection(self.src_cluster.get_master_node())
    master_rest.set_bucket_compressionMode("standard_bucket_2", "off")
    start_time = int(time.time())
    self.sleep(60)
    self._set_compression_type(self.src_cluster, "standard_bucket_1",
                               self.compression_type)
    self._set_compression_type(self.src_cluster, "standard_bucket_2")
    # Pause replication first so the load becomes a backfill when resumed.
    self.src_cluster.pause_all_replications()
    doc_gen = BlobGenerator('comprOne-', 'comprOne-', self._value_size,
                            end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(kv_gen=doc_gen)
    self.src_cluster.resume_all_replications()
    self.perform_update_delete()
    self._wait_for_replication_to_catchup()
    self._verify_compression(cluster=self.src_cluster,
                             compr_bucket_name="standard_bucket_1",
                             uncompr_bucket_name="standard_bucket_2",
                             compression_type=self.compression_type,
                             repl_time=start_time)
    self.verify_results()
def test_compression_with_optimistic_threshold_change(self):
    """Verify XDCR compression after lowering the
    optimisticReplicationThreshold on both replications.

    NOTE(review): this method name is already defined earlier in the class;
    this later definition shadows the earlier one — confirm intent and
    rename or remove one of the two.
    """
    self.setup_xdcr()
    repl_time = int(time.time())
    self.sleep(60)
    compression_type = self._input.param("compression_type", "Snappy")
    self._set_compression_type(self.src_cluster, "standard_bucket_1",
                               compression_type)
    # Fix: reset standard_bucket_2 to its default (uncompressed) type, as
    # every sibling test does — _verify_compression() below relies on it
    # being the uncompressed control bucket. This call was missing here.
    self._set_compression_type(self.src_cluster, "standard_bucket_2")
    src_conn = RestConnection(self.src_cluster.get_master_node())
    src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1',
                            'optimisticReplicationThreshold',
                            self._optimistic_threshold)
    src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2',
                            'optimisticReplicationThreshold',
                            self._optimistic_threshold)
    self.src_cluster.pause_all_replications()
    gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size,
                               end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)
    self.src_cluster.resume_all_replications()
    self.async_perform_update_delete()
    self._wait_for_replication_to_catchup()
    self._verify_compression(cluster=self.src_cluster,
                             compr_bucket_name="standard_bucket_1",
                             uncompr_bucket_name="standard_bucket_2",
                             compression_type=compression_type,
                             repl_time=repl_time)
    self.verify_results()
def test_compression_with_failover(self):
    """Run a compressed XDCR load across a source-cluster failover, with an
    optional recovery/add-back + rebalance, then verify compression & data.

    NOTE(review): this method name is defined twice in this class; the
    later duplicate shadows this one — confirm and deduplicate.
    """
    self.setup_xdcr()
    start_ts = int(time.time())
    self.sleep(60)
    compr_type = self._input.param("compression_type", "Snappy")
    self._set_compression_type(self.src_cluster, "standard_bucket_1", compr_type)
    self._set_compression_type(self.src_cluster, "standard_bucket_2")
    self.src_cluster.pause_all_replications()
    load_gen = BlobGenerator('comprOne-', 'comprOne-', self._value_size,
                             end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(kv_gen=load_gen)
    self.src_cluster.resume_all_replications()
    self.async_perform_update_delete()
    rest = RestConnection(self.src_cluster.get_master_node())
    graceful = self._input.param("graceful", False)
    self.recoveryType = self._input.param("recoveryType", None)
    self.src_cluster.failover(graceful=graceful)
    self.sleep(30)
    if self.recoveryType:
        # Add the failed-over node back with the requested recovery type,
        # then rebalance so it rejoins the cluster.
        target_ip = self._input.servers[1].ip
        for node in rest.node_statuses():
            if node.ip != target_ip:
                continue
            rest.set_recovery_type(otpNode=node.id,
                                   recoveryType=self.recoveryType)
            self.sleep(30)
            rest.add_back_node(otpNode=node.id)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(),
                                            [], [])
        task.result()
    self._wait_for_replication_to_catchup()
    self._verify_compression(cluster=self.src_cluster,
                             compr_bucket_name="standard_bucket_1",
                             uncompr_bucket_name="standard_bucket_2",
                             compression_type=compr_type,
                             repl_time=start_ts)
    self.verify_results()
def test_compression_with_failover(self):
    """Exercise compressed XDCR through a source-cluster failover and an
    optional recovery (delta/full) with add-back and rebalance, then
    validate compression and replicated data.

    NOTE(review): duplicate of an earlier method with the same name in
    this class; this later definition is the one Python keeps.
    """
    self.setup_xdcr()
    repl_start = int(time.time())
    self.sleep(60)
    ctype = self._input.param("compression_type", "Snappy")
    self._set_compression_type(self.src_cluster, "standard_bucket_1", ctype)
    self._set_compression_type(self.src_cluster, "standard_bucket_2")
    self.src_cluster.pause_all_replications()
    generator = BlobGenerator('comprOne-', 'comprOne-', self._value_size,
                              end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(kv_gen=generator)
    self.src_cluster.resume_all_replications()
    self.async_perform_update_delete()
    conn = RestConnection(self.src_cluster.get_master_node())
    graceful = self._input.param("graceful", False)
    self.recoveryType = self._input.param("recoveryType", None)
    self.src_cluster.failover(graceful=graceful)
    self.sleep(30)
    if self.recoveryType:
        # Recover the failed-over node (servers[1]) and rebalance it back in.
        recover_ip = self._input.servers[1].ip
        matching = [n for n in conn.node_statuses() if n.ip == recover_ip]
        for node in matching:
            conn.set_recovery_type(otpNode=node.id,
                                   recoveryType=self.recoveryType)
            self.sleep(30)
            conn.add_back_node(otpNode=node.id)
        self.cluster.async_rebalance(self.src_cluster.get_nodes(),
                                     [], []).result()
    self._wait_for_replication_to_catchup()
    self._verify_compression(cluster=self.src_cluster,
                             compr_bucket_name="standard_bucket_1",
                             uncompr_bucket_name="standard_bucket_2",
                             compression_type=ctype,
                             repl_time=repl_start)
    self.verify_results()
def test_compression_with_advanced_settings(self):
    """Verify XDCR compression with non-default advanced replication
    settings (batch count/size and source/target nozzle counts) applied
    to both replications."""
    batch_count = self._input.param("batch_count", 10)
    batch_size = self._input.param("batch_size", 2048)
    source_nozzle = self._input.param("source_nozzle", 2)
    target_nozzle = self._input.param("target_nozzle", 2)
    self.setup_xdcr()
    start_ts = int(time.time())
    self.sleep(60)
    compr_type = self._input.param("compression_type", "Snappy")
    self._set_compression_type(self.src_cluster, "standard_bucket_1", compr_type)
    self._set_compression_type(self.src_cluster, "standard_bucket_2")
    rest = RestConnection(self.src_cluster.get_master_node())
    # Same advanced settings for both replications; insertion order of this
    # dict preserves the original call order per bucket.
    xdcr_settings = {
        'workerBatchSize': batch_count,
        'docBatchSizeKb': batch_size,
        'sourceNozzlePerNode': source_nozzle,
        'targetNozzlePerNode': target_nozzle,
    }
    for bucket in ("standard_bucket_1", "standard_bucket_2"):
        for setting, value in xdcr_settings.items():
            rest.set_xdcr_param(bucket, bucket, setting, value)
    self.src_cluster.pause_all_replications()
    load_gen = BlobGenerator('comprOne-', 'comprOne-', self._value_size,
                             end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(kv_gen=load_gen)
    self.src_cluster.resume_all_replications()
    self.async_perform_update_delete()
    self._wait_for_replication_to_catchup()
    self._verify_compression(cluster=self.src_cluster,
                             compr_bucket_name="standard_bucket_1",
                             uncompr_bucket_name="standard_bucket_2",
                             compression_type=compr_type,
                             repl_time=start_ts)
    self.verify_results()