def test_replication_with_view_queries(self):
    """Verify XDCR replication while default views are created and queried.

    Checks node references before and after, sets up XDCR with load,
    creates the same design doc on both clusters, then queries every
    view on each side against its bucket's expected key count.
    """
    self.verify_referenced_by_names(self.src_nodes, self.src_cluster.get_host_names())
    self.verify_referenced_by_names(self.dest_nodes, self.dest_cluster.get_host_names())
    self.setup_xdcr_and_load()
    view_count = self._input.param("num_views", 5)
    dev_ddoc = self._input.param("is_dev_ddoc", True)
    src_buckets = self.src_cluster.get_buckets()
    dest_buckets = self.dest_cluster.get_buckets()
    # Each iteration rebinds `views`; the set built for the last
    # destination bucket is the one used below (original behaviour).
    for bucket in src_buckets:
        views = Utility.make_default_views(bucket.name, view_count, dev_ddoc)
    for bucket in dest_buckets:
        views = Utility.make_default_views(bucket.name, view_count, dev_ddoc)
    ddoc_name = "ddoc1"
    prefix = "dev_" if dev_ddoc else ""
    query = {"full_set": "true", "stale": "false"}
    create_tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
    create_tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
    for task in create_tasks:
        task.result(self._poll_timeout)
    self.merge_all_buckets()
    query_tasks = []
    for view in views:
        query_tasks += [
            self.src_cluster.async_query_view(
                prefix + ddoc_name, view.name, query, len(src_buckets[0].kvs[1])),
            self.dest_cluster.async_query_view(
                prefix + ddoc_name, view.name, query, len(dest_buckets[0].kvs[1])),
        ]
    for task in query_tasks:
        task.result(self._poll_timeout)
    self.verify_results()
    self.verify_referenced_by_names(self.src_nodes, self.src_cluster.get_host_names())
    self.verify_referenced_by_names(self.dest_nodes, self.dest_cluster.get_host_names())
def view_query_pause_resume(self):
    """Create views on the destination while XDCR is paused, then resume and query."""
    load_tasks = self.__async_load_xdcr()
    self.pause_xdcr()
    # `views` is rebound per destination bucket; the last bucket's set
    # is the one created and queried (original behaviour).
    for bucket in self.dest_cluster.get_buckets():
        views = Utility.make_default_views(bucket.name, self.num_views, self.is_dev_ddoc)
    ddoc_name = "ddoc1"
    prefix = "dev_" if self.is_dev_ddoc else ""
    query = {"full_set": "true", "stale": "false"}
    create_tasks = self.dest_cluster.async_create_views(ddoc_name, views)
    for task in create_tasks:
        task.result(self._poll_timeout)
    # Wait for the asynchronous load to finish before resuming replication.
    for load_task in load_tasks:
        load_task.result()
    # Resume the paused XDCR streams.
    self.resume_xdcr()
    self.merge_all_buckets()
    query_tasks = []
    for view in views:
        query_tasks.append(self.dest_cluster.async_query_view(
            prefix + ddoc_name, view.name, query,
            len(self.dest_cluster.get_buckets()[0].kvs[1])))
    for task in query_tasks:
        task.result(self._poll_timeout)
    self.verify_results()
def view_query_pause_resume(self):
    """Pause XDCR, build destination views, resume, then verify via view queries."""
    load_tasks = self.__async_load_xdcr()
    self.pause_xdcr()
    views = None
    # Rebuilt once per destination bucket; only the final set is used
    # afterwards (matches the original loop's net effect).
    for bucket in self.dest_cluster.get_buckets():
        views = Utility.make_default_views(bucket.name, self.num_views, self.is_dev_ddoc)
    ddoc_name = "ddoc1"
    prefix = ("", "dev_")[self.is_dev_ddoc]
    query = {"full_set": "true", "stale": "false"}
    for create_task in self.dest_cluster.async_create_views(ddoc_name, views):
        create_task.result(self._poll_timeout)
    # Block until the asynchronous data load has drained.
    for load_task in load_tasks:
        load_task.result()
    # Replication was paused above; bring it back before verification.
    self.resume_xdcr()
    self.merge_all_buckets()
    expected_rows = len(self.dest_cluster.get_buckets()[0].kvs[1])
    pending = [
        self.dest_cluster.async_query_view(prefix + ddoc_name, view.name, query, expected_rows)
        for view in views
    ]
    for task in pending:
        task.result(self._poll_timeout)
    self.verify_results()
def swap_rebalance_replication_with_ddoc_compaction(self):
    """Compact a source design doc while swap-rebalancing both clusters.

    Flow: set up XDCR with load, create default views on both clusters,
    swap-rebalance the clusters listed in ``self.__rebalance`` while the
    view-creation tasks are in flight, drive update load until the view
    fragmentation threshold is hit, then compact the design doc and
    verify replication results. Skipped for ephemeral buckets, which
    have no view support.

    Fix: the original wrapped the body in ``try: ... finally: pass``,
    a no-op construct — removed with no behaviour change.
    """
    bucket_type = self._input.param("bucket_type", "membase")
    if bucket_type == "ephemeral":
        self.log.info("Test case does not apply to ephemeral")
        return
    self.setup_xdcr_and_load()
    num_views = self._input.param("num_views", 5)
    is_dev_ddoc = self._input.param("is_dev_ddoc", True)
    fragmentation_value = self._input.param("fragmentation_value", 80)
    # `views` is rebound per source bucket; the last bucket's set is used.
    for bucket in self.src_cluster.get_buckets():
        views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
    ddoc_name = "ddoc1"
    prefix = ("", "dev_")[is_dev_ddoc]
    query = {"full_set": "true", "stale": "false"}
    tasks = self.src_cluster.async_create_views(
        ddoc_name, views, BUCKET_NAME.DEFAULT)
    tasks += self.dest_cluster.async_create_views(
        ddoc_name, views, BUCKET_NAME.DEFAULT)
    # Swap-rebalance while the view-creation tasks are still running.
    for _ in range(self.__num_rebalance):
        if "C1" in self.__rebalance:
            tasks.append(self.src_cluster.async_swap_rebalance())
        if "C2" in self.__rebalance:
            tasks.append(self.dest_cluster.async_swap_rebalance())
        self.sleep(self._wait_timeout / 2)
    for task in tasks:
        task.result(self._poll_timeout)
    self.src_cluster.disable_compaction()
    fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(
        prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
    # Generate load until the fragmentation threshold is reached.
    while fragmentation_monitor.state != "FINISHED":
        # Update docs to create fragmentation.
        self.src_cluster.update_delete_data(OPS.UPDATE, self._perc_upd, self._expires)
        for view in views:
            # Run queries to build/refresh the indexes.
            self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
            self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
    fragmentation_monitor.result()
    compaction_task = self.src_cluster.async_compact_view(
        prefix + ddoc_name, 'default')
    self.assertTrue(compaction_task.result())
    self.verify_results()
def swap_rebalance_replication_with_view_queries_and_ops(self):
    """Swap-rebalance both clusters while doc-ops run and views are queried.

    Flow: set up XDCR with load, create default views on both clusters,
    fire async update/delete ops, swap-rebalance the clusters listed in
    ``self.__rebalance``, verify item counts, then query each view on
    BOTH clusters against that cluster's expected key count. Skipped for
    ephemeral buckets (no view support).

    Bug fix: the destination-side query was issued against
    ``self.src_cluster`` while passing the destination bucket's expected
    row count — it now runs on ``self.dest_cluster``, matching the
    pattern used by the other view-query tests in this suite.
    """
    bucket_type = self._input.param("bucket_type", "membase")
    if bucket_type == "ephemeral":
        self.log.info("Test case does not apply to ephemeral")
        return
    tasks = []
    try:
        self.setup_xdcr_and_load()
        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        # `views` is rebound per source bucket; the last bucket's set is used.
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]
        query = {"full_set" : "true", "stale" : "false", "connection_timeout" : 60000}
        tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)
        self.async_perform_update_delete()
        tasks = []
        # Swap-Rebalance
        for _ in range(self.__num_rebalance):
            if "C1" in self.__rebalance:
                tasks.append(self.src_cluster.async_swap_rebalance())
            if "C2" in self.__rebalance:
                tasks.append(self.dest_cluster.async_swap_rebalance())
        for task in tasks:
            task.result()
        self.merge_all_buckets()
        self.src_cluster.verify_items_count()
        self.dest_cluster.verify_items_count()
        tasks = []
        src_buckets = self.src_cluster.get_buckets()
        dest_buckets = self.dest_cluster.get_buckets()
        for view in views:
            tasks.append(self.src_cluster.async_query_view(
                prefix + ddoc_name, view.name, query,
                src_buckets[0].kvs[1].__len__()))
            # FIX: query the destination cluster (was src_cluster) for the
            # destination bucket's expected count.
            tasks.append(self.dest_cluster.async_query_view(
                prefix + ddoc_name, view.name, query,
                dest_buckets[0].kvs[1].__len__()))
        for task in tasks:
            task.result(self._poll_timeout)
        self.verify_results()
    finally:
        # Some query tasks may still be running after a timeout; cancel
        # them before proceeding to the next test.
        for task in tasks:
            task.cancel()
def swap_rebalance_replication_with_ddoc_compaction(self):
    """Compact a source-side design doc during swap-rebalance of both clusters.

    Sets up XDCR with load, creates default views on both clusters,
    swap-rebalances the clusters named in ``self.__rebalance`` while the
    view-creation tasks run, generates update load until view
    fragmentation reaches ``fragmentation_value``, compacts the design
    doc, and verifies replication. Not applicable to ephemeral buckets.

    Fix: removed the original ``try: ... finally: pass`` wrapper — a
    no-op with no cleanup, so dropping it changes nothing.
    """
    bucket_type = self._input.param("bucket_type", "membase")
    if bucket_type == "ephemeral":
        self.log.info("Test case does not apply to ephemeral")
        return
    self.setup_xdcr_and_load()
    num_views = self._input.param("num_views", 5)
    is_dev_ddoc = self._input.param("is_dev_ddoc", True)
    fragmentation_value = self._input.param("fragmentation_value", 80)
    # `views` is rebound each iteration; the last bucket's set is used.
    for bucket in self.src_cluster.get_buckets():
        views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
    ddoc_name = "ddoc1"
    prefix = ("", "dev_")[is_dev_ddoc]
    query = {"full_set": "true", "stale": "false"}
    tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
    tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
    # Swap-Rebalance while view creation is still in flight.
    for _ in range(self.__num_rebalance):
        if "C1" in self.__rebalance:
            tasks.append(self.src_cluster.async_swap_rebalance())
        if "C2" in self.__rebalance:
            tasks.append(self.dest_cluster.async_swap_rebalance())
        self.sleep(self._wait_timeout / 2)
    for task in tasks:
        task.result(self._poll_timeout)
    self.src_cluster.disable_compaction()
    fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(
        prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
    # Generate load until the fragmentation threshold is reached.
    while fragmentation_monitor.state != "FINISHED":
        # Update docs to create fragmentation.
        self.src_cluster.update_delete_data(OPS.UPDATE, self._perc_upd, self._expires)
        for view in views:
            # Run queries to build/refresh the indexes.
            self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
            self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
    fragmentation_monitor.result()
    compaction_task = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default')
    self.assertTrue(compaction_task.result())
    self.verify_results()
def replication_with_ddoc_compaction(self):
    """Verify XDCR while a source-side design-doc compaction is triggered.

    Loads both clusters, creates default views on each, drives update
    load until view fragmentation reaches the configured threshold, then
    compacts the design doc and verifies replication results.
    """
    self.setup_xdcr()
    self.src_cluster.load_all_buckets(self._num_items)
    self.dest_cluster.load_all_buckets(self._num_items)
    view_count = self._input.param("num_views", 5)
    dev_ddoc = self._input.param("is_dev_ddoc", True)
    fragmentation_value = self._input.param("fragmentation_value", 80)
    # Rebound per source bucket; the final bucket's view set is used.
    for bucket in self.src_cluster.get_buckets():
        views = Utility.make_default_views(bucket.name, view_count, dev_ddoc)
    ddoc_name = "ddoc1"
    prefix = "dev_" if dev_ddoc else ""
    query = {"full_set": "true", "stale": "false"}
    create_tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
    create_tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
    for task in create_tasks:
        task.result(self._poll_timeout)
    self.src_cluster.disable_compaction()
    monitor = self.src_cluster.async_monitor_view_fragmentation(
        prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
    # Mutate and query until the fragmentation threshold is hit.
    while monitor.state != "FINISHED":
        self.src_cluster.update_delete_data(OPS.UPDATE)
        for view in views:
            self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
            self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
    monitor.result()
    compaction = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default')
    self.assertTrue(compaction.result())
    self.verify_results()
def replication_with_ddoc_compaction(self):
    """Exercise design-doc compaction on the source cluster under XDCR.

    After loading both clusters and creating views on each, update load
    is applied until the fragmentation monitor finishes, after which the
    design doc is compacted and replication is verified.
    """
    self.setup_xdcr()
    self.src_cluster.load_all_buckets(self._num_items)
    self.dest_cluster.load_all_buckets(self._num_items)
    num_views = self._input.param("num_views", 5)
    is_dev_ddoc = self._input.param("is_dev_ddoc", True)
    fragmentation_value = self._input.param("fragmentation_value", 80)
    views = None
    # Each source bucket rebuilds `views`; only the last set is used.
    for bucket in self.src_cluster.get_buckets():
        views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
    ddoc_name = "ddoc1"
    prefix = ("", "dev_")[is_dev_ddoc]
    query = {"full_set": "true", "stale": "false"}
    pending = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
    pending += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
    for task in pending:
        task.result(self._poll_timeout)
    self.src_cluster.disable_compaction()
    frag_monitor = self.src_cluster.async_monitor_view_fragmentation(
        prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
    full_name = prefix + ddoc_name
    # Keep updating docs and querying views until fragmentation is reached.
    while frag_monitor.state != "FINISHED":
        self.src_cluster.update_delete_data(OPS.UPDATE)
        for view in views:
            self.src_cluster.query_view(full_name, view.name, query)
            self.dest_cluster.query_view(full_name, view.name, query)
    frag_monitor.result()
    self.assertTrue(
        self.src_cluster.async_compact_view(full_name, 'default').result())
    self.verify_results()
def replication_with_view_queries_and_ops(self):
    """Run view queries on both clusters while doc-ops mutate the data.

    Flow: set up XDCR, load both clusters, create default views on each,
    fire async update/delete ops per ``self._upd_clusters`` /
    ``self._del_clusters``, keep querying views until every op task
    finishes (optionally waiting out expirations), then verify item
    counts and query each view on BOTH clusters against that cluster's
    expected key count.

    Bug fix: the destination-side query was issued against
    ``self.src_cluster`` with the destination bucket's expected row
    count — it now runs on ``self.dest_cluster``, consistent with the
    other view-query tests in this suite.
    """
    tasks = []
    try:
        self.setup_xdcr()
        self.src_cluster.load_all_buckets(self._num_items)
        self.dest_cluster.load_all_buckets(self._num_items)
        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        # `views` is rebound per source bucket; the last bucket's set is used.
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]
        query = {"full_set": "true", "stale": "false", "connection_timeout": 60000}
        tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)
        tasks = []
        # Setting up doc-ops at source nodes
        if "C1" in self._upd_clusters:
            tasks.extend(self.src_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
        if "C1" in self._del_clusters:
            tasks.extend(self.src_cluster.async_update_delete(OPS.DELETE, self._perc_del))
        if "C2" in self._upd_clusters:
            tasks.extend(self.dest_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
        if "C2" in self._del_clusters:
            tasks.extend(self.dest_cluster.async_update_delete(OPS.DELETE, self._perc_del))
        self.sleep(5)
        # Keep querying views until every doc-op task has finished.
        while True:
            for view in views:
                self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
                self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
            if set([task.state for task in tasks]) != set(["FINISHED"]):
                continue
            # All ops done; optionally wait for TTL expirations to land.
            if self._wait_for_expiration:
                if "C1" in self._upd_clusters or "C2" in self._upd_clusters:
                    self.sleep(self._expires)
            break
        self.merge_all_buckets()
        self.src_cluster.verify_items_count()
        self.dest_cluster.verify_items_count()
        tasks = []
        src_buckets = self.src_cluster.get_buckets()
        dest_buckets = self.dest_cluster.get_buckets()
        for view in views:
            tasks.append(self.src_cluster.async_query_view(
                prefix + ddoc_name, view.name, query,
                src_buckets[0].kvs[1].__len__()))
            # FIX: query the destination cluster (was src_cluster) for the
            # destination bucket's expected count.
            tasks.append(self.dest_cluster.async_query_view(
                prefix + ddoc_name, view.name, query,
                dest_buckets[0].kvs[1].__len__()))
        for task in tasks:
            task.result(self._poll_timeout)
        self.verify_results()
    finally:
        # On timeout some tasks keep running; cancel them all before the
        # next test starts.
        for task in tasks:
            task.cancel()
def replication_with_view_queries_and_ops(self):
    """Query views on both clusters while update/delete ops run under XDCR.

    Loads both clusters, creates default views on each, starts async
    update/delete ops per ``self._upd_clusters`` / ``self._del_clusters``,
    polls views until the ops finish (optionally sleeping out
    expirations), then verifies item counts and queries each view on
    BOTH clusters against that cluster's expected key count.

    Bug fix: the destination-side query previously ran on
    ``self.src_cluster`` while passing the destination bucket's expected
    row count; it now runs on ``self.dest_cluster``.
    """
    tasks = []
    try:
        self.setup_xdcr()
        self.src_cluster.load_all_buckets(self._num_items)
        self.dest_cluster.load_all_buckets(self._num_items)
        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        # `views` is rebound per source bucket; the last bucket's set is used.
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]
        query = {
            "full_set": "true",
            "stale": "false",
            "connection_timeout": 60000
        }
        tasks = self.src_cluster.async_create_views(
            ddoc_name, views, BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(
            ddoc_name, views, BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)
        tasks = []
        # Setting up doc-ops at source nodes
        if "C1" in self._upd_clusters:
            tasks.extend(
                self.src_cluster.async_update_delete(
                    OPS.UPDATE, self._perc_upd, self._expires))
        if "C1" in self._del_clusters:
            tasks.extend(
                self.src_cluster.async_update_delete(
                    OPS.DELETE, self._perc_del))
        if "C2" in self._upd_clusters:
            tasks.extend(
                self.dest_cluster.async_update_delete(
                    OPS.UPDATE, self._perc_upd, self._expires))
        if "C2" in self._del_clusters:
            tasks.extend(
                self.dest_cluster.async_update_delete(
                    OPS.DELETE, self._perc_del))
        self.sleep(5)
        # Keep querying views until every doc-op task has finished.
        while True:
            for view in views:
                self.src_cluster.query_view(prefix + ddoc_name,
                                            view.name, query)
                self.dest_cluster.query_view(prefix + ddoc_name,
                                             view.name, query)
            if set([task.state for task in tasks]) != set(["FINISHED"]):
                continue
            # All ops done; optionally wait for TTL expirations to land.
            if self._wait_for_expiration:
                if "C1" in self._upd_clusters or "C2" in self._upd_clusters:
                    self.sleep(self._expires)
            break
        self.merge_all_buckets()
        self.src_cluster.verify_items_count()
        self.dest_cluster.verify_items_count()
        tasks = []
        src_buckets = self.src_cluster.get_buckets()
        dest_buckets = self.dest_cluster.get_buckets()
        for view in views:
            tasks.append(
                self.src_cluster.async_query_view(
                    prefix + ddoc_name, view.name, query,
                    src_buckets[0].kvs[1].__len__()))
            # FIX: query the destination cluster (was src_cluster) for the
            # destination bucket's expected count.
            tasks.append(
                self.dest_cluster.async_query_view(
                    prefix + ddoc_name, view.name, query,
                    dest_buckets[0].kvs[1].__len__()))
        for task in tasks:
            task.result(self._poll_timeout)
        self.verify_results()
    finally:
        # On timeout some tasks keep running; cancel them all before the
        # next test starts.
        for task in tasks:
            task.cancel()
def swap_rebalance_replication_with_view_queries_and_ops(self):
    """Swap-rebalance both clusters during doc-ops, then verify via views.

    Sets up XDCR with load, creates default views on both clusters, runs
    async update/delete ops, swap-rebalances the clusters named in
    ``self.__rebalance``, verifies item counts, and queries each view on
    BOTH clusters against that cluster's expected key count. Skipped for
    ephemeral buckets (no view support).

    Bug fix: the destination-side query previously ran on
    ``self.src_cluster`` while passing the destination bucket's expected
    row count; it now runs on ``self.dest_cluster``.
    """
    bucket_type = self._input.param("bucket_type", "membase")
    if bucket_type == "ephemeral":
        self.log.info("Test case does not apply to ephemeral")
        return
    tasks = []
    try:
        self.setup_xdcr_and_load()
        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        # `views` is rebound per source bucket; the last bucket's set is used.
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]
        query = {
            "full_set": "true",
            "stale": "false",
            "connection_timeout": 60000
        }
        tasks = self.src_cluster.async_create_views(
            ddoc_name, views, BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(
            ddoc_name, views, BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)
        self.async_perform_update_delete()
        tasks = []
        # Swap-Rebalance
        for _ in range(self.__num_rebalance):
            if "C1" in self.__rebalance:
                tasks.append(self.src_cluster.async_swap_rebalance())
            if "C2" in self.__rebalance:
                tasks.append(self.dest_cluster.async_swap_rebalance())
        for task in tasks:
            task.result()
        self.merge_all_buckets()
        self.src_cluster.verify_items_count()
        self.dest_cluster.verify_items_count()
        tasks = []
        src_buckets = self.src_cluster.get_buckets()
        dest_buckets = self.dest_cluster.get_buckets()
        for view in views:
            tasks.append(
                self.src_cluster.async_query_view(
                    prefix + ddoc_name, view.name, query,
                    src_buckets[0].kvs[1].__len__()))
            # FIX: query the destination cluster (was src_cluster) for the
            # destination bucket's expected count.
            tasks.append(
                self.dest_cluster.async_query_view(
                    prefix + ddoc_name, view.name, query,
                    dest_buckets[0].kvs[1].__len__()))
        for task in tasks:
            task.result(self._poll_timeout)
        self.verify_results()
    finally:
        # Some query tasks not finished after timeout keep on running;
        # cancel them before proceeding to the next test.
        for task in tasks:
            task.cancel()