Example #1
    def test_replication_with_view_queries(self):
        self.verify_referenced_by_names(self.src_nodes, self.src_cluster.get_host_names())
        self.verify_referenced_by_names(self.dest_nodes, self.dest_cluster.get_host_names())

        self.setup_xdcr_and_load()
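        # XDCR replication between the source and destination clusters is now
        # configured and the initial items are loaded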

        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        src_buckets = self.src_cluster.get_buckets()
        dest_buckets = self.dest_cluster.get_buckets()
        for bucket in src_buckets:
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
        for bucket in dest_buckets:
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)
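        # NOTE: 'views' is overwritten on each iteration, so the set built for
        # the last bucket is the one passed to async_create_views() below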
        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]
        query = {"full_set" : "true", "stale" : "false"}

        tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)

        self.merge_all_buckets()
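        # merging the per-cluster key-value stores is assumed to make the
        # expected item counts used below account for the replicated documents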
        tasks = []
        for view in views:
            tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, len(src_buckets[0].kvs[1])))
            tasks.append(self.dest_cluster.async_query_view(prefix + ddoc_name, view.name, query, len(dest_buckets[0].kvs[1])))
        for task in tasks:
            task.result(self._poll_timeout)

        self.verify_results()
        self.verify_referenced_by_names(self.src_nodes, self.src_cluster.get_host_names())
        self.verify_referenced_by_names(self.dest_nodes, self.dest_cluster.get_host_names())
Example #2
    def view_query_pause_resume(self):

        load_tasks = self.__async_load_xdcr()
        self.pause_xdcr()
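        # with replication paused, create the views and let the data load
        # finish before resuming XDCR below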

        for bucket in self.dest_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, self.num_views,
                                               self.is_dev_ddoc)
        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[self.is_dev_ddoc]
        query = {"full_set": "true", "stale": "false"}
        tasks = self.dest_cluster.async_create_views(ddoc_name, views)

        for task in tasks:
            task.result(self._poll_timeout)
        # Wait for the asynchronous data load to finish
        for load_task in load_tasks:
            load_task.result()

        # Resume the paused XDCR replications
        self.resume_xdcr()

        self.merge_all_buckets()
        tasks = []
        for view in views:
            tasks.append(
                self.dest_cluster.async_query_view(
                    prefix + ddoc_name, view.name, query,
                    len(self.dest_cluster.get_buckets()[0].kvs[1])))

        for task in tasks:
            task.result(self._poll_timeout)
        self.verify_results()
Example #3
    def view_query_pause_resume(self):

        load_tasks = self.__async_load_xdcr()
        self.pause_xdcr()

        for bucket in self.dest_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, self.num_views,
                                            self.is_dev_ddoc)
        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[self.is_dev_ddoc]
        query = {"full_set": "true", "stale": "false"}
        tasks = self.dest_cluster.async_create_views(ddoc_name, views)

        for task in tasks:
            task.result(self._poll_timeout)
        # Wait for the asynchronous data load to finish
        for load_task in load_tasks:
            load_task.result()

        # Resume the paused XDCR replications
        self.resume_xdcr()

        self.merge_all_buckets()
        tasks = []
        for view in views:
            tasks.append(self.dest_cluster.async_query_view(
                            prefix + ddoc_name, view.name, query,
                            len(self.dest_cluster.get_buckets()[0].kvs[1])))

        for task in tasks:
            task.result(self._poll_timeout)
        self.verify_results()
    def swap_rebalance_replication_with_ddoc_compaction(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return
        try:
            self.setup_xdcr_and_load()

            num_views = self._input.param("num_views", 5)
            is_dev_ddoc = self._input.param("is_dev_ddoc", True)
            fragmentation_value = self._input.param("fragmentation_value", 80)
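            # fragmentation_value is the view-index fragmentation (%) to reach
            # before compaction is triggered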
            for bucket in self.src_cluster.get_buckets():
                views = Utility.make_default_views(bucket.name, num_views,
                                                   is_dev_ddoc)

            ddoc_name = "ddoc1"
            prefix = ("", "dev_")[is_dev_ddoc]

            query = {"full_set": "true", "stale": "false"}

            tasks = self.src_cluster.async_create_views(
                ddoc_name, views, BUCKET_NAME.DEFAULT)
            tasks += self.dest_cluster.async_create_views(
                ddoc_name, views, BUCKET_NAME.DEFAULT)

            # Swap-Rebalance
            for _ in range(self.__num_rebalance):
                if "C1" in self.__rebalance:
                    tasks.append(self.src_cluster.async_swap_rebalance())
                if "C2" in self.__rebalance:
                    tasks.append(self.dest_cluster.async_swap_rebalance())

            self.sleep(self._wait_timeout / 2)
            for task in tasks:
                task.result(self._poll_timeout)

            self.src_cluster.disable_compaction()
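            # auto-compaction is disabled so view fragmentation can build up
            # to the target value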
            fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(
                prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
            # generate load until fragmentation reached
            while fragmentation_monitor.state != "FINISHED":
                # update docs to create fragmentation
                self.src_cluster.update_delete_data(OPS.UPDATE, self._perc_upd,
                                                    self._expires)
                for view in views:
                    # run queries to create indexes
                    self.src_cluster.query_view(prefix + ddoc_name, view.name,
                                                query)
                    self.dest_cluster.query_view(prefix + ddoc_name, view.name,
                                                 query)
            fragmentation_monitor.result()

            compaction_task = self.src_cluster.async_compact_view(
                prefix + ddoc_name, 'default')

            self.assertTrue(compaction_task.result())

            self.verify_results()
        finally:
            pass
Example #5
    def swap_rebalance_replication_with_view_queries_and_ops(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return
        tasks = []
        try:
            self.setup_xdcr_and_load()

            num_views = self._input.param("num_views", 5)
            is_dev_ddoc = self._input.param("is_dev_ddoc", True)
            for bucket in self.src_cluster.get_buckets():
                views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)

            ddoc_name = "ddoc1"
            prefix = ("", "dev_")[is_dev_ddoc]

            query = {"full_set" : "true", "stale" : "false", "connection_timeout" : 60000}

            tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
            tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)

            for task in tasks:
                task.result(self._poll_timeout)

            self.async_perform_update_delete()
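            # doc updates/deletes are kicked off here and are expected to
            # overlap with the swap rebalance started below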

            tasks = []
            # Swap-Rebalance
            for _ in range(self.__num_rebalance):
                if "C1" in self.__rebalance:
                    tasks.append(self.src_cluster.async_swap_rebalance())
                if "C2" in self.__rebalance:
                    tasks.append(self.dest_cluster.async_swap_rebalance())

            for task in tasks:
                task.result()

            self.merge_all_buckets()
            self.src_cluster.verify_items_count()
            self.dest_cluster.verify_items_count()

            tasks = []
            src_buckets = self.src_cluster.get_buckets()
            dest_buckets = self.dest_cluster.get_buckets()
            for view in views:
                tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, len(src_buckets[0].kvs[1])))
                tasks.append(self.dest_cluster.async_query_view(prefix + ddoc_name, view.name, query, len(dest_buckets[0].kvs[1])))

            for task in tasks:
                task.result(self._poll_timeout)

            self.verify_results()
        finally:
            # Some query tasks may not finish within the timeout and keep
            # running; cancel them before proceeding to the next test.
            for task in tasks:
                task.cancel()
Example #6
    def swap_rebalance_replication_with_ddoc_compaction(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return
        try:
            self.setup_xdcr_and_load()

            num_views = self._input.param("num_views", 5)
            is_dev_ddoc = self._input.param("is_dev_ddoc", True)
            fragmentation_value = self._input.param("fragmentation_value", 80)
            for bucket in self.src_cluster.get_buckets():
                views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)

            ddoc_name = "ddoc1"
            prefix = ("", "dev_")[is_dev_ddoc]

            query = {"full_set": "true", "stale": "false"}

            tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
            tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)

            # Swap-Rebalance
            for _ in range(self.__num_rebalance):
                if "C1" in self.__rebalance:
                    tasks.append(self.src_cluster.async_swap_rebalance())
                if "C2" in self.__rebalance:
                    tasks.append(self.dest_cluster.async_swap_rebalance())

            self.sleep(self._wait_timeout / 2)
            for task in tasks:
                task.result(self._poll_timeout)

            self.src_cluster.disable_compaction()
            fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
            # generate load until fragmentation reached
            while fragmentation_monitor.state != "FINISHED":
                # update docs to create fragmentation
                self.src_cluster.update_delete_data(OPS.UPDATE, self._perc_upd, self._expires)
                for view in views:
                    # run queries to create indexes
                    self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
                    self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
            fragmentation_monitor.result()

            compaction_task = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default')

            self.assertTrue(compaction_task.result())

            self.verify_results()
        finally:
            pass
Example #7
    def replication_with_ddoc_compaction(self):
        self.setup_xdcr()
        self.src_cluster.load_all_buckets(self._num_items)
        self.dest_cluster.load_all_buckets(self._num_items)

        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        fragmentation_value = self._input.param("fragmentation_value", 80)
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views,
                                               is_dev_ddoc)

        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]

        query = {"full_set": "true", "stale": "false"}

        tasks = self.src_cluster.async_create_views(ddoc_name, views,
                                                    BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views,
                                                      BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)

        self.src_cluster.disable_compaction()
        fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(
            prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
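        # the monitor task completes once view fragmentation reaches
        # fragmentation_value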
        # generate load until fragmentation reached
        while fragmentation_monitor.state != "FINISHED":
            # update docs to create fragmentation
            self.src_cluster.update_delete_data(OPS.UPDATE)
            for view in views:
                # run queries to create indexes
                self.src_cluster.query_view(prefix + ddoc_name, view.name,
                                            query)
                self.dest_cluster.query_view(prefix + ddoc_name, view.name,
                                             query)
        fragmentation_monitor.result()

        compaction_task = self.src_cluster.async_compact_view(
            prefix + ddoc_name, 'default')

        self.assertTrue(compaction_task.result())

        self.verify_results()
Example #8
    def _start_es_replication(self, bucket='default', xdcr_params=None):
        # avoid a shared mutable default argument
        if xdcr_params is None:
            xdcr_params = {}
        rest_conn = RestConnection(self.src_cluster.get_master_node())
        if bucket == 'default':
            self.log.info("Creating default bucket")
            rest_conn.create_bucket(bucket='default', ramQuotaMB=100, authType='none', saslPassword='',
                                    replicaNumber=1, proxyPort=11211, bucketType='membase', replica_index=1,
                                    threadsNumber=3, flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='default', authType='none',
                                        saslPassword='', replicaNumber=1, proxyPort=11211,
                                        bucketType='membase', evictionPolicy='valueOnly')
        elif bucket == 'sasl':
            self.log.info("Creating sasl bucket")
            rest_conn.create_bucket(bucket='sasl', ramQuotaMB=100, authType='sasl', saslPassword='******',
                                    replicaNumber=1, proxyPort=11211, bucketType='membase', replica_index=1,
                                    threadsNumber=3, flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='sasl', authType='sasl',
                                        saslPassword='******', replicaNumber=1, proxyPort=11211,
                                        bucketType='membase', evictionPolicy='valueOnly')
        elif bucket == 'standard':
            self.log.info("Creating standard bucket")
            rest_conn.create_bucket(bucket='standard', ramQuotaMB=100, authType='none', saslPassword='',
                                    replicaNumber=1, proxyPort=STANDARD_BUCKET_PORT, bucketType='membase',
                                    replica_index=1, threadsNumber=3, flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='standard', authType='none',
                                        saslPassword='', replicaNumber=1, proxyPort=STANDARD_BUCKET_PORT,
                                        bucketType='membase', evictionPolicy='valueOnly')
        elif bucket == 'lww':
            self.log.info("Creating lww bucket")
            rest_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='',
                                    replicaNumber=1, proxyPort=11211, bucketType='membase', replica_index=1,
                                    threadsNumber=3, flushEnabled=1, lww=True)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='lww', authType='none',
                                        saslPassword='', replicaNumber=1, proxyPort=11211,
                                        bucketType='membase', evictionPolicy='valueOnly')
        # create a matching index on the Elasticsearch side
        esrest_conn = EsRestConnection(self.dest_cluster.get_master_node())
        esrest_conn.create_index(bucket)
        # register the ES node as a remote cluster named 'es'; 9091 is the
        # default port of the Couchbase transport plugin for Elasticsearch
        rest_conn.add_remote_cluster(remoteIp=self.dest_master.ip, remotePort=9091, username='******',
                                     password='******', name='es')
        self.src_cluster.get_remote_clusters().append(
            XDCRRemoteClusterRef(self.src_cluster, self.dest_cluster,
                                 Utility.get_rc_name(self.src_cluster.get_name(),
                                                     self.dest_cluster.get_name())))
        # replication to Elasticsearch uses the CAPI (XDCR v1) protocol
        repl_id = rest_conn.start_replication(replicationType='continuous', fromBucket=bucket, toCluster='es',
                                              rep_type='capi', toBucket=bucket, xdcr_params=xdcr_params)
        return repl_id
Example #9
    def replication_with_ddoc_compaction(self):
        self.setup_xdcr()
        self.src_cluster.load_all_buckets(self._num_items)
        self.dest_cluster.load_all_buckets(self._num_items)

        num_views = self._input.param("num_views", 5)
        is_dev_ddoc = self._input.param("is_dev_ddoc", True)
        fragmentation_value = self._input.param("fragmentation_value", 80)
        for bucket in self.src_cluster.get_buckets():
            views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)

        ddoc_name = "ddoc1"
        prefix = ("", "dev_")[is_dev_ddoc]

        query = {"full_set": "true", "stale": "false"}

        tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
        for task in tasks:
            task.result(self._poll_timeout)

        self.src_cluster.disable_compaction()
        fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT)
        # generate load until fragmentation reached
        while fragmentation_monitor.state != "FINISHED":
            # update docs to create fragmentation
            self.src_cluster.update_delete_data(OPS.UPDATE)
            for view in views:
                # run queries to create indexes
                self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
                self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
        fragmentation_monitor.result()

        compaction_task = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default')

        self.assertTrue(compaction_task.result())

        self.verify_results()
Example #10
    def replication_with_view_queries_and_ops(self):
        tasks = []
        try:
            self.setup_xdcr()
            self.src_cluster.load_all_buckets(self._num_items)
            self.dest_cluster.load_all_buckets(self._num_items)

            num_views = self._input.param("num_views", 5)
            is_dev_ddoc = self._input.param("is_dev_ddoc", True)
            for bucket in self.src_cluster.get_buckets():
                views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc)

            ddoc_name = "ddoc1"
            prefix = ("", "dev_")[is_dev_ddoc]

            query = {"full_set": "true", "stale": "false", "connection_timeout": 60000}

            tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)
            tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT)

            for task in tasks:
                task.result(self._poll_timeout)

            tasks = []
            # Set up doc ops (updates/deletes) on the configured clusters
            if "C1" in self._upd_clusters:
                tasks.extend(self.src_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
            if "C1" in self._del_clusters:
                tasks.extend(self.src_cluster.async_update_delete(OPS.DELETE, self._perc_del))
            if "C2" in self._upd_clusters:
                tasks.extend(self.dest_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires))
            if "C2" in self._del_clusters:
                tasks.extend(self.dest_cluster.async_update_delete(OPS.DELETE, self._perc_del))

            self.sleep(5)
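            # keep querying the views until every update/delete task reports FINISHED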
            while True:
                for view in views:
                    self.src_cluster.query_view(prefix + ddoc_name, view.name, query)
                    self.dest_cluster.query_view(prefix + ddoc_name, view.name, query)
                if set([task.state for task in tasks]) != set(["FINISHED"]):
                    continue
                else:
                    if self._wait_for_expiration:
                        if "C1" in self._upd_clusters or "C2" in self._upd_clusters:
                            self.sleep(self._expires)
                    break

            self.merge_all_buckets()
            self.src_cluster.verify_items_count()
            self.dest_cluster.verify_items_count()

            tasks = []
            src_buckets = self.src_cluster.get_buckets()
            dest_buckets = self.dest_cluster.get_buckets()
            for view in views:
                tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, len(src_buckets[0].kvs[1])))
                tasks.append(self.dest_cluster.async_query_view(prefix + ddoc_name, view.name, query, len(dest_buckets[0].kvs[1])))

            for task in tasks:
                task.result(self._poll_timeout)

            self.verify_results()
        finally:
            # On a timeout some tasks may still be running; cancel them all
            # before proceeding to the next test
            for task in tasks:
                task.cancel()
Example #11
    def replication_with_view_queries_and_ops(self):
        tasks = []
        try:
            self.setup_xdcr()
            self.src_cluster.load_all_buckets(self._num_items)
            self.dest_cluster.load_all_buckets(self._num_items)

            num_views = self._input.param("num_views", 5)
            is_dev_ddoc = self._input.param("is_dev_ddoc", True)
            for bucket in self.src_cluster.get_buckets():
                views = Utility.make_default_views(bucket.name, num_views,
                                                   is_dev_ddoc)

            ddoc_name = "ddoc1"
            prefix = ("", "dev_")[is_dev_ddoc]

            query = {
                "full_set": "true",
                "stale": "false",
                "connection_timeout": 60000
            }

            tasks = self.src_cluster.async_create_views(
                ddoc_name, views, BUCKET_NAME.DEFAULT)
            tasks += self.dest_cluster.async_create_views(
                ddoc_name, views, BUCKET_NAME.DEFAULT)

            for task in tasks:
                task.result(self._poll_timeout)

            tasks = []
            # Set up doc ops (updates/deletes) on the configured clusters
            if "C1" in self._upd_clusters:
                tasks.extend(
                    self.src_cluster.async_update_delete(
                        OPS.UPDATE, self._perc_upd, self._expires))
            if "C1" in self._del_clusters:
                tasks.extend(
                    self.src_cluster.async_update_delete(
                        OPS.DELETE, self._perc_del))
            if "C2" in self._upd_clusters:
                tasks.extend(
                    self.dest_cluster.async_update_delete(
                        OPS.UPDATE, self._perc_upd, self._expires))
            if "C2" in self._del_clusters:
                tasks.extend(
                    self.dest_cluster.async_update_delete(
                        OPS.DELETE, self._perc_del))

            self.sleep(5)
            while True:
                for view in views:
                    self.src_cluster.query_view(prefix + ddoc_name, view.name,
                                                query)
                    self.dest_cluster.query_view(prefix + ddoc_name, view.name,
                                                 query)
                if set([task.state for task in tasks]) != set(["FINISHED"]):
                    continue
                else:
                    if self._wait_for_expiration:
                        if "C1" in self._upd_clusters or "C2" in self._upd_clusters:
                            self.sleep(self._expires)
                    break

            self.merge_all_buckets()
            self.src_cluster.verify_items_count()
            self.dest_cluster.verify_items_count()

            tasks = []
            src_buckets = self.src_cluster.get_buckets()
            dest_buckets = self.dest_cluster.get_buckets()
            for view in views:
                tasks.append(
                    self.src_cluster.async_query_view(
                        prefix + ddoc_name, view.name, query,
                        len(src_buckets[0].kvs[1])))
                tasks.append(
                    self.dest_cluster.async_query_view(
                        prefix + ddoc_name, view.name, query,
                        len(dest_buckets[0].kvs[1])))

            for task in tasks:
                task.result(self._poll_timeout)

            self.verify_results()
        finally:
            # On a timeout some tasks may still be running; cancel them all
            # before proceeding to the next test
            for task in tasks:
                task.cancel()
    def swap_rebalance_replication_with_view_queries_and_ops(self):
        bucket_type = self._input.param("bucket_type", "membase")
        if bucket_type == "ephemeral":
            self.log.info("Test case does not apply to ephemeral")
            return
        tasks = []
        try:
            self.setup_xdcr_and_load()

            num_views = self._input.param("num_views", 5)
            is_dev_ddoc = self._input.param("is_dev_ddoc", True)
            for bucket in self.src_cluster.get_buckets():
                views = Utility.make_default_views(bucket.name, num_views,
                                                   is_dev_ddoc)

            ddoc_name = "ddoc1"
            prefix = ("", "dev_")[is_dev_ddoc]

            query = {
                "full_set": "true",
                "stale": "false",
                "connection_timeout": 60000
            }

            tasks = self.src_cluster.async_create_views(
                ddoc_name, views, BUCKET_NAME.DEFAULT)
            tasks += self.dest_cluster.async_create_views(
                ddoc_name, views, BUCKET_NAME.DEFAULT)

            for task in tasks:
                task.result(self._poll_timeout)

            self.async_perform_update_delete()

            tasks = []
            # Swap-Rebalance
            for _ in range(self.__num_rebalance):
                if "C1" in self.__rebalance:
                    tasks.append(self.src_cluster.async_swap_rebalance())
                if "C2" in self.__rebalance:
                    tasks.append(self.dest_cluster.async_swap_rebalance())

            for task in tasks:
                task.result()

            self.merge_all_buckets()
            self.src_cluster.verify_items_count()
            self.dest_cluster.verify_items_count()

            tasks = []
            src_buckets = self.src_cluster.get_buckets()
            dest_buckets = self.dest_cluster.get_buckets()
            for view in views:
                tasks.append(
                    self.src_cluster.async_query_view(
                        prefix + ddoc_name, view.name, query,
                        len(src_buckets[0].kvs[1])))
                tasks.append(
                    self.dest_cluster.async_query_view(
                        prefix + ddoc_name, view.name, query,
                        len(dest_buckets[0].kvs[1])))

            for task in tasks:
                task.result(self._poll_timeout)

            self.verify_results()
        finally:
            # Some query tasks may not finish within the timeout and keep
            # running; cancel them before proceeding to the next test.
            for task in tasks:
                task.cancel()