class XDCRCallable:
    def __init__(self, nodes, num_clusters=2):
        self.log = logger.Logger.get_logger()
        self.cluster_list = []
        self.__clusterop = Cluster()
        self.setup_xdcr(nodes, num_clusters)

    def setup_xdcr(self, nodes, num_clusters):
        self._setup_xdcr_topology(nodes, num_clusters)

    def __assign_nodes_to_clusters(self, nodes, num_nodes_per_cluster):
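        """Yield consecutive slices of nodes, one slice per cluster."""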
        for i in range(0, len(nodes), num_nodes_per_cluster):
            yield nodes[i:i + num_nodes_per_cluster]

    def _setup_xdcr_topology(self, nodes, num_clusters):
        num_nodes_per_cluster = len(nodes) // num_clusters
        count = 1
        for cluster_nodes in list(self.__assign_nodes_to_clusters(nodes, num_nodes_per_cluster)):
            cluster = CouchbaseCluster(name="C" + str(count), nodes=cluster_nodes, log=self.log)
            self.cleanup_cluster(cluster)
            #cluster.cleanup_cluster(test_case="xdcr upgrade")
            self.__init_cluster(cluster)
            self.log.info("Cluster {0}:{1} created".format(cluster.get_name(), cluster.get_nodes()))
            self.cluster_list.append(cluster)
            count += 1

        # TODO: implementing chain topology for now, need to extend to other xdcr topologies
        # C1->C2, C2->C3..
        for count, cluster in enumerate(self.cluster_list):
            if count < len(self.cluster_list) - 1:
                next_cluster = self.cluster_list[count + 1]
                cluster.add_remote_cluster(next_cluster,
                                           cluster.get_name() + "-to-" + next_cluster.get_name())

    def ___init_nodes(self, cluster, disabled_consistent_view=None):
        """Initialize all nodes.
        """
        tasks = []
        for node in cluster.get_nodes():
            tasks.append(
                self.__clusterop.async_init_node(
                    node))
        for task in tasks:
            task.result(60)

    def __init_cluster(self, cluster):
        """Initialize cluster.
        1. Initialize all nodes.
        2. Add all nodes to the cluster.
        """
        self.___init_nodes(cluster)
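        # Rebalance in every node after the first (the cluster's initial node)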
        self.__clusterop.async_rebalance(
            cluster.get_nodes(),
            cluster.get_nodes()[1:],
            []).result()

    def cleanup_cluster(self, cluster):
        """Cleanup cluster.
        1. Remove all remote cluster references.
        2. Remove all replications.
        3. Remove all buckets.
        """
        self.log.info("removing xdcr/nodes settings")
        rest = RestConnection(cluster.get_master_node())
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        rest.remove_all_recoveries()
        cluster.cleanup_cluster("upgradeXDCR")

    def _create_replication(self):
        for cluster in self.cluster_list:
            self.log.info("Creating replication from {0}->{1}".format(cluster.get_name, cluster.get_remote_clusters()))

    def _set_replication_properties(self, param_str):
        pass

    def _get_replication_properties(self, replid):
        pass

    def __del__(self):
        for cluster in self.cluster_list:
            self.cleanup_cluster(cluster)
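
# A minimal usage sketch (the `servers` list is hypothetical, supplied by the
# test framework): XDCRCallable splits the nodes evenly across `num_clusters`
# clusters and wires them into a chain topology (C1 -> C2 -> ...), e.g.:
#
#     xdcr = XDCRCallable(servers, num_clusters=2)
#     xdcr._create_replication()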
Example No. 2
class nwusage(XDCRNewBaseTest):
    def setUp(self):
        super(nwusage, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.cluster = Cluster()
        self.num_src_nodes = len(self.src_cluster.get_nodes())
        self.num_dest_nodes = len(self.dest_cluster.get_nodes())

    def tearDown(self):
        super(nwusage, self).tearDown()

    def _get_nwusage_limit(self):
        # Pick random nw_limit between 1-100 MB
        return random.randint(1, 100)

    def _set_nwusage_limit(self, cluster, nw_limit):
        repl_id = cluster.get_remote_clusters()[0].get_replications(
        )[0].get_repl_id()
        shell = RemoteMachineShellConnection(cluster.get_master_node())
        repl_id = str(repl_id).replace('/', '%2F')
        self.log.info(
            "Network bandwidth is throttled at {0} MB".format(nw_limit))
        base_url = "http://" + cluster.get_master_node(
        ).ip + ":8091/settings/replications/" + repl_id
        command = "curl -X POST -u Administrator:password " + base_url + " -d networkUsageLimit=" + str(
            nw_limit)
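        # The command above expands to, e.g. (replication id is hypothetical):
        #   curl -X POST -u Administrator:password \
        #        http://<master_ip>:8091/settings/replications/<uuid>%2Fdefault%2Fdefault \
        #        -d networkUsageLimit=50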
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)

    def _set_doc_size_num(self):
        # Weighted randomization of doc sizes ranging from 500 bytes to 20 MB
        self.doc_sizes = [500] * 10 + [10 ** 3] * 30 + [10 ** 4] * 20 + [10 ** 5] * 15 + \
                         [10 ** 6] * 10 + [10 ** 7] * 10 + [2 * 10 ** 7] * 5
        self._value_size = random.choice(self.doc_sizes)
        self._num_items = 10**7
        self._temp = self._value_size
        # Decrease number of docs as size increases
        while self._temp > 10:
            self._temp //= 10
            self._num_items //= 10
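        # e.g. a 1 MB doc (_value_size == 10**6) ends this loop with _num_items == 100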
        self._value_size *= self.num_src_nodes
        self._num_items *= self.num_src_nodes
        self.log.info("Doc size = {0} bytes, Number of docs = {1}".format(
            self._value_size, self._num_items))

    def _extract_timestamp(self, logmsg):
        # matches timestamp format : 2018-10-11T00:02:35
        timestamp_str = re.search(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}',
                                  logmsg)
        timestamp = datetime.datetime.strptime(timestamp_str.group(),
                                               '%Y-%m-%dT%H:%M:%S')
        return timestamp

    def _extract_bandwidth_usage(self, node, time_to_compare, nw_max, nw_usage,
                                 end_time):
        """Extract current, non-zero bandwidth usage stats from the logs."""
        valid_count = 0
        skip_count = 0
        matches, count = NodeHelper.check_goxdcr_log(
            node,
            "\\\"bandwidth_usage\\\": " + nw_usage,
            print_matches=True,
            timeout=60)
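        # the escaped pattern matches literal '"bandwidth_usage": <value>' entries in the goxdcr log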
        for item in matches:
            item_datetime = self._extract_timestamp(item)
            # Ignore entries that happened before the replication was set up
            if item_datetime < time_to_compare:
                skip_count += 1
                continue
            if end_time:
                end_datetime = self._extract_timestamp(end_time)
                if item_datetime > end_datetime:
                    skip_count += 1
                    continue
            bandwidth_usage = int(
                float(((item.split('"bandwidth_usage": ')[1]).split(' ')[0]
                       ).rstrip(',')))
            if bandwidth_usage > nw_max:
                self.fail(
                    "Bandwidth usage {0} is higher than Bandwidth limit {1} in {2}"
                    .format(bandwidth_usage, nw_max, item))
            self.log.info("BANDWIDTH_USAGE ={0}".format(bandwidth_usage))
            if nw_usage == "0" and bandwidth_usage != 0:
                self.fail(
                    "Expecting bandwidth usage to be 0 but it is {0}".format(
                        bandwidth_usage))
            valid_count += 1
        self.log.info("Stale entries :{0}, Valid entries :{1}".format(
            skip_count, valid_count))
        return valid_count

    def _extract_bandwidth_quota(self, node):
        matches, count = NodeHelper.check_goxdcr_log(node,
                                                     "bandwidth_usage_quota=" +
                                                     "[0-9][0-9]*",
                                                     print_matches=True,
                                                     timeout=60)
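        # a matching log line contains e.g. 'bandwidth_usage_quota=52428800' (hypothetical value)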
        bandwidth_quota = int(
            float(((
                matches[-1].split('bandwidth_usage_quota=')[1]).rstrip(' '))))
        return bandwidth_quota

    def _verify_bandwidth_usage(self,
                                node,
                                nw_limit=1,
                                no_of_nodes=1,
                                event_time=None,
                                nw_usage="[0-9][0-9]*",
                                end_time=None):
        """Verify bandwidth usage from goxdcr logs between event_time and end_time.

        nw_limit and no_of_nodes are optional: the effective quota (nw_max) is
        read from the logs, so several callers omit them.
        """
        #nw_max = (nw_limit * 1024 * 1024) // no_of_nodes
        if event_time:
            time_to_compare = self._extract_timestamp(event_time)
        else:
            matches, count = NodeHelper.check_goxdcr_log(
                node,
                "Success adding replication specification",
                print_matches=True,
                timeout=60)
            # Time when replication was set up
            if count > 0:
                time_to_compare = self._extract_timestamp(matches[-1])
            else:
                self.fail("Replication not successful")
        nw_max = self._extract_bandwidth_quota(node)
        self.sleep(60, 'Waiting for bandwidth usage logs..')
        # Try 3 times to extract current bandwidth usage from logs
        attempt = 0
        while attempt < 3:
            self.sleep(30, 'Waiting for bandwidth usage logs..')
            valid_count = self._extract_bandwidth_usage(
                node, time_to_compare, nw_max, nw_usage, end_time)
            if (valid_count == 0 and self._input.param(
                    "replication_type") == "capi") or nw_limit == 0:
                self.log.info(
                    "Bandwidth Throttler not enabled on replication as expected"
                )
                break
            if valid_count > 0:
                break
            attempt += 1
        else:
            self.fail("Bandwidth Throttler not enabled!")
        # Check if large docs are not getting stuck
        matches, src_count = NodeHelper.check_goxdcr_log(
            self.src_master,
            "The connection is ruined",
            print_matches=True,
            timeout=10)
        if src_count:
            for item in matches:
                item_datetime = self._extract_timestamp(item)
                # Ignore errors that happened before the replication was set up
                if item_datetime < time_to_compare:
                    continue
                else:
                    self.fail("Possibly hit MB-31765")

    def _get_current_time(self, server):
        shell = RemoteMachineShellConnection(server)
        command = "date +'%Y-%m-%dT%H:%M:%S'"
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        curr_time = output[0].strip()
        return curr_time

    def _get_generator(self, prefix, docsize, numitems):
        if self._use_java_sdk:
            gen_create = SDKDataLoader(num_ops=numitems,
                                       percent_create=100,
                                       key_prefix=prefix,
                                       doc_size=docsize,
                                       timeout=1000)
        else:
            gen_create = BlobGenerator(prefix, prefix, docsize, end=numitems)
        return gen_create

    def test_nwusage_with_unidirection(self):
        self.setup_xdcr()
        self.sleep(60)

        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=self.num_src_nodes)

    def test_nwusage_with_bidirection(self):
        self.setup_xdcr()
        self.sleep(60)

        self._set_doc_size_num()
        src_nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                src_nw_limit * self.num_src_nodes)

        self._set_doc_size_num()
        dest_nw_limit = self._input.param("nw_limit",
                                          self._get_nwusage_limit())
        self._set_nwusage_limit(self.dest_cluster,
                                dest_nw_limit * self.num_dest_nodes)

        gen_create1 = self._get_generator('nwOne', self._value_size,
                                          self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = self._get_generator('nwTwo', self._value_size,
                                          self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create2)

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=src_nw_limit,
                                     no_of_nodes=self.num_src_nodes)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(),
                                     nw_limit=dest_nw_limit,
                                     no_of_nodes=self.num_dest_nodes)

    def test_nwusage_with_unidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create)

        self.src_cluster.pause_all_replications()

        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)

        self.src_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=self.num_src_nodes)

    def test_nwusage_with_bidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create1 = self._get_generator('nwOne', self._value_size,
                                          self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create1)
        gen_create2 = self._get_generator('nwTwo', self._value_size,
                                          self._num_items)
        tasks.extend(
            self.dest_cluster.async_load_all_buckets_from_generator(
                kv_gen=gen_create2))

        self.src_cluster.pause_all_replications()
        self.dest_cluster.pause_all_replications()

        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)
        self._set_nwusage_limit(self.dest_cluster,
                                nw_limit * self.num_dest_nodes)

        self.src_cluster.resume_all_replications()
        self.dest_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=self.num_src_nodes)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=self.num_dest_nodes)

    def test_nwusage_with_unidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create)

        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=self.num_src_nodes)

    def test_nwusage_with_bidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create1 = self._get_generator('nwOne', self._value_size,
                                          self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create1)
        gen_create2 = self._get_generator('nwTwo', self._value_size,
                                          self._num_items)
        tasks.extend(
            self.dest_cluster.async_load_all_buckets_from_generator(
                kv_gen=gen_create2))

        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)
        self._set_nwusage_limit(self.dest_cluster,
                                nw_limit * self.num_dest_nodes)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=self.num_src_nodes)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=self.num_dest_nodes)

    def test_nwusage_with_rebalance_in(self):
        self.setup_xdcr()
        self.sleep(60)
        no_of_nodes = self.num_src_nodes + 1
        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster, nw_limit * no_of_nodes)

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_in()

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=no_of_nodes)

    def test_nwusage_with_rebalance_out(self):
        self.setup_xdcr()
        self.sleep(60)
        no_of_nodes = self.num_src_nodes - 1
        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster, nw_limit * no_of_nodes)

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_out()

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=no_of_nodes)

    def test_nwusage_reset_to_zero(self):
        self.setup_xdcr()
        self.sleep(60)

        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create)

        self.sleep(30)
        self._set_nwusage_limit(self.src_cluster, 0)
        event_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Network limit reset to 0 at {0}".format(event_time))

        for task in tasks:
            task.result()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=self.num_dest_nodes,
                                     end_time=event_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=0,
                                     no_of_nodes=self.num_src_nodes,
                                     event_time=event_time,
                                     nw_usage="0")

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()
        self.sleep(60)
        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)

        self.src_cluster.pause_all_replications()

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()
        failover_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info("Node failed over at {0}".format(failover_time))

        self.sleep(15)

        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     end_time=failover_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=failover_time,
                                     end_time=node_back_time,
                                     no_of_nodes=self.num_src_nodes - 1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=node_back_time,
                                     no_of_nodes=self.num_src_nodes)

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()
        self.sleep(60)
        self._set_doc_size_num()

        self.src_cluster.pause_all_replications()

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)
        bw_enable_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info(
            "Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=bw_enable_time,
                                     end_time=node_back_time,
                                     no_of_nodes=self.num_src_nodes - 1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     no_of_nodes=self.num_src_nodes,
                                     event_time=node_back_time)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()
        self.sleep(60)
        self._set_doc_size_num()
        self.src_cluster.rebalance_in()

        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.update_autofailover_settings(enabled=True, timeout=30)

        self.src_cluster.pause_all_replications()

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(30)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [],
                                            [])
        task.result()
        failover_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info("Node auto failed over at {0}".format(failover_time))
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(45)
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     end_time=failover_time,
                                     no_of_nodes=3)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=failover_time,
                                     end_time=node_back_time,
                                     no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=node_back_time,
                                     no_of_nodes=3)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()
        self.sleep(60)
        self._set_doc_size_num()

        self.src_cluster.rebalance_in()

        self.src_cluster.pause_all_replications()

        gen_create = self._get_generator('nwOne', self._value_size,
                                         self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(45)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [],
                                            [])
        task.result()
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)
        bw_enable_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info(
            "Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(30)
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=bw_enable_time,
                                     end_time=node_back_time,
                                     no_of_nodes=self.num_src_nodes)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=node_back_time,
                                     no_of_nodes=self.num_src_nodes + 1)
Example No. 3
class Capi(XDCRNewBaseTest, NewUpgradeBaseTest):

    def setUp(self):
        super(Capi, self).setUp()
        self.cluster = Cluster()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.use_hostnames = self._input.param("use_hostnames", False)
        self.src_init = self._input.param('src_init', 2)
        self.dest_init = self._input.param('dest_init', 1)
        self.product = self._input.param('product', 'couchbase-server')
        self.initial_version = self._input.param('initial_version', '2.5.1-1083')
        self.initial_vbuckets = self._input.param('initial_vbuckets', 1024)
        self.init_nodes = self._input.param('init_nodes', True)
        self.initial_build_type = self._input.param('initial_build_type', None)
        self.upgrade_build_type = self._input.param('upgrade_build_type', self.initial_build_type)
        self.master = self.src_master
        self.rest = RestConnection(self.src_master)

    def tearDown(self):
        super(Capi, self).tearDown()

    def _start_es_replication(self, bucket='default', xdcr_params=None):
        xdcr_params = xdcr_params or {}
        rest_conn = RestConnection(self.src_cluster.get_master_node())
        if bucket == 'default':
            self.log.info("Creating default bucket")
            rest_conn.create_bucket(bucket='default', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
                                proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='default', authType='none',
                                   saslPassword='', replicaNumber=1, proxyPort=11211, bucketType='membase',
                                   evictionPolicy='valueOnly')
        elif bucket == 'sasl':
            self.log.info("Creating sasl bucket")
            rest_conn.create_bucket(bucket='sasl', ramQuotaMB=100, authType='sasl', saslPassword='******', replicaNumber=1,
                                proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='sasl', authType='sasl',
                                   saslPassword='******', replicaNumber=1, proxyPort=11211, bucketType='membase',
                                   evictionPolicy='valueOnly')
        elif bucket == 'standard':
            self.log.info("Creating standard bucket")
            rest_conn.create_bucket(bucket='standard', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
                                proxyPort=STANDARD_BUCKET_PORT, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=False)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='standard', authType='none',
                                   saslPassword='', replicaNumber=1, proxyPort=STANDARD_BUCKET_PORT, bucketType='membase',
                                   evictionPolicy='valueOnly')
        elif bucket == 'lww':
            self.log.info("Creating lww bucket")
            rest_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
                                proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3,
                                flushEnabled=1, lww=True)
            self.src_cluster.add_bucket(ramQuotaMB=100, bucket='lww', authType='none',
                                   saslPassword='', replicaNumber=1, proxyPort=11211, bucketType='membase',
                                   evictionPolicy='valueOnly')
        esrest_conn = EsRestConnection(self.dest_cluster.get_master_node())
        esrest_conn.create_index(bucket)
        rest_conn.add_remote_cluster(remoteIp=self.dest_master.ip, remotePort=9091, username='******',
                                     password='******', name='es')
        self.src_cluster.get_remote_clusters().append(XDCRRemoteClusterRef(self.src_cluster, self.dest_cluster,
                                                                       Utility.get_rc_name(self.src_cluster.get_name(),
                                                                                        self.dest_cluster.get_name())))
        repl_id = rest_conn.start_replication(replicationType='continuous', fromBucket=bucket, toCluster='es',
                                              rep_type='capi', toBucket=bucket, xdcr_params=xdcr_params)
        return repl_id

    def _verify_es_results(self, bucket='default'):
        esrest_conn = EsRestConnection(self.dest_master)
        es_docs = esrest_conn.all_docs()
        self.log.info("Retrieved ES Docs")
        rest_conn = RestConnection(self.src_master)
        memcached_conn = VBucketAwareMemcached(rest_conn, bucket)
        self.log.info("Comparing CB and ES data")
        for doc in es_docs:
            es_data = doc['doc']
            mc_active = memcached_conn.memcached(str(es_data['_id']))
            cb_flags, cb_cas, cb_data = mc_active.get(str(es_data['_id']))
            self.assertDictEqual(es_data, json.loads(cb_data), "Data mismatch found - es data: {0} cb data: {1}".
                                 format(str(es_data), str(cb_data)))
        self.log.info("Data verified")

    def test_crud_ops_from_cb_to_es(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)

    def test_incr_crud_ops_from_cb_to_es(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.async_perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_pause_resume(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.async_load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        self.sleep(30)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_checkpointing(self):
        repl_id = self._start_es_replication(xdcr_params={"checkpointInterval":"60"})

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.sleep(120)

        vb0_node = None
        nodes = self.src_cluster.get_nodes()
        ip = VBucketAwareMemcached(rest_conn,'default').vBucketMap[0].split(':')[0]
        for node in nodes:
            if ip == node.ip:
                vb0_node = node
        if not vb0_node:
            raise XDCRCheckpointException("Error determining the node containing active vb0")
        vb0_conn = RestConnection(vb0_node)
        try:
            checkpoint_record = vb0_conn.get_recent_xdcr_vb_ckpt(repl_id)
            self.log.info("Checkpoint record : {0}".format(checkpoint_record))
        except Exception as e:
            raise XDCRCheckpointException("Error retrieving last checkpoint document - {0}".format(e))

        self._verify_es_results()

    def test_capi_with_optimistic_replication(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)

        rest_conn.set_xdcr_param('default', 'default', 'optimisticReplicationThreshold', self._optimistic_threshold)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_filter(self):
        repl_id = self._start_es_replication(xdcr_params={'filterExpression':'es-5*'})

        rest_conn = RestConnection(self.src_master)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_advanced_settings(self):
        batch_count = self._input.param("batch_count", 10)
        batch_size = self._input.param("batch_size", 2048)
        source_nozzle = self._input.param("source_nozzle", 2)
        target_nozzle = self._input.param("target_nozzle", 2)
        enable_firewall = self._input.param("enable_firewall", False)

        capi_data_chan_size_multi = self._input.param("capi_data_chan_size_multi", None)
        if capi_data_chan_size_multi:
            shell = RemoteMachineShellConnection(self.src_master)
            command = "curl -X POST -u Administrator:password http://127.0.0.1:9998/xdcr/internalSettings " + \
                      "-d CapiDataChanSizeMultiplier=" + str(capi_data_chan_size_multi)
            output, error = shell.execute_command(command)
            shell.log_command_output(output, error)

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)

        rest_conn.set_xdcr_param('default', 'default', 'workerBatchSize', batch_count)
        rest_conn.set_xdcr_param('default', 'default', 'docBatchSizeKb', batch_size)
        rest_conn.set_xdcr_param('default', 'default', 'sourceNozzlePerNode', source_nozzle)
        rest_conn.set_xdcr_param('default', 'default', 'targetNozzlePerNode', target_nozzle)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        if enable_firewall:
            NodeHelper.enable_firewall(self.dest_cluster.get_master_node())
            self.sleep(120)
            NodeHelper.disable_firewall(self.dest_cluster.get_master_node())

        self._verify_es_results()

    def test_capi_with_rebalance_in(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.src_cluster.rebalance_in()

        self._wait_for_es_replication_to_catchup(timeout=900)

        self._verify_es_results()

    def test_capi_with_rebalance_out(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.src_cluster.rebalance_out()

        self._wait_for_es_replication_to_catchup(timeout=900)

        self._verify_es_results()

    def test_capi_with_swap_rebalance(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.src_cluster.swap_rebalance()

        self._wait_for_es_replication_to_catchup(timeout=600)

        self._verify_es_results()

    def test_capi_with_failover(self):
        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        graceful = self._input.param("graceful", False)
        self.recoveryType = self._input.param("recoveryType", None)
        self.src_cluster.failover(graceful=graceful)

        self.sleep(30)

        if self.recoveryType:
            server_nodes = rest_conn.node_statuses()
            for node in server_nodes:
                if node.ip == self._input.servers[1].ip:
                    rest_conn.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
                    self.sleep(30)
                    rest_conn.add_back_node(otpNode=node.id)
            rebalance = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
            rebalance.result()

        self._verify_es_results()

    def test_capi_with_malformed_http_resp(self):
        repl_id = self._start_es_replication(xdcr_params={'workerBatchSize':'2000',
                                                          'docBatchSizeKb':'8096',
                                                          'targetNozzlePerNode':'64'})

        rest_conn = RestConnection(self.src_master)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self.src_master)\
                     + '/goxdcr.log*'
        for node in self.src_cluster.get_nodes():
            count = NodeHelper.check_goxdcr_log(
                            node,
                            "malformed HTTP response",
                            goxdcr_log)
            self.assertEqual(count, 0, "malformed HTTP response error message found in " + str(node.ip))
            self.log.info("malformed HTTP response error message not found in " + str(node.ip))

        self._verify_es_results()

    def test_capi_with_offline_upgrade(self):
        self._install(self._input.servers[:self.src_init + self.dest_init])
        upgrade_nodes = self.src_cluster.get_nodes()
        upgrade_version = self._input.param("upgrade_version", "5.0.0-1797")

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        self._install(servers=upgrade_nodes, version=upgrade_version)

        self.log.info("######### Upgrade of CB cluster completed ##########")

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value"}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_online_upgrade(self):
        self._install(self._input.servers[:self.src_init + self.dest_init])
        upgrade_version = self._input.param("upgrade_version", "5.0.0-1797")
        upgrade_nodes = self.src_cluster.get_nodes()
        extra_nodes = self._input.servers[self.src_init + self.dest_init:]

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        find_master = False
        for new_server in extra_nodes:
            if content.find(new_server.ip) >= 0:
                find_master = True
                self.log.info("{0} Node {1} becomes the master".format(added_versions[0], new_server.ip))
                break
        if not find_master:
            raise Exception("After rebalance in {0} Nodes, one of them doesn't become the master".
                            format(added_versions[0]))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[self.src_init + self.dest_init]

        self._install(self.src_cluster.get_nodes(), version=upgrade_version)
        upgrade_nodes = self._input.servers[self.src_init + self.dest_init:]
        extra_nodes = self.src_cluster.get_nodes()

        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[0]

        self.log.info("######### Upgrade of CB cluster completed ##########")

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value"}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()

    def test_capi_with_cb_stop_and_start(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.async_perform_update_delete()

        conn = RemoteMachineShellConnection(self.src_master)
        conn.stop_couchbase()
        conn.start_couchbase()

        self.sleep(30)

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)

    def test_capi_with_erlang_crash(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.async_perform_update_delete()

        conn = RemoteMachineShellConnection(self.src_master)
        conn.kill_erlang()
        conn.start_couchbase()

        self.sleep(30)

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)

    def test_capi_with_memcached_crash(self):
        bucket = self._input.param("bucket", 'default')
        repl_id = self._start_es_replication(bucket=bucket)

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self.async_perform_update_delete()

        conn = RemoteMachineShellConnection(self.src_master)
        conn.pause_memcached()
        conn.unpause_memcached()

        self.sleep(30)

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results(bucket=bucket)
Example No. 4
class compression(XDCRNewBaseTest):
    def setUp(self):
        super(compression, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.chain_length = self._input.param("chain_length", 2)
        self.topology = self._input.param("ctopology", "chain")
        if self.chain_length > 2:
            self.c3_cluster = self.get_cb_cluster_by_name('C3')
            self.c3_master = self.c3_cluster.get_master_node()
        self.cluster = Cluster()

    def tearDown(self):
        super(compression, self).tearDown()

    def _set_compression_type(self, cluster, bucket_name, compression_type="None"):
        """Set compressionType on the replication for bucket_name via the
        /settings/replications REST endpoint and return the curl output."""
        repls = cluster.get_remote_clusters()[0].get_replications()
        for repl in repls:
            if bucket_name in str(repl):
                repl_id = repl.get_repl_id()
        shell = RemoteMachineShellConnection(cluster.get_master_node())
        # Replication ids contain '/', which must be URL-encoded in the REST path
        repl_id = str(repl_id).replace('/', '%2F')
        base_url = "http://" + cluster.get_master_node().ip + ":8091/settings/replications/" + repl_id
        command = "curl -X POST -u Administrator:password " + base_url + " -d compressionType=" + str(compression_type)
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        shell.disconnect()
        return output, error
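
    # For reference, the curl invocation above maps onto a plain HTTP POST against
    # /settings/replications/<url-encoded repl_id>. A standalone sketch using the
    # requests library (not what this framework uses; host and credentials are the
    # same assumptions the curl command already makes):
    #
    #   import requests
    #   url = "http://{0}:8091/settings/replications/{1}".format(master_ip, encoded_repl_id)
    #   resp = requests.post(url,
    #                        auth=("Administrator", "password"),
    #                        data={"compressionType": "Snappy"})
    #   resp.raise_for_status()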

    def _verify_compression(self, cluster, compr_bucket_name="", uncompr_bucket_name="",
                            compression_type="None", repl_time=0):
        """Check that the replication for compr_bucket_name reports Snappy compression and
        that it shipped less data than the uncompressed replication since repl_time."""
        repls = cluster.get_remote_clusters()[0].get_replications()
        for repl in repls:
            if compr_bucket_name in str(repl):
                compr_repl_id = repl.get_repl_id()
            elif uncompr_bucket_name in str(repl):
                uncompr_repl_id = repl.get_repl_id()

        compr_repl_id = str(compr_repl_id).replace('/', '%2F')
        uncompr_repl_id = str(uncompr_repl_id).replace('/', '%2F')

        base_url = "http://" + cluster.get_master_node().ip + ":8091/settings/replications/" + compr_repl_id
        shell = RemoteMachineShellConnection(cluster.get_master_node())
        command = "curl -u Administrator:password " + base_url
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        self.assertTrue('"compressionType":"Snappy"' in output[0],
                        "Compression Type for replication " + compr_repl_id + " is not Snappy")
        self.log.info("Compression Type for replication " + compr_repl_id + " is Snappy")

        base_url = "http://" + cluster.get_master_node().ip + ":8091/pools/default/buckets/" + compr_bucket_name + \
                   "/stats/replications%2F" + compr_repl_id + "%2Fdata_replicated?haveTStamp=" + str(repl_time)
        command = "curl -u Administrator:password " + base_url
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        output = json.loads(output[0])
        compressed_data_replicated = 0
        for node in cluster.get_nodes():
            items = output["nodeStats"]["{0}:8091".format(node.ip)]
            for item in items:
                compressed_data_replicated += item
        self.log.info("Compressed data for replication {0} is {1}".format(compr_repl_id, compressed_data_replicated))

        base_url = "http://" + cluster.get_master_node().ip + ":8091/pools/default/buckets/" + uncompr_bucket_name + \
                   "/stats/replications%2F" + uncompr_repl_id + "%2Fdata_replicated?haveTStamp=" + str(repl_time)
        command = "curl -u Administrator:password " + base_url
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        output = json.loads(output[0])
        uncompressed_data_replicated = 0
        for node in cluster.get_nodes():
            items = output["nodeStats"]["{0}:8091".format(node.ip)]
            for item in items:
                uncompressed_data_replicated += item
        self.log.info("Uncompressed data for replication {0} is {1}".format(uncompr_repl_id, uncompressed_data_replicated))

        self.assertTrue(uncompressed_data_replicated > compressed_data_replicated,
                        "Compression did not work as expected")
        self.log.info("Compression worked as expected")

        shell.disconnect()
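
    # The two summation loops in _verify_compression can be expressed as one small
    # helper; this is a sketch added for illustration, not part of the original test.
    @staticmethod
    def _sum_data_replicated(stats_json, nodes):
        """Total the data_replicated samples returned by the
        /stats/replications%2F<repl_id>%2Fdata_replicated endpoint, assuming the same
        "nodeStats" layout that _verify_compression already relies on."""
        total = 0
        for node in nodes:
            for sample in stats_json["nodeStats"]["{0}:8091".format(node.ip)]:
                total += sample
        return total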

    def test_compression_with_unixdcr_incr_load(self):
        bucket_prefix = self._input.param("bucket_prefix", "standard_bucket_")
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, bucket_prefix + "1", compression_type)
        self._set_compression_type(self.src_cluster, bucket_prefix + "2")
        if self.chain_length > 2 and self.topology == TOPOLOGY.CHAIN:
            self._set_compression_type(self.dest_cluster, bucket_prefix + "1", compression_type)
            self._set_compression_type(self.dest_cluster, bucket_prefix + "2")
        if self.chain_length > 2 and self.topology == TOPOLOGY.RING:
            self._set_compression_type(self.dest_cluster, bucket_prefix + "1", compression_type)
            self._set_compression_type(self.dest_cluster, bucket_prefix + "2")
            self._set_compression_type(self.c3_cluster, bucket_prefix + "1", compression_type)
            self._set_compression_type(self.c3_cluster, bucket_prefix + "2")

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name=bucket_prefix + "1",
                                 uncompr_bucket_name=bucket_prefix + "2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        if self.chain_length > 2 and self.topology == TOPOLOGY.CHAIN:
            self._verify_compression(cluster=self.dest_cluster,
                                     compr_bucket_name=bucket_prefix + "1",
                                     uncompr_bucket_name=bucket_prefix + "2",
                                     compression_type=compression_type,
                                     repl_time=repl_time)
        if self.chain_length > 2 and self.topology == TOPOLOGY.RING:
            self._verify_compression(cluster=self.dest_cluster,
                                     compr_bucket_name=bucket_prefix + "1",
                                     uncompr_bucket_name=bucket_prefix + "2",
                                     compression_type=compression_type,
                                     repl_time=repl_time)
            self._verify_compression(cluster=self.c3_cluster,
                                     compr_bucket_name=bucket_prefix + "1",
                                     uncompr_bucket_name=bucket_prefix + "2",
                                     compression_type=compression_type,
                                     repl_time=repl_time)
        self.verify_results()

    def test_compression_with_unixdcr_backfill_load(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_bixdcr_incr_load(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")
        self._set_compression_type(self.dest_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.dest_cluster, "standard_bucket_2")

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)
        gen_create = BlobGenerator('comprTwo-', 'comprTwo-', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self._verify_compression(cluster=self.dest_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_bixdcr_backfill_load(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")
        self._set_compression_type(self.dest_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.dest_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()
        self.dest_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)
        gen_create = BlobGenerator('comprTwo-', 'comprTwo-', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()
        self.dest_cluster.resume_all_replications()

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self._verify_compression(cluster=self.dest_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_pause_resume(self):
        repeat = self._input.param("repeat", 5)
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()

        for i in range(0, repeat):
            self.src_cluster.pause_all_replications()
            self.sleep(30)
            self.src_cluster.resume_all_replications()

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_optimistic_threshold_change(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'optimisticReplicationThreshold',
                                self._optimistic_threshold)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'optimisticReplicationThreshold',
                                self._optimistic_threshold)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_advanced_settings(self):
        batch_count = self._input.param("batch_count", 10)
        batch_size = self._input.param("batch_size", 2048)
        source_nozzle = self._input.param("source_nozzle", 2)
        target_nozzle = self._input.param("target_nozzle", 2)

        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'workerBatchSize', batch_count)
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'docBatchSizeKb', batch_size)
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'sourceNozzlePerNode', source_nozzle)
        src_conn.set_xdcr_param('standard_bucket_1', 'standard_bucket_1', 'targetNozzlePerNode', target_nozzle)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'workerBatchSize', batch_count)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'docBatchSizeKb', batch_size)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'sourceNozzlePerNode', source_nozzle)
        src_conn.set_xdcr_param('standard_bucket_2', 'standard_bucket_2', 'targetNozzlePerNode', target_nozzle)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_capi(self):
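        # CAPI replications do not support XDCR compression, so the REST call to set
        # compressionType is expected to be rejected.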
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        output, error = self._set_compression_type(self.src_cluster, "default", compression_type)
        self.assertTrue("The value can not be specified for CAPI replication" in output[0], "Compression enabled for CAPI")
        self.log.info("Compression not enabled for CAPI as expected")

    def test_compression_with_rebalance_in(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.src_cluster.rebalance_in()

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_rebalance_out(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.src_cluster.rebalance_out()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_swap_rebalance(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.src_cluster.swap_rebalance()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_failover(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        src_conn = RestConnection(self.src_cluster.get_master_node())
        graceful = self._input.param("graceful", False)
        self.recoveryType = self._input.param("recoveryType", None)
        self.src_cluster.failover(graceful=graceful)

        self.sleep(30)

        if self.recoveryType:
            server_nodes = src_conn.node_statuses()
            for node in server_nodes:
                if node.ip == self._input.servers[1].ip:
                    src_conn.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
                    self.sleep(30)
                    src_conn.add_back_node(otpNode=node.id)
            rebalance = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
            rebalance.result()

        self._wait_for_replication_to_catchup()

        self._verify_compression(cluster=self.src_cluster,
                                 compr_bucket_name="standard_bucket_1",
                                 uncompr_bucket_name="standard_bucket_2",
                                 compression_type=compression_type,
                                 repl_time=repl_time)
        self.verify_results()

    def test_compression_with_replication_delete_and_create(self):
        self.setup_xdcr()
        repl_time = int(time.time())
        self.sleep(60)

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()

        rest_conn = RestConnection(self.src_master)
        rest_conn.remove_all_replications()
        rest_conn.remove_all_remote_clusters()

        self.src_cluster.get_remote_clusters()[0].clear_all_replications()
        self.src_cluster.clear_all_remote_clusters()

        self.setup_xdcr()

        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "standard_bucket_1", compression_type)
        self._set_compression_type(self.src_cluster, "standard_bucket_2")

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_bixdcr_and_compression_one_way(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)
        gen_create = BlobGenerator('comprTwo-', 'comprTwo-', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_enabling_later(self):
        self.setup_xdcr()
        self.sleep(60)

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()
        self.sleep(10)

        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_disabling_later(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.async_perform_update_delete()
        self.sleep(10)

        self._set_compression_type(self.src_cluster, "default", "None")

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_rebalance_out_target_and_disabling(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.dest_cluster.rebalance_out()

        self._set_compression_type(self.src_cluster, "default", "None")
        self.sleep(5)
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self._wait_for_replication_to_catchup()

        self.verify_results()

    def test_compression_with_rebalance_out_src_and_disabling(self):
        self.setup_xdcr()
        self.sleep(60)
        compression_type = self._input.param("compression_type", "Snappy")
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('comprOne-', 'comprOne-', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.async_perform_update_delete()

        self.src_cluster.rebalance_out()

        self._set_compression_type(self.src_cluster, "default", "None")
        self.sleep(5)
        self._set_compression_type(self.src_cluster, "default", compression_type)

        self._wait_for_replication_to_catchup()

        self.verify_results()
Example No. 6
class SpatialQueryTests(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.servers = self.helper.servers

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init(data_set)

    def test_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init(data_set)

    def test_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Make range queries with limits on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init(data_set)

## Rebalance In
    def test_rebalance_in_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_in_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance In and range queries with limits on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._rebalance_cluster(data_set)

#Rebalance Out
    def test_rebalance_out_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._rebalance_cluster(data_set)

    def test_rebalance_out_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Rebalance Out and range queries with limits on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._rebalance_cluster(data_set)

# Warmup Tests

    def test_warmup_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with range queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_warmup_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Warmup with range queries with limits on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init_integration(data_set)


# Reboot Tests
    def test_reboot_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with range queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._query_test_init_integration(data_set)

    def test_reboot_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Reboot with range queries with limits on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._query_test_init_integration(data_set)

# Failover Tests
    def test_failover_simple_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and skip (and limit) queries on a "
                      "simple dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_bbox_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and bounding box queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_bbox_queries()
        self._failover_cluster(data_set)

    def test_failover_simple_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and range queries on a simple "
                      "dataset with {0} docs".format(num_docs))

        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and limit queries on a multidimensional "
                      "dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_skip_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and skip (and limit) queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_skip_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_range_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and range queries on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_queries()
        self._failover_cluster(data_set)

    def test_failover_multidim_dataset_range_and_limit_queries(self):
        num_docs = self.helper.input.param("num-docs")
        self.log.info("description : Failover and range queries with limits on a "
                      "multidimensional dataset with {0} docs".format(num_docs))

        data_set = MultidimDataSet(self.helper, num_docs)
        data_set.add_range_and_limit_queries()
        self._failover_cluster(data_set)

    ###
    # load the data defined for this dataset.
    # create views and query the data as it loads.
    # verification is optional, and best practice is to
    # set to False if you plan on running _query_all_views()
    # later in the test case
    ###
    def _query_test_init(self, data_set, verify_results = True):
        views = data_set.views

        # start loading data
        t = Thread(target=data_set.load,
                   name="load_data_set",
                   args=())
        t.start()

        # run queries while loading data
        while(t.is_alive()):
            self._query_all_views(views, False)
            time.sleep(5)
        t.join()

        # results will be verified if verify_results set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_intergrity(views)

    def _query_test_init_integration(self, data_set, verify_results = True):
        views = data_set.views
        data_set.load()

        # Trigger the configured cluster disruption (reboot, warmup or
        # rebalance) first; each helper blocks until it has finished.
        if self.helper.num_nodes_reboot >= 1:
            self._reboot_cluster(data_set)
        elif self.helper.num_nodes_warmup >= 1:
            self._warmup_cluster(data_set)
        elif self.helper.num_nodes_to_add >= 1 or self.helper.num_nodes_to_remove >= 1:
            self._rebalance_cluster(data_set)

        # Query all views in a background thread and wait for it to finish.
        # Pass the callable and its arguments separately so the queries run
        # inside the thread instead of being invoked here synchronously.
        t = Thread(target=self._query_all_views, args=(views, False))
        t.start()
        while t.is_alive():
            time.sleep(5)
        t.join()

        # results will be verified if verify_results set
        if verify_results:
            self._query_all_views(views, verify_results)
        else:
            self._check_view_intergrity(views)

    ##
    # run all queries for all views in parallel
    ##
    def _query_all_views(self, views, verify_results = True):
        query_threads = []
        for view in views:
            t = RunQueriesThread(view, verify_results)
            query_threads.append(t)
            t.start()

        [t.join() for t in query_threads]

        self._check_view_intergrity(query_threads)

    ##
    # If an error occurred loading or querying data for a view
    # it is queued and checked here. Fail on the first one that
    # occurs.
    ##
    def _check_view_intergrity(self, thread_results):
        for result in thread_results:
            if result.test_results.errors:
                self.fail(result.test_results.errors[0][1])
            if result.test_results.failures:
                self.fail(result.test_results.failures[0][1])

    ###
    # Rebalance
    ###
    def _rebalance_cluster(self, data_set):
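        # Rebalance in when num_nodes_to_add is set, otherwise rebalance out;
        # the data set's queries are driven via _query_test_init while the
        # rebalance is in flight.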
        if self.helper.num_nodes_to_add >= 1:
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                self.servers[1:self.helper.num_nodes_to_add + 1],
                [])
            self._query_test_init(data_set)
            rebalance.result()

        elif self.helper.num_nodes_to_remove >= 1:
            rebalance = self.cluster.async_rebalance(self.servers[:1], [],
                self.servers[1:self.helper.num_nodes_to_remove + 1])
            self._query_test_init(data_set)
            rebalance.result()

    def _failover_cluster(self, data_set):
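        # Fail over one node, wait, rebalance it out, and run the data set's
        # queries both during and after the rebalance.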
        failover_nodes = self.servers[1 : self.helper.failover_factor + 1]
        try:
            # failover and verify loaded data
            #self.cluster.failover(self.servers, failover_nodes)
            self.cluster.failover(self.servers, self.servers[1:2])
            self.log.info("120 seconds sleep after failover before invoking rebalance...")
            time.sleep(120)
            rebalance = self.cluster.async_rebalance(self.servers,
                [], self.servers[1:2])

            self._query_test_init(data_set)

            msg = "rebalance failed while removing failover nodes {0}".format(failover_nodes)
            self.assertTrue(rebalance.result(), msg=msg)

            #verify queries after failover
            self._query_test_init(data_set)
        finally:
            self.log.info("Completed the failover testing for spatial querying")

    ###
    # Warmup
    ###
    def _warmup_cluster(self, data_set):
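        # Restart the Couchbase service on the chosen nodes to force a warmup,
        # then run the data set's queries against the warming cluster.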
        for server in self.servers[0:self.helper.num_nodes_warmup]:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.start_server()
            remote.disconnect()
            self.log.info("Node {0} should be warming up ".format(server.ip))
            time.sleep(120)
        self._query_test_init(data_set)

    # REBOOT
    def _reboot_cluster(self, data_set):
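        # Reboot the chosen nodes at the OS level (Windows or Linux), flush
        # iptables once a Linux node comes back, then give the cluster time
        # to warm up before returning.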
        try:
            for server in self.servers[0:self.helper.num_nodes_reboot]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    time.sleep(120)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is back up".format(server.ip))
        finally:
            self.log.info("Warming-up server {0} ..".format(server.ip))
            time.sleep(100)
Example #8
class nwusage(XDCRNewBaseTest):
    def setUp(self):
        super(nwusage, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.cluster = Cluster()

    def tearDown(self):
        super(nwusage, self).tearDown()

    def _set_nwusage_limit(self, cluster, nw_limit=0):
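        # Set the XDCR network usage limit (MB/sec) for the first replication
        # by POSTing networkUsageLimit to the per-replication settings REST
        # endpoint on the cluster's master node.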
        repl_id = cluster.get_remote_clusters()[0].get_replications()[0].get_repl_id()
        shell = RemoteMachineShellConnection(cluster.get_master_node())
        repl_id = str(repl_id).replace('/','%2F')
        base_url = "http://" + cluster.get_master_node().ip + ":8091/settings/replications/" + repl_id
        command = "curl -X POST -u Administrator:password " + base_url + " -d networkUsageLimit=" + str(nw_limit)
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)

    def _extract_timestamp(self, logmsg):
        #matches timestamp format : 2018-10-11T00:02:35
        timestamp_str = re.search(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}', logmsg)
        timestamp = datetime.datetime.strptime(timestamp_str.group(), '%Y-%m-%dT%H:%M:%S')
        return timestamp

    def _verify_bandwidth_usage(self, node, nw_limit=1, no_of_nodes=2, event_time=None,
                                nw_usage="[1-9][0-9]*", end_time=None):
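        # Scan goxdcr.log for "bandwidth_usage" samples recorded after the
        # replication was created (or after event_time, when given) and fail
        # if any sample exceeds the configured limit split across the nodes.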
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(node) + '/goxdcr.log'
        nw_max = (nw_limit * 1024 * 1024)/no_of_nodes

        if event_time:
            time_to_compare = datetime.datetime.strptime(event_time.group(), '%Y-%m-%dT%H:%M:%S')
        else:
            matches, _ = NodeHelper.check_goxdcr_log(node, "Success adding replication specification",
                                                     goxdcr_log, print_matches=True)
            # Fall back to the time the replication was set up
            time_to_compare = self._extract_timestamp(matches[-1])

        matches, count = NodeHelper.check_goxdcr_log(node, "\\\"bandwidth_usage\\\": " + nw_usage, goxdcr_log, print_matches=True)
        if count < 1:
            self.fail("Bandwidth usage information not found in logs")

        match_count = 0
        skip_count = 0
        for item in matches:
            item_datetime = self._extract_timestamp(item)
            #Ignore entries that happened before the replication was set up
            if item_datetime < time_to_compare:
                skip_count += 1
                continue
            if end_time:
                end_datetime = datetime.datetime.strptime(end_time.group(), '%Y-%m-%dT%H:%M:%S')
                if item_datetime > end_datetime:
                    skip_count += 1
                    continue
            bandwidth_usage = ((item.split('{"bandwidth_usage": ')[1]).split(' ')[0]).rstrip(',')
            if int(float(bandwidth_usage)) < nw_max:
                match_count += 1
                continue
            else:
                self.fail("Bandwidth usage {0} is higher than Bandwidth limit {1} in {2}".format(bandwidth_usage,nw_max,item))

        if match_count + skip_count == count:
            self.log.info("{0} stale entries skipped".format(skip_count))
            if match_count > 0:
                self.log.info("{0} entries checked - Bandwidth usage always lower than Bandwidth limit as expected".
                          format(match_count))
            else:
                if self._input.param("replication_type") == "capi":
                    self.log.info("Bandwidth Throttler not enabled on replication as expected")
                else:
                    self.fail("Bandwidth Throttler not enabled on replication")

    def _get_current_time(self, server):
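        # Read the node's own clock, formatted to match goxdcr.log timestamps.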
        shell = RemoteMachineShellConnection(server)
        command = "date +'%Y-%m-%dT%H:%M:%S'"
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        curr_time = output[0].strip()
        return curr_time

    def test_nwusage_with_unidirection(self):
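        # Basic check: apply a bandwidth cap on the source cluster, replicate a
        # data load, and verify reported usage never exceeds the per-node cap.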
        self.setup_xdcr()
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()
        self._wait_for_replication_to_catchup(timeout=60)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_bidirection(self):
        self.setup_xdcr()
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create2)

        self.perform_update_delete()
        self._wait_for_replication_to_catchup(timeout=60)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_unidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.pause_all_replications()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        self.src_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup(timeout=60)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_bidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        tasks.extend(self.dest_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create2))

        self.src_cluster.pause_all_replications()
        self.dest_cluster.pause_all_replications()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        self.src_cluster.resume_all_replications()
        self.dest_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_unidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_bidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        tasks.extend(self.dest_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create2))

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup(timeout=60)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_rebalance_in(self):
        self.setup_xdcr()
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_in()
        self.perform_update_delete()
        self._wait_for_replication_to_catchup(timeout=60)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, no_of_nodes=3)

    def test_nwusage_with_rebalance_out(self):
        self.setup_xdcr()
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_out()
        self.perform_update_delete()
        self._wait_for_replication_to_catchup(timeout=60)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, no_of_nodes=1)

    def test_nwusage_reset_to_zero(self):
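        """Start an async load with a bandwidth limit in place, reset the limit to 0
        mid-load, and verify throttled usage up to the reset event and a zero
        limit/zero usage reported afterwards.
        """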
        self.setup_xdcr()
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        self._set_nwusage_limit(self.src_cluster, 0)
        event_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Network limit reset to 0 at {0}".format(event_time))

        for task in tasks:
            task.result()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, end_time=event_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=0, no_of_nodes=2, event_time=event_time, nw_usage="0")

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled(self):
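        """With the throttler enabled up front, hard fail over and rebalance out a
        source node, then add a node back, verifying the per-node limit before the
        failover (two nodes), between failover and add-back (one node), and after
        the node is added back (two nodes again).
        """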
        self.setup_xdcr()
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()
        self._wait_for_replication_to_catchup(timeout=60)

        self.src_cluster.failover_and_rebalance_nodes()
        failover_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node failed over at {0}".format(failover_time))
        self._wait_for_replication_to_catchup(timeout=60)

        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))
        self._wait_for_replication_to_catchup(timeout=60)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), end_time=failover_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=failover_time, end_time=node_back_time, no_of_nodes=1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time)

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled_later(self):
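        """Hard fail over a source node first, then enable the throttler and add a
        node back, verifying the limit for the single-node window between enabling
        the throttler and add-back, and again after the node is added back.
        """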
        self.setup_xdcr()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()
        self._wait_for_replication_to_catchup(timeout=60)
        self.src_cluster.failover_and_rebalance_nodes()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        bw_enable_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Bandwidth throttler enabled at {0}".format(bw_enable_time))


        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=bw_enable_time, end_time=node_back_time, no_of_nodes=1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled(self):
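        """Rebalance a third node into the source cluster, enable the throttler and
        auto-failover, stop Couchbase on one node to trigger auto-failover, bring it
        back, and verify the per-node limit across the 3-node, 2-node and restored
        3-node windows.
        """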
        self.setup_xdcr()

        self.src_cluster.rebalance_in()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.update_autofailover_settings(enabled=True, timeout=30)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self._wait_for_replication_to_catchup(timeout=60)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()

        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
        task.result()
        failover_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node auto failed over at {0}".format(failover_time))
        FloatingServers._serverlist.append(self._input.servers[1])
        shell.start_couchbase()
        shell.disable_firewall()
        self.wait_service_started(self._input.servers[1])
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), end_time=failover_time, no_of_nodes=3)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=failover_time, end_time=node_back_time, no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time, no_of_nodes=3)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled_later(self):
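        """Stop Couchbase on a source node and rebalance it out first, then enable
        the throttler, bring the node back, and verify the limit for the 2-node
        window and the restored 3-node window.
        """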
        self.setup_xdcr()

        self.src_cluster.rebalance_in()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()
        self._wait_for_replication_to_catchup(timeout=60)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
        task.result()
        FloatingServers._serverlist.append(self._input.servers[1])
        self._wait_for_replication_to_catchup(timeout=60)

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        bw_enable_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Bandwidth throttler enabled at {0}".format(bw_enable_time))

        shell.start_couchbase()
        shell.disable_firewall()
        self.wait_service_started(self._input.servers[1])
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=bw_enable_time, end_time=node_back_time, no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time, no_of_nodes=3)
Example No. 9
class nwusage(XDCRNewBaseTest):
    def setUp(self):
        super(nwusage, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.cluster = Cluster()

    def tearDown(self):
        super(nwusage, self).tearDown()

    def _verify_bandwidth_usage(self,
                                node,
                                nw_limit=1,
                                no_of_nodes=2,
                                event_time=None,
                                nw_usage="[1-9][0-9]*",
                                end_time=None):
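        """Scan goxdcr.log on `node` for throttler stats of the form
        "bandwidth_limit=<nw_max>, bandwidth_usage=<usage>" and verify that every
        entry inside the checked time window stays at or below the per-node limit
        (nw_limit MB split across no_of_nodes). The window starts at event_time, or
        at the last "Success adding replication specification" message if no
        event_time is given, and optionally ends at end_time; entries outside the
        window are skipped as stale.
        """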
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(node) + '/goxdcr.log'
        nw_max = (nw_limit * 1024 * 1024) // no_of_nodes

        if event_time:
            time_to_compare = time.strptime(event_time, '%Y-%m-%dT%H:%M:%S')
        else:
            matches, _ = NodeHelper.check_goxdcr_log(
                node,
                "Success adding replication specification",
                goxdcr_log,
                print_matches=True)
            time_to_compare_str = matches[-1].split(' ')[0].split('.')[0]
            time_to_compare = time.strptime(time_to_compare_str,
                                            '%Y-%m-%dT%H:%M:%S')

        matches, count = NodeHelper.check_goxdcr_log(
            node,
            "bandwidth_limit=" + str(nw_max) + ", bandwidth_usage=" + nw_usage,
            goxdcr_log,
            print_matches=True)
        match_count = 0
        skip_count = 0
        for item in matches:
            items = item.split(' ')
            item_time = items[0].split('.')[0]
            item_datetime = time.strptime(item_time, '%Y-%m-%dT%H:%M:%S')
            if item_datetime < time_to_compare:
                skip_count += 1
                continue
            if end_time:
                end_datetime = time.strptime(end_time, '%Y-%m-%dT%H:%M:%S')
                if item_datetime > end_datetime:
                    skip_count += 1
                    continue
            bandwidth_usage = items[-1].split('=')[-1]
            if int(bandwidth_usage) <= nw_max:
                match_count += 1
                continue
            else:
                self.fail("Bandwidth usage higher than Bandwidth limit in {0}".
                          format(item))

        if match_count + skip_count == count:
            self.log.info("{0} stale entries skipped".format(skip_count))
            if match_count > 0:
                self.log.info(
                    "{0} entries checked - Bandwidth usage always lower than Bandwidth limit as expected"
                    .format(match_count))
            else:
                if self._input.param("replication_type") == "capi":
                    self.log.info(
                        "Bandwidth Throttler not enabled on replication as expected"
                    )
                else:
                    self.fail("Bandwidth Throttler not enabled on replication")

    def test_nwusage_with_unidirection(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit)

    def test_nwusage_with_bidirection(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)
        self.dest_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        gen_create1 = BlobGenerator('nwOne',
                                    'nwOne',
                                    self._value_size,
                                    end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo',
                                    'nwTwo',
                                    self._value_size,
                                    end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create2)

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(),
                                     nw_limit=nw_limit)

    def test_nwusage_with_unidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create)

        self.src_cluster.pause_all_replications()

        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        self.src_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit)

    def test_nwusage_with_bidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create1 = BlobGenerator('nwOne',
                                    'nwOne',
                                    self._value_size,
                                    end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo',
                                    'nwTwo',
                                    self._value_size,
                                    end=self._num_items)
        tasks.extend(
            self.dest_cluster.async_load_all_buckets_from_generator(
                kv_gen=gen_create2))

        self.src_cluster.pause_all_replications()
        self.dest_cluster.pause_all_replications()

        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)
        self.dest_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        self.src_cluster.resume_all_replications()
        self.dest_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(),
                                     nw_limit=nw_limit)

    def test_nwusage_with_unidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create)

        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit)

    def test_nwusage_with_bidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create1 = BlobGenerator('nwOne',
                                    'nwOne',
                                    self._value_size,
                                    end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo',
                                    'nwTwo',
                                    self._value_size,
                                    end=self._num_items)
        tasks.extend(
            self.dest_cluster.async_load_all_buckets_from_generator(
                kv_gen=gen_create2))

        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)
        self.dest_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(),
                                     nw_limit=nw_limit)

    def test_nwusage_with_rebalance_in(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_in()

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=3)

    def test_nwusage_with_rebalance_out(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_out()

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     no_of_nodes=1)

    def test_nwusage_reset_to_zero(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(
            kv_gen=gen_create)

        self.sleep(30)
        self.src_cluster.set_xdcr_param("networkUsageLimit", 0)
        event_time = time.strftime('%Y-%m-%dT%H:%M:%S')
        self.log.info("Network limit reset to 0 at {0}".format(event_time))

        for task in tasks:
            task.result()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=nw_limit,
                                     end_time=event_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     nw_limit=0,
                                     no_of_nodes=2,
                                     event_time=event_time,
                                     nw_usage="0")

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()
        failover_time = time.strftime('%Y-%m-%dT%H:%M:%S')
        self.log.info("Node failed over at {0}".format(failover_time))

        self.sleep(15)

        self.src_cluster.rebalance_in()
        node_back_time = time.strftime('%Y-%m-%dT%H:%M:%S')
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     end_time=failover_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=failover_time,
                                     end_time=node_back_time,
                                     no_of_nodes=1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=node_back_time)

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)
        bw_enable_time = time.strftime('%Y-%m-%dT%H:%M:%S')
        self.log.info(
            "Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        self.src_cluster.rebalance_in()
        node_back_time = time.strftime('%Y-%m-%dT%H:%M:%S')
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=bw_enable_time,
                                     end_time=node_back_time,
                                     no_of_nodes=1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=node_back_time)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()

        self.src_cluster.rebalance_in()

        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.update_autofailover_settings(enabled=True, timeout=30)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(30)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [],
                                            [])
        task.result()
        failover_time = time.strftime('%Y-%m-%dT%H:%M:%S')
        self.log.info("Node auto failed over at {0}".format(failover_time))
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(45)
        self.src_cluster.rebalance_in()
        node_back_time = time.strftime('%Y-%m-%dT%H:%M:%S')
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     end_time=failover_time,
                                     no_of_nodes=3)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=failover_time,
                                     end_time=node_back_time,
                                     no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=node_back_time,
                                     no_of_nodes=3)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()

        self.src_cluster.rebalance_in()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(45)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [],
                                            [])
        task.result()
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", 1)
        self.src_cluster.set_xdcr_param("networkUsageLimit", nw_limit)
        bw_enable_time = time.strftime('%Y-%m-%dT%H:%M:%S')
        self.log.info(
            "Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(30)
        self.src_cluster.rebalance_in()
        node_back_time = time.strftime('%Y-%m-%dT%H:%M:%S')
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=bw_enable_time,
                                     end_time=node_back_time,
                                     no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=node_back_time,
                                     no_of_nodes=3)
Example No. 10
class nwusage(XDCRNewBaseTest):
    def setUp(self):
        super(nwusage, self).setUp()
        self.src_cluster = self.get_cb_cluster_by_name('C1')
        self.src_master = self.src_cluster.get_master_node()
        self.dest_cluster = self.get_cb_cluster_by_name('C2')
        self.dest_master = self.dest_cluster.get_master_node()
        self.cluster = Cluster()

    def tearDown(self):
        super(nwusage, self).tearDown()

    def _set_nwusage_limit(self, cluster, nw_limit=0):
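        """Set the networkUsageLimit setting on the cluster's first replication by
        POSTing to /settings/replications/<repl_id> on the master node via curl.
        """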
        repl_id = cluster.get_remote_clusters()[0].get_replications()[0].get_repl_id()
        shell = RemoteMachineShellConnection(cluster.get_master_node())
        repl_id = str(repl_id).replace('/','%2F')
        base_url = "http://" + cluster.get_master_node().ip + ":8091/settings/replications/" + repl_id
        command = "curl -X POST -u Administrator:password " + base_url + " -d networkUsageLimit=" + str(nw_limit)
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)

    def _verify_bandwidth_usage(self, node, nw_limit=1, no_of_nodes=2, event_time=None,
                                nw_usage="[1-9][0-9]*", end_time=None):
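        """Scan goxdcr.log on `node` for throttler stats of the form
        "bandwidth_limit=<nw_max>, bandwidth_usage=<usage>" and verify that every
        entry inside the checked time window stays at or below the per-node limit
        (nw_limit MB split across no_of_nodes). The window starts at event_time, or
        at the last "Success adding replication specification" message if no
        event_time is given, and optionally ends at end_time; entries outside the
        window are skipped as stale.
        """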
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(node) + '/goxdcr.log'
        nw_max = (nw_limit * 1024 * 1024) // no_of_nodes

        if event_time:
            time_to_compare = time.strptime(event_time, '%Y-%m-%dT%H:%M:%S')
        else:
            matches, _ = NodeHelper.check_goxdcr_log(node, "Success adding replication specification",
                                                 goxdcr_log, print_matches=True)
            time_to_compare_str = matches[-1].split(' ')[0].split('.')[0]
            time_to_compare = time.strptime(time_to_compare_str, '%Y-%m-%dT%H:%M:%S')

        matches, count = NodeHelper.check_goxdcr_log(node, "bandwidth_limit=" + str(nw_max) +
                                            ", bandwidth_usage=" + nw_usage, goxdcr_log, print_matches=True)
        match_count = 0
        skip_count = 0
        for item in matches:
            items = item.split(' ')
            item_time = items[0].split('.')[0]
            item_datetime = time.strptime(item_time, '%Y-%m-%dT%H:%M:%S')
            if item_datetime < time_to_compare:
                skip_count += 1
                continue
            if end_time:
                end_datetime = time.strptime(end_time, '%Y-%m-%dT%H:%M:%S')
                if item_datetime > end_datetime:
                    skip_count += 1
                    continue
            bandwidth_usage = items[-1].split('=')[-1]
            if int(bandwidth_usage) <= nw_max:
                match_count += 1
                continue
            else:
                self.fail("Bandwidth usage higher than Bandwidth limit in {0}".format(item))

        if match_count + skip_count == count:
            self.log.info("{0} stale entries skipped".format(skip_count))
            if match_count > 0:
                self.log.info("{0} entries checked - Bandwidth usage always lower than Bandwidth limit as expected".
                          format(match_count))
            else:
                if self._input.param("replication_type") == "capi":
                    self.log.info("Bandwidth Throttler not enabled on replication as expected")
                else:
                    self.fail("Bandwidth Throttler not enabled on replication")

    def _get_current_time(self, server):
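        """Return the current time on the given server in '%Y-%m-%dT%H:%M:%S' format,
        read via a remote `date` call so event times and goxdcr.log timestamps come
        from the same clock.
        """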
        shell = RemoteMachineShellConnection(server)
        command = "date +'%Y-%m-%dT%H:%M:%S'"
        output, error = shell.execute_command(command)
        shell.log_command_output(output, error)
        curr_time = output[0].strip()
        return curr_time

    def test_nwusage_with_unidirection(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_bidirection(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create2)

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_unidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.pause_all_replications()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        self.src_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_bidirection_pause_resume(self):
        self.setup_xdcr()

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        tasks.extend(self.dest_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create2))

        self.src_cluster.pause_all_replications()
        self.dest_cluster.pause_all_replications()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        self.src_cluster.resume_all_replications()
        self.dest_cluster.resume_all_replications()

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_unidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_bidirection_in_parallel(self):
        self.setup_xdcr()

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        tasks.extend(self.dest_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create2))

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)

    def test_nwusage_with_rebalance_in(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_in()

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, no_of_nodes=3)

    def test_nwusage_with_rebalance_out(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.rebalance_out()

        self.perform_update_delete()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, no_of_nodes=1)

    def test_nwusage_reset_to_zero(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        self.sleep(30)
        self._set_nwusage_limit(self.src_cluster, 0)
        event_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Network limit reset to 0 at {0}".format(event_time))

        for task in tasks:
            task.result()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit, end_time=event_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=0, no_of_nodes=2, event_time=event_time, nw_usage="0")

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()
        self.sleep(60)
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()
        failover_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node failed over at {0}".format(failover_time))

        self.sleep(15)

        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), end_time=failover_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=failover_time, end_time=node_back_time, no_of_nodes=1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time)

    def test_nwusage_with_hard_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        bw_enable_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=bw_enable_time, end_time=node_back_time, no_of_nodes=1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()

        self.src_cluster.rebalance_in()

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.update_autofailover_settings(enabled=True, timeout=30)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(30)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
        task.result()
        failover_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node auto failed over at {0}".format(failover_time))
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(45)
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), end_time=failover_time, no_of_nodes=3)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=failover_time, end_time=node_back_time, no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time, no_of_nodes=3)

    def test_nwusage_with_auto_failover_and_bwthrottle_enabled_later(self):
        self.setup_xdcr()

        self.src_cluster.rebalance_in()

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(45)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
        task.result()
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        bw_enable_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Bandwidth throttler enabled at {0}".format(bw_enable_time))

        self.sleep(60)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(30)
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=bw_enable_time, end_time=node_back_time, no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time, no_of_nodes=3)