Example #1
class XDCRBaseTest(unittest.TestCase):
    def setUp(self):
        try:
            self._log = logger.Logger.get_logger()
            self._input = TestInputSingleton.input
            self._init_parameters()
            self._cluster_helper = Cluster()
            self._log.info("==============  XDCRbasetests setup was started for test #{0} {1}=============="\
                .format(self._case_number, self._testMethodName))
            if not self._input.param("skip_cleanup", False):
                self._cleanup_previous_setup()

            self._init_clusters(self._disabled_consistent_view)
            self.setup_extended()
            self._log.info("==============  XDCRbasetests setup was finished for test #{0} {1} =============="\
                .format(self._case_number, self._testMethodName))
            self._log_start(self)
        except Exception as e:
            self._log.error(str(e))
            self._log.error("Error while setting up clusters: %s",
                            sys.exc_info())
            self._cleanup_broken_setup()
            raise

    def tearDown(self):
        try:
            self._log.info("==============  XDCRbasetests cleanup was started for test #{0} {1} =============="\
                .format(self._case_number, self._testMethodName))
            self.teardown_extended()
            self._do_cleanup()
            self._log.info("==============  XDCRbasetests cleanup was finished for test #{0} {1} =============="\
                .format(self._case_number, self._testMethodName))
        finally:
            self._cluster_helper.shutdown()
            self._log_finish(self)

    def _cleanup_previous_setup(self):
        self.teardown_extended()
        self._do_cleanup()

    def _init_parameters(self):
        self._log.info("Initializing input parameters started...")
        self._clusters_dic = self._input.clusters  # clusters is declared as a dict in TestInput, which is unordered.
        self._clusters_keys_olst = range(
            len(self._clusters_dic)
        )  # testrunner populates the dict so that the cluster ordinal is the key.
        # OrderedDict cannot be used, to keep compatibility with Python 2.6.
        self._cluster_counter_temp_int = 0
        self._cluster_names_dic = self._get_cluster_names()
        self._servers = self._input.servers
        self._disabled_consistent_view = self._input.param(
            "disabled_consistent_view", True)
        self._floating_servers_set = self._get_floating_servers(
        )  # These are the servers defined in .ini file but not linked to any cluster.
        self._cluster_counter_temp_int = 0  #TODO: fix the testrunner code to pass cluster name in params.
        self._buckets = []

        self._default_bucket = self._input.param("default_bucket", True)
        """
        ENTER: sasl_buckets=[no.] or standard_buckets=[no.]
        """
        self._standard_buckets = self._input.param("standard_buckets", 0)
        self._sasl_buckets = self._input.param("sasl_buckets", 0)

        if self._default_bucket:
            self.default_bucket_name = "default"

        self._num_replicas = self._input.param("replicas", 1)
        self._num_items = self._input.param("items", 1000)
        self._value_size = self._input.param("value_size", 256)
        self._dgm_run_bool = self._input.param("dgm_run", False)
        self._mem_quota_int = 0  # will be set in subsequent methods

        self._poll_interval = self._input.param(
            XDCRConstants.INPUT_PARAM_POLL_INTERVAL, 5)
        self._poll_timeout = self._input.param(
            XDCRConstants.INPUT_PARAM_POLL_TIMEOUT, 120)

        self.init_parameters_extended()

        self._doc_ops = self._input.param("doc-ops", None)
        if self._doc_ops is not None:
            self._doc_ops = self._doc_ops.split("-")
        self._doc_ops_dest = self._input.param("doc-ops-dest", None)
        # semi-colon separator is not accepted for some reason here
        if self._doc_ops_dest is not None:
            self._doc_ops_dest = self._doc_ops_dest.split("-")

        self._case_number = self._input.param("case_number", 0)
        self._expires = self._input.param("expires", 0)
        self._timeout = self._input.param("timeout", 60)
        self._percent_update = self._input.param("upd", 30)
        self._percent_delete = self._input.param("del", 30)
        self._warmup = self._input.param("warm", None)
        self._failover = self._input.param("failover", None)
        self._rebalance = self._input.param("rebalance", None)
        if self._warmup is not None:
            self._warmup = self._warmup.split("-")
        if self._failover is not None:
            self._failover = self._failover.split("-")
        if self._rebalance is not None:
            self._rebalance = self._rebalance.split("-")
            self._num_rebalance = self._input.param("num_rebalance", 1)
        """
        CREATE's a set of items,
        UPDATE's UPD% of the items starting from 0,
        DELETE's DEL% of the items starting from the end (count(items)).
        """
        self.gen_create = BlobGenerator('loadOne',
                                        'loadOne',
                                        self._value_size,
                                        end=self._num_items)
        self.gen_delete = BlobGenerator(
            'loadOne',
            'loadOne-',
            self._value_size,
            start=int(self._num_items * (100 - self._percent_delete) / 100.0),
            end=self._num_items)
        self.gen_update = BlobGenerator(
            'loadOne',
            'loadOne-',
            self._value_size,
            start=0,
            end=int(self._num_items * self._percent_update / 100.0))

        self.ord_keys = self._clusters_keys_olst
        self.ord_keys_len = len(self.ord_keys)

        self.src_nodes = self._clusters_dic[0]
        self.src_master = self.src_nodes[0]

        self.dest_nodes = self._clusters_dic[1]
        self.dest_master = self.dest_nodes[0]

        self._defaul_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
        self._default_view_name = "default_view"
        self._default_view = View(self._default_view_name,
                                  self._defaul_map_func, None)
        self._num_views = self._input.param("num_views", 5)
        self._is_dev_ddoc = self._input.param("is_dev_ddoc", True)

        self.fragmentation_value = self._input.param("fragmentation_value", 80)
        self.disable_src_comp = self._input.param("disable_src_comp", True)
        self.disable_dest_comp = self._input.param("disable_dest_comp", True)

        self._log.info("Initializing input parameters completed.")

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(),
                                              self._testMethodName)
            RestConnection(self.src_master).log_client_error(msg)
            RestConnection(self.dest_master).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(),
                                               self._testMethodName)
            RestConnection(self.src_master).log_client_error(msg)
            RestConnection(self.dest_master).log_client_error(msg)
        except:
            pass

    def _get_floating_servers(self):
        cluster_nodes = []
        # copy the server list so the shared input list is not mutated
        floating_servers = list(self._servers)

        for key, nodes in self._clusters_dic.items():
            cluster_nodes.extend(nodes)

        for c_node in cluster_nodes:
            # iterate over a snapshot since we remove items while looping
            for node in list(floating_servers):
                if node.ip in str(c_node) and node.port in str(c_node):
                    floating_servers.remove(node)

        return floating_servers

    def _init_clusters(self, disabled_consistent_view=None):
        for key in self._clusters_keys_olst:
            self._setup_cluster(self._clusters_dic[key],
                                disabled_consistent_view)

    # This method shall be overridden in case there are parameters that need to be initialized.
    def init_parameters_extended(self):
        pass

    # This method shall be overridden in case there are custom steps involved during setup.
    def setup_extended(self):
        pass

    # This method shall be overridden in case there are custom steps involved during teardown.
    def teardown_extended(self):
        pass

    def _do_cleanup(self):
        for key in self._clusters_keys_olst:
            nodes = self._clusters_dic[key]
            BucketOperationHelper.delete_all_buckets_or_assert(nodes, self)
            ClusterOperationHelper.cleanup_cluster(nodes)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, self)

    def _cleanup_broken_setup(self):
        try:
            self.tearDown()
        except:
            self._log.info("Error while cleaning broken setup.")

    def _get_cluster_names(self):
        cs_names = {}
        for key in self._clusters_keys_olst:
            cs_names[key] = "cluster{0}".format(self._cluster_counter_temp_int)
            self._cluster_counter_temp_int += 1
        return cs_names

    def _setup_cluster(self, nodes, disabled_consistent_view=None):
        self._init_nodes(nodes, disabled_consistent_view)
        self._config_cluster(nodes)
        self._create_buckets(nodes)

    def _init_nodes(self, nodes, disabled_consistent_view=None):
        _tasks = []
        for node in nodes:
            _tasks.append(
                self._cluster_helper.async_init_node(node,
                                                     disabled_consistent_view))
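        # Keep the smallest memory quota reported by any node, so that bucket
        # sizing never exceeds the quota of the most constrained node.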
        for task in _tasks:
            mem_quota_node = task.result()
            if mem_quota_node < self._mem_quota_int or self._mem_quota_int == 0:
                self._mem_quota_int = mem_quota_node

    def _create_sasl_buckets(self, server, server_id, bucket_size):
        bucket_tasks = []
        for i in range(self._sasl_buckets):
            name = "sasl_bucket_" + str(i + 1)
            bucket_tasks.append(
                self._cluster_helper.async_create_sasl_bucket(
                    server, name, 'password', bucket_size, self._num_replicas))
            self._buckets.append(
                Bucket(name=name,
                       authType="sasl",
                       saslPassword="******",
                       num_replicas=self._num_replicas,
                       bucket_size=bucket_size,
                       master_id=server_id))

        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, server_id, bucket_size):
        bucket_tasks = []
        for i in range(self._standard_buckets):
            name = "standard_bucket_" + str(i + 1)
            bucket_tasks.append(
                self._cluster_helper.async_create_standard_bucket(
                    server, name, 11214 + i, bucket_size, self._num_replicas))
            self._buckets.append(
                Bucket(name=name,
                       authType=None,
                       saslPassword=None,
                       num_replicas=self._num_replicas,
                       bucket_size=bucket_size,
                       master_id=server_id))

        for task in bucket_tasks:
            task.result()

    def _create_buckets(self, nodes):
        if self._dgm_run_bool:
            self._mem_quota_int = 256
        master_node = nodes[0]
        bucket_size = self._get_bucket_size(master_node, nodes,
                                            self._mem_quota_int,
                                            self._default_bucket)
        rest = RestConnection(master_node)
        master_id = rest.get_nodes_self().id

        if self._sasl_buckets > 0:
            self._create_sasl_buckets(master_node, master_id, bucket_size)
        if self._standard_buckets > 0:
            self._create_standard_buckets(master_node, master_id, bucket_size)
        if self._default_bucket:
            self._cluster_helper.create_default_bucket(master_node,
                                                       bucket_size,
                                                       self._num_replicas)
            self._buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self._num_replicas,
                       bucket_size=bucket_size,
                       master_id=master_id))

    def _config_cluster(self, nodes):
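        # Rebalance the remaining nodes (nodes[1:]) into the cluster formed by
        # nodes[0]; nothing is removed.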
        task = self._cluster_helper.async_rebalance(nodes, nodes[1:], [])
        task.result()

    def _get_bucket_size(self,
                         master_node,
                         nodes,
                         mem_quota,
                         num_buckets,
                         ratio=2.0 / 3.0):
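        # For example, assuming mem_quota=1000 MB, 4 nodes and 2 buckets, each
        # bucket would get int(2/3 / 4 / 2 * 1000) = 83 MB per node.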
        for node in nodes:
            if node.ip == master_node.ip:
                return int(ratio / float(len(nodes)) / float(num_buckets) *
                           float(mem_quota))
        return int(ratio / float(num_buckets) * float(mem_quota))

    def _poll_for_condition(self, condition):
        timeout = self._poll_timeout
        interval = self._poll_interval
        num_itr = timeout / interval
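        # e.g. with the defaults poll_timeout=120 and poll_interval=5 this
        # allows up to 24 polling attempts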
        return self._poll_for_condition_rec(condition, interval, num_itr)

    def _poll_for_condition_rec(self, condition, sleep, num_itr):
        if num_itr == 0:
            return False
        else:
            if condition():
                return True
            else:
                time.sleep(sleep)
                return self._poll_for_condition_rec(condition, sleep,
                                                    (num_itr - 1))

    def do_a_warm_up(self, node):
        shell = RemoteMachineShellConnection(node)
        shell.stop_couchbase()
        time.sleep(5)
        shell.start_couchbase()
        shell.disconnect()

    def adding_back_a_node(self, master, server):
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        for node in nodes:
            if server.ip == node.ip and int(server.port) == int(node.port):
                rest.add_back_node(node.id)

    def _get_cluster_buckets(self, master_server):
        rest = RestConnection(master_server)
        master_id = rest.get_nodes_self().id
        #verify if node_ids were changed for cluster_run
        for bucket in self._buckets:
            if ("127.0.0.1" in bucket.master_id and "127.0.0.1" not in master_id) or \
               ("localhost" in bucket.master_id and "localhost" not in master_id):
                new_ip = master_id[master_id.index("@") + 1:]
                bucket.master_id = bucket.master_id.replace("127.0.0.1", new_ip).\
                                   replace("localhost", new_ip)
        return [
            bucket for bucket in self._buckets if bucket.master_id == master_id
        ]

    """merge 2 different kv strores from different clsusters/buckets
       assume that all elements in the second kvs are more relevant.

    Returns:
            merged kvs, that we expect to get on both clusters
    """

    def merge_keys(self, kv_store_first, kv_store_second, kvs_num=1):
        valid_keys_first, deleted_keys_first = kv_store_first[kvs_num].key_set(
        )
        valid_keys_second, deleted_keys_second = kv_store_second[
            kvs_num].key_set()

        for key in valid_keys_second:
            # replace the value for each key in the first kvs if the key is also present in the second one
            if key in valid_keys_first:
                partition1 = kv_store_first[kvs_num].acquire_partition(key)
                partition2 = kv_store_second[kvs_num].acquire_partition(key)
                key_add = partition2.get_key(key)
                partition1.valid[key] = {
                    "value": key_add["value"],
                    "expires": key_add["expires"],
                    "flag": key_add["flag"]
                }
                kv_store_first[kvs_num].release_partition(key)
                kv_store_second[kvs_num].release_partition(key)
            # add keys/values to the first kvs if the key is present only in the second one
            else:
                partition1, num_part = kv_store_first[
                    kvs_num].acquire_random_partition()
                partition2 = kv_store_second[kvs_num].acquire_partition(key)
                key_add = partition2.get_key(key)
                partition1.valid[key] = {
                    "value": key_add["value"],
                    "expires": key_add["expires"],
                    "flag": key_add["flag"]
                }
                kv_store_first[kvs_num].release_partition(num_part)
                kv_store_second[kvs_num].release_partition(key)
            # TODO: handle the case where a key was deleted in the first kvs but added in the second

        for key in deleted_keys_second:
            # the same keys were deleted in both kvs
            if key in deleted_keys_first:
                pass
            # add deleted keys to the first kvs if they were deleted only in the second kvs
            else:
                partition1 = kv_store_first[kvs_num].acquire_partition(key)
                partition2 = kv_store_second[kvs_num].acquire_partition(key)
                partition1.deleted[key] = partition2.get_key(key)
                kv_store_first[kvs_num].release_partition(key)
                kv_store_second[kvs_num].release_partition(key)
            # return merged kvs, that we expect to get on both clusters
        return kv_store_first[kvs_num]

    def merge_buckets(self, src_master, dest_master, bidirection=True):
        if self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_CHAIN:
            self.do_merge_buckets(src_master, dest_master, bidirection)
        elif self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_STAR:
            for i in range(1, len(self._clusters_dic)):
                dest_cluster = self._clusters_dic[i]
                self.do_merge_buckets(src_master, dest_cluster[0], bidirection)

    def do_merge_buckets(self, src_master, dest_master, bidirection):
        src_buckets = self._get_cluster_buckets(src_master)
        dest_buckets = self._get_cluster_buckets(dest_master)
        for src_bucket in src_buckets:
            for dest_bucket in dest_buckets:
                if src_bucket.name == dest_bucket.name:
                    if bidirection:
                        src_bucket.kvs[1] = self.merge_keys(src_bucket.kvs,
                                                            dest_bucket.kvs,
                                                            kvs_num=1)
                    dest_bucket.kvs[1] = src_bucket.kvs[1]
        """Verify the stats at the destination cluster
        1. Data Validity check - using kvstore-node key-value check
        2. Item count check on source versus destination
        3. For deleted and updated items, check the CAS/SeqNo/Expiry/Flags for same key on source/destination
        * Make sure to call expiry_pager function to flush out temp items(deleted/expired items)"""

    def verify_xdcr_stats(self, src_nodes, dest_nodes, verify_src=False):
        if self._num_items < 10000:
            timeout = 120
        elif self._num_items < 50000:
            timeout = 300
        elif self._num_items < 100000:
            timeout = 500
        else:
            timeout = 600

        if self._failover is not None or self._rebalance is not None:
            timeout = timeout * 3 // 2

        # verifying both the source and the destination cluster needs more time
        if verify_src:
            timeout = timeout * 3 // 2

        end_time = time.time() + timeout
        self._log.info(
            "Verify xdcr replication stats at Destination Cluster : {0}".
            format(self.dest_nodes[0].ip))
        if verify_src:
            timeout = max(120, end_time - time.time())
            self._wait_for_stats_all_buckets(self.src_nodes, timeout=timeout)
        timeout = max(120, end_time - time.time())
        self._wait_for_stats_all_buckets(self.dest_nodes, timeout=timeout)
        self._expiry_pager(self.src_nodes[0])
        self._expiry_pager(self.dest_nodes[0])
        if verify_src:
            timeout = max(120, end_time - time.time())
            self._verify_stats_all_buckets(self.src_nodes, timeout=timeout)
            timeout = max(120, end_time - time.time())
            self._verify_all_buckets(self.src_master, timeout=timeout)
        timeout = max(120, end_time - time.time())
        self._verify_stats_all_buckets(self.dest_nodes, timeout=timeout)
        timeout = max(120, end_time - time.time())
        self._verify_all_buckets(self.dest_master, timeout=timeout)

        errors_caught = 0
        if self._doc_ops is not None or self._doc_ops_dest is not None:
            if "update" in self._doc_ops or (self._doc_ops_dest is not None and
                                             "update" in self._doc_ops_dest):
                errors_caught = self._verify_revIds(self.src_nodes[0],
                                                    self.dest_nodes[0],
                                                    "update")

            if "delete" in self._doc_ops or (self._doc_ops_dest is not None and
                                             "delete" in self._doc_ops_dest):
                errors_caught = self._verify_revIds(self.src_nodes[0],
                                                    self.dest_nodes[0],
                                                    "delete")

        if errors_caught > 0:
            self.fail(
                "Mismatches on Meta Information on xdcr-replicated items!")

    def verify_results(self, verify_src=False):
        # Checking replication at destination clusters
        dest_key_index = 1
        for key in self.ord_keys[1:]:
            if dest_key_index == self.ord_keys_len:
                break
            dest_key = self.ord_keys[dest_key_index]
            self.dest_nodes = self._clusters_dic[dest_key]

            self.verify_xdcr_stats(self.src_nodes, self.dest_nodes, verify_src)
            dest_key_index += 1

    def wait_warmup_completed(self, warmupnodes, bucket_names=["default"]):
        if isinstance(bucket_names, str):
            bucket_names = [bucket_names]
        for server in warmupnodes:
            for bucket in bucket_names:
                mc = MemcachedClientHelper.direct_client(server, bucket)
                start = time.time()
                while time.time() - start < 150:
                    if mc.stats()["ep_warmup_thread"] == "complete":
                        self._log.info("Warmed up: %s items " %
                                       (mc.stats()["curr_items_tot"]))
                        time.sleep(10)
                        break
                    elif mc.stats()["ep_warmup_thread"] == "running":
                        self._log.info(
                            "Still warming up .. curr_items_tot : %s" %
                            (mc.stats()["curr_items_tot"]))
                        continue
                    else:
                        self._log.info(
                            "Value of ep_warmup_thread does not exist, exiting from this server"
                        )
                        break
                if mc.stats()["ep_warmup_thread"] == "running":
                    self._log.info(
                        "ERROR: ep_warmup_thread's status not complete")
                mc.close

    def _modify_src_data(self):
        """Setting up creates/updates/deletes at source nodes"""

        if self._doc_ops is not None:
            if "create" in self._doc_ops:
                self._load_all_buckets(self.src_master, self.gen_create,
                                       "create", 0)
            if "update" in self._doc_ops:
                self._load_all_buckets(self.src_master, self.gen_update,
                                       "update", self._expires)
            if "delete" in self._doc_ops:
                self._load_all_buckets(self.src_master, self.gen_delete,
                                       "delete", 0)
            self._wait_for_stats_all_buckets(self.src_nodes)

    def disable_compaction(self, server=None, bucket="default"):
        server = server or self.src_master
        new_config = {
            "viewFragmntThresholdPercentage": None,
            "dbFragmentThresholdPercentage": None,
            "dbFragmentThreshold": None,
            "viewFragmntThreshold": None
        }
        self._cluster_helper.modify_fragmentation_config(
            server, new_config, bucket)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self._default_view
        ref_view.name = ref_view.name if prefix is None else prefix
        return [
            View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc)
            for i in xrange(count)
        ]

    def async_create_views(self,
                           server,
                           design_doc_name,
                           views,
                           bucket="default"):
        tasks = []
        if len(views):
            for view in views:
                t_ = self._cluster_helper.async_create_view(
                    server, design_doc_name, view, bucket)
                tasks.append(t_)
        else:
            t_ = self._cluster_helper.async_create_view(
                server, design_doc_name, None, bucket)
            tasks.append(t_)
        return tasks
Example #2
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        self.buckets = {}

        self.default_bucket = self.input.param("default_bucket", True)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
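        # default_bucket is a bool, so it contributes 1 to the total when True
        # and 0 when False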
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)

        if not self.input.param("skip_cleanup", False):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)

        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets['default'] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
        # TODO (Mike): Create Standard buckets

    def tearDown(self):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.buckets = {}
        self.cluster.shutdown()

    def _initialize_nodes(self, cluster, servers):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0/3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self.num_replicas))
            self.buckets[name] = {1 : KVStore()}
        for task in bucket_tasks:
            task.result()

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket, kv_stores in self.buckets.items():
            items = sum([len(kv_store) for kv_store in kv_stores.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1
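            # The checks below assume vb_replica_curr_items counts replica
            # copies only, while curr_items_tot counts active plus replica
            # copies, hence items * (available_replicas + 1).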

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)


    """Asynchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """
    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1):
        tasks = []
        for bucket, kv_stores in self.buckets.items():
            gen = copy.deepcopy(kv_gen)
            tasks.append(self.cluster.async_load_gen_docs(server, bucket, gen,
                                                          kv_stores[kv_store],
                                                          op_type, exp))
        return tasks

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """
    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """
    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """
    def _verify_all_buckets(self, server, kv_store=1):
        tasks = []
        for bucket, kv_stores in self.buckets.items():
            tasks.append(self.cluster.async_verify_data(server, bucket, kv_stores[kv_store]))
        for task in tasks:
            task.result()
Example #3
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the test case being run by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        # maximum number of items to verify in ValidateDataTask; None means verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test already tore everything down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        self.quota = self._initialize_nodes(self.cluster, self.servers,
                                            self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota,
                                                     self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        if not self.input.param("skip_cleanup", False):
            try:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning(
                        "rebalancing is still running, test should be verified"
                    )
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(
                    self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            finally:
                #stop all existing task manager threads
                self.cluster.shutdown()
                self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(),
                                              self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(),
                                               self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self,
                          cluster,
                          servers,
                          disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(
                cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0 / 3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) /
                           float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(
                self.cluster.async_create_sasl_bucket(server, name, 'password',
                                                      self.bucket_size,
                                                      self.num_replicas))
            self.buckets.append(
                Bucket(name=name,
                       authType="sasl",
                       saslPassword='******',
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(
                self.cluster.async_create_standard_bucket(
                    server, name, 11214 + i, self.bucket_size,
                    self.num_replicas))

            self.buckets.append(
                Bucket(name=name,
                       authType=None,
                       saslPassword=None,
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size,
                       port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _all_buckets_delete(self, server):
        delete_tasks = []
        for bucket in self.buckets:
            delete_tasks.append(
                self.cluster.async_bucket_delete(server, bucket.name))

        for task in delete_tasks:
            task.result()
        self.buckets = []

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'curr_items', '==', items))
            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'vb_active_curr_items', '==',
                                                  items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'vb_replica_curr_items',
                                                  '==',
                                                  items * available_replicas))
            stats_tasks.append(
                self.cluster.async_wait_for_stats(
                    servers, bucket, '', 'curr_items_tot', '==',
                    items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)

    """Asynchronously applys load generation to all bucekts in the cluster.
 bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp
    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """

    def _async_load_all_buckets(self,
                                server,
                                kv_gen,
                                op_type,
                                exp,
                                kv_store=1,
                                flag=0,
                                only_store_hash=True,
                                batch_size=1,
                                pause_secs=1,
                                timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(
                self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                 bucket.kvs[kv_store], op_type,
                                                 exp, flag, only_store_hash,
                                                 batch_size, pause_secs,
                                                 timeout_secs))
        return tasks

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """

    def _load_all_buckets(self,
                          server,
                          kv_gen,
                          op_type,
                          exp,
                          kv_store=1,
                          flag=0,
                          only_store_hash=True,
                          batch_size=1,
                          pause_secs=1,
                          timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp,
                                             kv_store, flag, only_store_hash,
                                             batch_size, pause_secs,
                                             timeout_secs)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """

    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(
                    self.cluster.async_wait_for_stats([server], bucket, '',
                                                      'ep_queue_size', '==',
                                                      0))
                tasks.append(
                    self.cluster.async_wait_for_stats([server], bucket, '',
                                                      'ep_flusher_todo', '==',
                                                      0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """

    def _verify_all_buckets(self,
                            server,
                            kv_store=1,
                            timeout=180,
                            max_verify=None,
                            only_store_hash=True,
                            batch_size=1):
        tasks = []
        for bucket in self.buckets:
            tasks.append(
                self.cluster.async_verify_data(server, bucket,
                                               bucket.kvs[kv_store],
                                               max_verify, only_store_hash,
                                               batch_size))
        for task in tasks:
            task.result(timeout)

    def disable_compaction(self, server=None, bucket="default"):

        server = server or self.servers[0]
        new_config = {
            "viewFragmntThresholdPercentage": None,
            "dbFragmentThresholdPercentage": None,
            "dbFragmentThreshold": None,
            "viewFragmntThreshold": None
        }
        self.cluster.modify_fragmentation_config(server, new_config, bucket)

    def async_create_views(self,
                           server,
                           design_doc_name,
                           views,
                           bucket="default"):
        tasks = []
        if len(views):
            for view in views:
                t_ = self.cluster.async_create_view(server, design_doc_name,
                                                    view, bucket)
                tasks.append(t_)
        else:
            t_ = self.cluster.async_create_view(server, design_doc_name, None,
                                                bucket)
            tasks.append(t_)
        return tasks

    def create_views(self,
                     server,
                     design_doc_name,
                     views,
                     bucket="default",
                     timeout=None):
        if len(views):
            for view in views:
                self.cluster.create_view(server, design_doc_name, view, bucket,
                                         timeout)
        else:
            self.cluster.create_view(server, design_doc_name, None, bucket,
                                     timeout)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self.default_view
        ref_view.name = ref_view.name if prefix is None else prefix
        return [
            View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc)
            for i in xrange(count)
        ]

    def _load_doc_data_all_buckets(self, data_op="create"):
        #initialize the template for document generator
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs',
                                     template,
                                     age,
                                     first,
                                     start=0,
                                     end=self.num_items)

        self.log.info("%s %s documents..." % (data_op, self.num_items))
        self._load_all_buckets(self.master, gen_load, data_op, 0)

    # Returns True if warmup completes within wait_time seconds, otherwise False.
    @staticmethod
    def _wait_warmup_completed(self, servers, bucket_name, wait_time=300):
        warmed_up = False
        log = logger.Logger.get_logger()
        for server in servers:
            mc = None
            start = time.time()
            # Try to get the stats for up to wait_time seconds (5 minutes by default), then give up.
            while time.time() - start < wait_time:
                # Get the warmup time for each server
                try:
                    mc = MemcachedClientHelper.direct_client(
                        server, bucket_name)
                    stats = mc.stats()
                    if stats is not None:
                        warmup_time = int(stats["ep_warmup_time"])
                        log.info("ep_warmup_time is %s " % warmup_time)
                        log.info(
                            "Collected the stats %s for server %s:%s" %
                            (stats["ep_warmup_time"], server.ip, server.port))
                        break
                    else:
                        log.info(
                            " Did not get the stats from the server yet, trying again....."
                        )
                        time.sleep(2)
                except Exception as e:
                    log.error(
                        "Could not get warmup_time stats from server %s:%s, exception %s"
                        % (server.ip, server.port, e))
                    time.sleep(2)
            else:
                self.fail(
                    "Fail! Unable to get the warmup-stats from server %s:%s after trying for %s seconds."
                    % (server.ip, server.port, wait_time))

            # Waiting for warm-up
            start = time.time()
            warmed_up = False
            while time.time() - start < wait_time and not warmed_up:
                if mc.stats()["ep_warmup_thread"] == "complete":
                    log.info(
                        "warmup completed, awesome!!! Warmed up. %s items " %
                        (mc.stats()["curr_items_tot"]))
                    warmed_up = True
                    continue
                elif mc.stats()["ep_warmup_thread"] == "running":
                    log.info("still warming up .... curr_items_tot : %s" %
                             (mc.stats()["curr_items_tot"]))
                else:
                    self.fail(
                        "Value of ep_warmup_thread stat does not exist, exiting from this server"
                    )
                time.sleep(5)
            mc.close()
        return warmed_up
Example #4
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the test case being run by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        # initial number of nodes in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)

        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        # maximum number of items to verify in ValidateDataTask; None means verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test already tore everything down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1:
            #rebalance all nodes into the cluster before each test
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   self.servers[1:self.num_servers], [])
        elif self.nodes_init > 1:
            self.cluster.rebalance(self.servers[:1],
                                   self.servers[1:self.nodes_init], [])
        self.quota = self._initialize_nodes(self.cluster, self.servers,
                                            self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota,
                                                     self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        try:
            if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                and TestInputSingleton.input.param("stop-on-failure", False))\
                    or self.input.param("skip_cleanup", False):
                self.log.warn("CLEANUP WAS SKIPPED")
            else:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning(
                        "rebalancing is still running, test should be verified"
                    )
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(
                    self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        finally:
            #stop all existing task manager threads
            self.cluster.shutdown()
            self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(),
                                              self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(),
                                               self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self,
                          cluster,
                          servers,
                          disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(
                cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0 / 3.0):
        # compare ip strings; when the first server belongs to the cluster,
        # split the quota across servers as well as buckets
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) /
                           float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))
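    # Illustrative sizing example (hypothetical numbers, not from the suite):
    # with a node quota of 1200 MB, num_servers=2 and 3 buckets, the formula
    # above gives int(2.0 / 3.0 / 2 / 3 * 1200) = 133 MB per bucket, i.e.
    # roughly two thirds of the quota split across servers and buckets.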

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(
                self.cluster.async_create_sasl_bucket(server, name, 'password',
                                                      self.bucket_size,
                                                      self.num_replicas))
            self.buckets.append(
                Bucket(name=name,
                       authType="sasl",
                       saslPassword='******',
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(
                self.cluster.async_create_standard_bucket(
                    server, name, 11214 + i, self.bucket_size,
                    self.num_replicas))

            self.buckets.append(
                Bucket(name=name,
                       authType=None,
                       saslPassword=None,
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size,
                       port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _all_buckets_delete(self, server):
        delete_tasks = []
        for bucket in self.buckets:
            delete_tasks.append(
                self.cluster.async_bucket_delete(server, bucket.name))

        for task in delete_tasks:
            task.result()
        self.buckets = []

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'curr_items', '==', items))
            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'vb_active_curr_items', '==',
                                                  items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'vb_replica_curr_items',
                                                  '==',
                                                  items * available_replicas))
            stats_tasks.append(
                self.cluster.async_wait_for_stats(
                    servers, bucket, '', 'curr_items_tot', '==',
                    items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)
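    # Worked example of the replica math above (hypothetical numbers): with
    # num_replicas=2 and 4 servers, available_replicas stays 2, so for 1000
    # items per bucket the waits expect vb_replica_curr_items == 2000 and
    # curr_items_tot == 3000 (active plus replica copies). With only 2
    # servers (<= num_replicas), the replica count is capped at
    # len(servers) - 1 = 1.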

    """Asynchronously applys load generation to all bucekts in the cluster.
 bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp
    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """

    def _async_load_all_buckets(self,
                                server,
                                kv_gen,
                                op_type,
                                exp,
                                kv_store=1,
                                flag=0,
                                only_store_hash=True,
                                batch_size=1,
                                pause_secs=1,
                                timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(
                self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                 bucket.kvs[kv_store], op_type,
                                                 exp, flag, only_store_hash,
                                                 batch_size, pause_secs,
                                                 timeout_secs))
        return tasks
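    # Hypothetical usage sketch (the doc template mirrors the one used in
    # _load_doc_data_all_buckets below; names here are illustrative only):
    #
    #   age = range(5)
    #   first = ['james', 'sharon']
    #   template = '{{ "age": {0}, "first_name": "{1}" }}'
    #   gen = DocumentGenerator('demo_docs', template, age, first,
    #                           start=0, end=self.num_items)
    #   tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
    #   for task in tasks:
    #       task.result()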

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """

    def _load_all_buckets(self,
                          server,
                          kv_gen,
                          op_type,
                          exp,
                          kv_store=1,
                          flag=0,
                          only_store_hash=True,
                          batch_size=1000,
                          pause_secs=1,
                          timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp,
                                             kv_store, flag, only_store_hash,
                                             batch_size, pause_secs,
                                             timeout_secs)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """

    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(
                    self.cluster.async_wait_for_stats([server], bucket, '',
                                                      'ep_queue_size', '==',
                                                      0))
                tasks.append(
                    self.cluster.async_wait_for_stats([server], bucket, '',
                                                      'ep_flusher_todo', '==',
                                                      0))
        for task in tasks:
            task.result()
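    # Typical (hypothetical) pairing: after loading documents with a generator
    # such as the one built in _load_doc_data_all_buckets, wait for the disk
    # write queues to drain on every node before verifying anything.
    #
    #   self._load_all_buckets(self.master, gen_load, "create", 0)
    #   self._wait_for_stats_all_buckets(self.servers[:self.num_servers])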

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """

    def _verify_all_buckets(self,
                            server,
                            kv_store=1,
                            timeout=180,
                            max_verify=None,
                            only_store_hash=True,
                            batch_size=1000):
        tasks = []
        for bucket in self.buckets:
            tasks.append(
                self.cluster.async_verify_data(server, bucket,
                                               bucket.kvs[kv_store],
                                               max_verify, only_store_hash,
                                               batch_size))
        for task in tasks:
            task.result(timeout)
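    # Hypothetical verification sketch: compare the cluster contents against
    # the client-side kv_store bookkeeping, bounding the check with the
    # max_verify test parameter when it is set.
    #
    #   self._verify_all_buckets(self.master, max_verify=self.max_verify)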

    def disable_compaction(self, server=None, bucket="default"):

        server = server or self.servers[0]
        new_config = {
            "viewFragmntThresholdPercentage": None,
            "dbFragmentThresholdPercentage": None,
            "dbFragmentThreshold": None,
            "viewFragmntThreshold": None
        }
        self.cluster.modify_fragmentation_config(server, new_config, bucket)

    def async_create_views(self,
                           server,
                           design_doc_name,
                           views,
                           bucket="default",
                           with_query=True):
        tasks = []
        if len(views):
            for view in views:
                t_ = self.cluster.async_create_view(server, design_doc_name,
                                                    view, bucket, with_query)
                tasks.append(t_)
        else:
            t_ = self.cluster.async_create_view(server, design_doc_name, None,
                                                bucket, with_query)
            tasks.append(t_)
        return tasks

    def create_views(self,
                     server,
                     design_doc_name,
                     views,
                     bucket="default",
                     timeout=None):
        if len(views):
            for view in views:
                self.cluster.create_view(server, design_doc_name, view, bucket,
                                         timeout)
        else:
            self.cluster.create_view(server, design_doc_name, None, bucket,
                                     timeout)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self.default_view
        ref_view.name = (prefix, ref_view.name)[prefix is None]
        return [
            View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc)
            for i in xrange(count)
        ]
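    # Hypothetical usage of the view helpers above (assumes the concrete test
    # class initializes self.default_view; names are illustrative):
    #
    #   views = self.make_default_views("test_view", 3)
    #   tasks = self.async_create_views(self.master, "ddoc1", views)
    #   for task in tasks:
    #       task.result(self.wait_timeout)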

    def _load_doc_data_all_buckets(self, data_op="create", batch_size=1000):
        #initialize the template for document generator
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs',
                                     template,
                                     age,
                                     first,
                                     start=0,
                                     end=self.num_items)

        self.log.info("%s %s documents..." % (data_op, self.num_items))
        self._load_all_buckets(self.master,
                               gen_load,
                               data_op,
                               0,
                               batch_size=batch_size)

    def verify_cluster_stats(self, servers=None, master=None, max_verify=None):
        if servers is None:
            servers = self.servers
        if master is None:
            master = self.master
        if max_verify is None:
            max_verify = self.max_verify
        self._wait_for_stats_all_buckets(servers)
        self._verify_all_buckets(master, max_verify=max_verify)
        self._verify_stats_all_buckets(servers)
        #verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(
                master, bucket)
        self.assertTrue(
            verified,
            "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total"
        )
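    # End-to-end flow these helpers are written for (hypothetical sketch of a
    # concrete test method, not part of this base class):
    #
    #   def test_load_and_verify(self):
    #       gen = BlobGenerator('demo', 'demo-', 256, end=self.num_items)
    #       self._load_all_buckets(self.master, gen, "create", 0)
    #       self.verify_cluster_stats(self.servers[:self.num_servers])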
Example #5
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        #number of case that is performed from testrunner( increment each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        #maximum number of items to verify in ValidateDataTask; None means verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        #avoid cleanup if the previous test has already been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        if not self.input.param("skip_cleanup", False):
            try:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning("rebalancing is still running, test should be verified")
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            finally:
                #stop all existing task manager threads
                self.cluster.shutdown()
                self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0 / 3.0):
        # compare ip strings; split the quota across servers as well as buckets
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self.num_replicas))
            self.buckets.append(Bucket(name=name, authType="sasl", saslPassword='******',
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size));
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_standard_bucket(server, name,
                                                                          11214 + i,
                                                                          self.bucket_size,
                                                                          self.num_replicas))

            self.buckets.append(Bucket(name=name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                       bucket_size=self.bucket_size, port=11214 + i));
        for task in bucket_tasks:
            task.result()

    def _all_buckets_delete(self, server):
        delete_tasks = []
        for bucket in self.buckets:
            delete_tasks.append(self.cluster.async_bucket_delete(server, bucket.name))

        for task in delete_tasks:
            task.result()
        self.buckets = []

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)


    """Asynchronously applys load generation to all bucekts in the cluster.
 bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp
    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """
    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs))
        return tasks

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """
    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """
    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """
    def _verify_all_buckets(self, server, kv_store=1, timeout=180, max_verify=None, only_store_hash=True, batch_size=1):
        tasks = []
        for bucket in self.buckets:
            tasks.append(self.cluster.async_verify_data(server, bucket, bucket.kvs[kv_store], max_verify, only_store_hash, batch_size))
        for task in tasks:
            task.result(timeout)


    def disable_compaction(self, server=None, bucket="default"):

        server = server or self.servers[0]
        new_config = {"viewFragmntThresholdPercentage" : None,
                      "dbFragmentThresholdPercentage" :  None,
                      "dbFragmentThreshold" : None,
                      "viewFragmntThreshold" : None}
        self.cluster.modify_fragmentation_config(server, new_config, bucket)

    def async_create_views(self, server, design_doc_name, views, bucket="default"):
        tasks = []
        if len(views):
            for view in views:
                t_ = self.cluster.async_create_view(server, design_doc_name, view, bucket)
                tasks.append(t_)
        else:
            t_ = self.cluster.async_create_view(server, design_doc_name, None, bucket)
            tasks.append(t_)
        return tasks

    def create_views(self, server, design_doc_name, views, bucket="default", timeout=None):
        if len(views):
            for view in views:
                self.cluster.create_view(server, design_doc_name, view, bucket, timeout)
        else:
            self.cluster.create_view(server, design_doc_name, None, bucket, timeout)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self.default_view
        ref_view.name = (prefix, ref_view.name)[prefix is None]
        return [View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc) for i in xrange(count)]

    def _load_doc_data_all_buckets(self, data_op="create"):
        #initialize the template for document generator
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs', template, age, first, start=0, end=self.num_items)

        self.log.info("%s %s documents..." % (data_op, self.num_items))
        self._load_all_buckets(self.master, gen_load, data_op, 0)

    #returns True if warmup is completed within wait_time seconds,
    #otherwise returns False
    @staticmethod
    def _wait_warmup_completed(self, servers, bucket_name, wait_time=300):
        warmed_up = False
        log = logger.Logger.get_logger()
        for server in servers:
            mc = None
            start = time.time()
            # Try to get the stats for up to wait_time seconds, else bail out.
            while time.time() - start < wait_time:
                # Get the warmup time for each server
                try:
                    mc = MemcachedClientHelper.direct_client(server, bucket_name)
                    stats = mc.stats()
                    if stats is not None:
                        warmup_time = int(stats["ep_warmup_time"])
                        log.info("ep_warmup_time is %s " % warmup_time)
                        log.info(
                            "Collected the stats %s for server %s:%s" % (stats["ep_warmup_time"], server.ip,
                                server.port))
                        break
                    else:
                        log.info(" Did not get the stats from the server yet, trying again.....")
                        time.sleep(2)
                except Exception as e:
                    log.error(
                        "Could not get warmup_time stats from server %s:%s, exception %s" % (server.ip,
                            server.port, e))
            else:
                self.fail(
                    "Fail! Unable to get the warmup-stats from server %s:%s after trying for %s seconds." % (
                        server.ip, server.port, wait_time))

            # Waiting for warm-up
            start = time.time()
            warmed_up = False
            while time.time() - start < wait_time and not warmed_up:
                if mc.stats()["ep_warmup_thread"] == "complete":
                    log.info("warmup completed, awesome!!! Warmed up. %s items " % (mc.stats()["curr_items_tot"]))
                    warmed_up = True
                    continue
                elif mc.stats()["ep_warmup_thread"] == "running":
                    log.info(
                                "still warming up .... curr_items_tot : %s" % (mc.stats()["curr_items_tot"]))
                else:
                    fail("Value of ep warmup thread does not exist, exiting from this server")
                time.sleep(5)
            mc.close()
        return warmed_up
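    # Hypothetical usage sketch: restart couchbase on a node and block until
    # ep_warmup reports completion before continuing (node and bucket names
    # here are illustrative; self must be passed explicitly because the helper
    # is declared as a staticmethod):
    #
    #   shell = RemoteMachineShellConnection(self.master)
    #   shell.stop_couchbase()
    #   shell.start_couchbase()
    #   shell.disconnect()
    #   self.assertTrue(self._wait_warmup_completed(self, [self.master],
    #                                               "default"))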
Example #6
class FailoverBaseTest(unittest.TestCase):
    @staticmethod
    def setUp(self):
        log = logger.Logger.get_logger()
        self._input = TestInputSingleton.input
        self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
        self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
        self.bidirectional = self._input.param("bidirectional", False)
        self.case_number = self._input.param("case_number", 0)
        self._value_size = self._input.param("value_size", 256)
        self.wait_timeout = self._input.param("wait_timeout", 60)
        self._servers = self._input.servers
        self.master = self._servers[0]
        self._failed_nodes = []
        num_buckets = 0
        self.buckets = []
        self.default_bucket = self._input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
            num_buckets += 1
        self._standard_buckets = self._input.param("standard_buckets", 0)
        self._sasl_buckets = self._input.param("sasl_buckets", 0)
        num_buckets += self._standard_buckets + self._sasl_buckets
        self.dgm_run = self._input.param("dgm_run", True)
        self.log = logger.Logger().get_logger()
        self._cluster_helper = Cluster()
        self.disabled_consistent_view = self._input.param(
            "disabled_consistent_view", None)
        self._quota = self._initialize_nodes(self._cluster_helper,
                                             self._servers,
                                             self.disabled_consistent_view)
        if self.dgm_run:
            # shrink the quota actually used for bucket sizing below
            self._quota = 256
        self.bucket_size = int(
            (2.0 / 3.0) / float(num_buckets) * float(self._quota))
        self.gen_create = BlobGenerator('loadOne',
                                        'loadOne_',
                                        self._value_size,
                                        end=self._keys_count)
        self.add_back_flag = False
        self._cleanup_nodes = []
        log.info("==============  setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        for server in self._servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        self._setup_cluster()
        self._create_buckets_()
        log.info("==============  setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))

    @staticmethod
    def tearDown(self):
        try:
            self._cluster_helper.shutdown()
            log = logger.Logger.get_logger()
            log.info("==============  tearDown was started for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
            RemoteUtilHelper.common_basic_setup(self._servers)
            log.info("10 seconds delay to wait for membase-server to start")
            time.sleep(10)
            for server in self._cleanup_nodes:
                shell = RemoteMachineShellConnection(server)
                o, r = shell.execute_command("iptables -F")
                shell.log_command_output(o, r)
                o, r = shell.execute_command(
                    "/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT"
                )
                shell.log_command_output(o, r)
                o, r = shell.execute_command(
                    "/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT"
                )
                shell.log_command_output(o, r)
                o, r = shell.execute_command(
                    "/etc/init.d/couchbase-server start")
                shell.log_command_output(o, r)
                shell.disconnect()
            BucketOperationHelper.delete_all_buckets_or_assert(
                self._servers, self)
            ClusterOperationHelper.cleanup_cluster(self._servers)
            ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
            log.info("==============  tearDown was finished for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
        finally:
            pass

    def _initialize_nodes(self,
                          cluster,
                          servers,
                          disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(
                cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _setup_cluster(self):
        rest = RestConnection(self.master)
        credentials = self._input.membase_settings
        ClusterOperationHelper.add_all_nodes_or_assert(self.master,
                                                       self._servers,
                                                       credentials, self)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)

    def _create_buckets_(self):
        if self.default_bucket:
            self._cluster_helper.create_default_bucket(self.master,
                                                       self.bucket_size,
                                                       self._num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self._num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self._sasl_buckets)
        self._create_standard_buckets(self.master, self._standard_buckets)

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(
                self._cluster_helper.async_create_sasl_bucket(
                    server, name, 'password', self.bucket_size,
                    self._num_replicas))
            self.buckets.append(
                Bucket(name=name,
                       authType="sasl",
                       saslPassword='******',
                       num_replicas=self._num_replicas,
                       bucket_size=self.bucket_size))
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(
                self._cluster_helper.async_create_standard_bucket(
                    server, name, 11214 + i, self.bucket_size,
                    self._num_replicas))

            self.buckets.append(
                Bucket(name=name,
                       authType=None,
                       saslPassword=None,
                       num_replicas=self._num_replicas,
                       bucket_size=self.bucket_size,
                       port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _async_load_all_buckets(self,
                                server,
                                kv_gen,
                                op_type,
                                exp,
                                kv_store=1,
                                flag=0,
                                only_store_hash=True,
                                batch_size=1,
                                pause_secs=1,
                                timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(
                self._cluster_helper.async_load_gen_docs(
                    server, bucket.name, gen, bucket.kvs[kv_store], op_type,
                    exp, flag, only_store_hash, batch_size, pause_secs,
                    timeout_secs))
        return tasks

    def _load_all_buckets(self,
                          server,
                          kv_gen,
                          op_type,
                          exp,
                          kv_store=1,
                          flag=0,
                          only_store_hash=True,
                          batch_size=1,
                          pause_secs=1,
                          timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp,
                                             kv_store, flag, only_store_hash,
                                             batch_size, pause_secs,
                                             timeout_secs)
        for task in tasks:
            task.result()

    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(
                    self._cluster_helper.async_wait_for_stats([server], bucket,
                                                              '',
                                                              'ep_queue_size',
                                                              '==', 0))
                tasks.append(
                    self._cluster_helper.async_wait_for_stats(
                        [server], bucket, '', 'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    def _wait_for_replication(self, servers, timeout=600):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                for server_repl in list(set(servers) - set([server])):
                    tasks.append(
                        self._cluster_helper.async_wait_for_stats(
                            [server], bucket, 'tap',
                            'eq_tapq:replication_ns_1@' + server_repl.ip +
                            ':idle', '==', 'true'))
                    tasks.append(
                        self._cluster_helper.async_wait_for_stats(
                            [server], bucket, 'tap',
                            'eq_tapq:replication_ns_1@' + server_repl.ip +
                            ':backfill_completed', '==', 'true'))
        for task in tasks:
            task.result(timeout)
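    # Hypothetical note: the tap stat keys polled above
    # ("eq_tapq:replication_ns_1@<ip>:idle" and ":backfill_completed") report
    # "true" once intra-cluster replication to that peer has caught up, so a
    # failover test would typically call something like
    #
    #   self._wait_for_replication(self._servers)
    #
    # before triggering the failover it wants to exercise.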

    def _verify_all_buckets(self,
                            server,
                            kv_store=1,
                            timeout=180,
                            max_verify=None,
                            only_store_hash=True,
                            batch_size=1):
        tasks = []
        for bucket in self.buckets:
            tasks.append(
                self._cluster_helper.async_verify_data(server, bucket,
                                                       bucket.kvs[kv_store],
                                                       max_verify,
                                                       only_store_hash,
                                                       batch_size))
        for task in tasks:
            task.result(timeout)

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(
                self._cluster_helper.async_wait_for_stats(
                    servers, bucket, '', 'curr_items', '==', items))
            stats_tasks.append(
                self._cluster_helper.async_wait_for_stats(
                    servers, bucket, '', 'vb_active_curr_items', '==', items))

            available_replicas = self._num_replicas
            if len(servers) <= self._num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(
                self._cluster_helper.async_wait_for_stats(
                    servers, bucket, '', 'vb_replica_curr_items', '==',
                    items * available_replicas))
            stats_tasks.append(
                self._cluster_helper.async_wait_for_stats(
                    servers, bucket, '', 'curr_items_tot', '==',
                    items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)
Example #7
class XDCRBaseTest(unittest.TestCase):
    def setUp(self):
        try:
            self._log = logger.Logger.get_logger()
            self._input = TestInputSingleton.input
            self._init_parameters()
            self._cluster_helper = Cluster()
            self._log.info("==============  XDCRbasetests setup was started for test #{0} {1}=============="\
                .format(self._case_number, self._testMethodName))
            if not self._input.param("skip_cleanup", False):
                self._cleanup_previous_setup()

            self._init_clusters(self._disabled_consistent_view)
            self.setup_extended()
            self._log.info("==============  XDCRbasetests setup was finished for test #{0} {1} =============="\
                .format(self._case_number, self._testMethodName))
            self._log_start(self)
        except Exception as e:
            self._log.error(e.message)
            self._log.error("Error while setting up clusters: %s", sys.exc_info())
            self._cleanup_broken_setup()
            raise

    def tearDown(self):
        try:
            self._log.info("==============  XDCRbasetests cleanup was started for test #{0} {1} =============="\
                .format(self._case_number, self._testMethodName))
            self.teardown_extended()
            self._do_cleanup()
            self._log.info("==============  XDCRbasetests cleanup was finished for test #{0} {1} =============="\
                .format(self._case_number, self._testMethodName))
        finally:
            self._cluster_helper.shutdown()
            self._log_finish(self)

    def _cleanup_previous_setup(self):
        self.teardown_extended()
        self._do_cleanup()

    def _init_parameters(self):
        self._log.info("Initializing input parameters started...")
        self._clusters_dic = self._input.clusters # clusters is declared as a dict in TestInput, which is unordered.
        self._clusters_keys_olst = range(
            len(self._clusters_dic)) #clusters are populated in the dict in testrunner such that the ordinal is the key.
        #OrderedDict cannot be used in order to maintain compatibility with python 2.6
        self._cluster_counter_temp_int = 0
        self._cluster_names_dic = self._get_cluster_names()
        self._servers = self._input.servers
        self._disabled_consistent_view = self._input.param("disabled_consistent_view", True)
        self._floating_servers_set = self._get_floating_servers() # These are the servers defined in .ini file but not linked to any cluster.
        self._cluster_counter_temp_int = 0 #TODO: fix the testrunner code to pass cluster name in params.
        self._buckets = []

        self._default_bucket = self._input.param("default_bucket", True)

        """
        ENTER: sasl_buckets=[no.] or standard_buckets=[no.]
        """
        self._standard_buckets = self._input.param("standard_buckets", 0)
        self._sasl_buckets = self._input.param("sasl_buckets", 0)

        if self._default_bucket:
            self.default_bucket_name = "default"

        self._num_replicas = self._input.param("replicas", 1)
        self._num_items = self._input.param("items", 1000)
        self._value_size = self._input.param("value_size", 256)
        self._dgm_run_bool = self._input.param("dgm_run", False)
        self._mem_quota_int = 0 # will be set in subsequent methods

        self._poll_interval = self._input.param(XDCRConstants.INPUT_PARAM_POLL_INTERVAL, 5)
        self._poll_timeout = self._input.param(XDCRConstants.INPUT_PARAM_POLL_TIMEOUT, 120)

        self.init_parameters_extended()

        self._doc_ops = self._input.param("doc-ops", None)
        if self._doc_ops is not None:
            self._doc_ops = self._doc_ops.split("-")
        self._doc_ops_dest = self._input.param("doc-ops-dest", None)
        # semi-colon separator is not accepted for some reason here
        if self._doc_ops_dest is not None:
            self._doc_ops_dest = self._doc_ops_dest.split("-")

        self._case_number = self._input.param("case_number", 0)
        self._expires = self._input.param("expires", 0)
        self._timeout = self._input.param("timeout", 60)
        self._percent_update = self._input.param("upd", 30)
        self._percent_delete = self._input.param("del", 30)
        self._warmup = self._input.param("warm", "all")
        self._failover = self._input.param("failover", None)
        self._rebalance = self._input.param("rebalance", None)
        if self._failover is not None:
            self._failover = self._failover.split("-")
        if self._rebalance is not None:
            self._rebalance = self._rebalance.split("-")
            self._num_rebalance = self._input.param("num_rebalance", 1)


        """
        CREATE's a set of items,
        UPDATE's UPD% of the items starting from 0,
        DELETE's DEL% of the items starting from the end (count(items)).
        """
        self.gen_create = BlobGenerator('loadOne', 'loadOne', self._value_size, end=self._num_items)
        self.gen_delete = BlobGenerator('loadOne', 'loadOne-', self._value_size,
            start=int(self._num_items * float(100 - self._percent_delete) / 100), end=self._num_items)
        self.gen_update = BlobGenerator('loadOne', 'loadOne-', self._value_size, start=0,
            end=int(self._num_items * float(self._percent_update) / 100))


        self.ord_keys = self._clusters_keys_olst
        self.ord_keys_len = len(self.ord_keys)

        self.src_nodes = self._clusters_dic[0]
        self.src_master = self.src_nodes[0]

        self.dest_nodes = self._clusters_dic[1]
        self.dest_master = self.dest_nodes[0]

        self._defaul_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
        self._default_view_name = "default_view"
        self._default_view = View(self._default_view_name, self._defaul_map_func, None)
        self._num_views = self._input.param("num_views", 5)
        self._is_dev_ddoc = self._input.param("is_dev_ddoc", True)

        self.fragmentation_value = self._input.param("fragmentation_value", 80)
        self.disable_src_comp = self._input.param("disable_src_comp", True)
        self.disable_dest_comp = self._input.param("disable_dest_comp", True)

        self._log.info("Initializing input parameters completed.")

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.src_master).log_client_error(msg)
            RestConnection(self.dest_master).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.src_master).log_client_error(msg)
            RestConnection(self.dest_master).log_client_error(msg)
        except:
            pass

    def _get_floating_servers(self):
        cluster_nodes = []
        # work on a copy so that self._servers is left untouched
        floating_servers = list(self._servers)

        for key, node in self._clusters_dic.items():
            cluster_nodes.extend(node)

        for c_node in cluster_nodes:
            # iterate over a snapshot since nodes are removed from the list
            for node in list(floating_servers):
                if node.ip in str(c_node) and node.port in str(c_node):
                    floating_servers.remove(node)

        return floating_servers


    def _init_clusters(self, disabled_consistent_view=None):
        for key in self._clusters_keys_olst:
            self._setup_cluster(self._clusters_dic[key], disabled_consistent_view)

    # This method shall be overridden in case there are parameters that need to be initialized.
    def init_parameters_extended(self):
        pass

    # This method shall be overridden in case there are custom steps involved during setup.
    def setup_extended(self):
        pass

    # This method shall be overridden in case there are custom steps involved during teardown.
    def teardown_extended(self):
        pass

    def _do_cleanup(self):
        for key in self._clusters_keys_olst:
            nodes = self._clusters_dic[key]
            BucketOperationHelper.delete_all_buckets_or_assert(nodes, self)
            ClusterOperationHelper.cleanup_cluster(nodes)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, self)

    def _cleanup_broken_setup(self):
        try:
            self.tearDown()
        except:
            self._log.info("Error while cleaning broken setup.")

    def _get_cluster_names(self):
        cs_names = {}
        for key in self._clusters_keys_olst:
            cs_names[key] = "cluster{0}".format(self._cluster_counter_temp_int)
            self._cluster_counter_temp_int += 1
        return cs_names

    def _setup_cluster(self, nodes, disabled_consistent_view=None):
        self._init_nodes(nodes, disabled_consistent_view)
        self._config_cluster(nodes)
        self._create_buckets(nodes)

    def _init_nodes(self, nodes, disabled_consistent_view=None):
        _tasks = []
        for node in nodes:
            _tasks.append(self._cluster_helper.async_init_node(node, disabled_consistent_view))
        for task in _tasks:
            mem_quota_node = task.result()
            if mem_quota_node < self._mem_quota_int or self._mem_quota_int == 0:
                self._mem_quota_int = mem_quota_node

    def _create_sasl_buckets(self, server, server_id, bucket_size):
        bucket_tasks = []
        for i in range(self._sasl_buckets):
            name = "sasl_bucket_" + str(i + 1)
            bucket_tasks.append(self._cluster_helper.async_create_sasl_bucket(server, name, 'password',
                bucket_size, self._num_replicas))
            self._buckets.append(Bucket(name=name, authType="sasl", saslPassword="******",
                num_replicas=self._num_replicas, bucket_size=bucket_size, master_id=server_id))

        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, server_id, bucket_size):
        bucket_tasks = []
        for i in range(self._standard_buckets):
            name = "standard_bucket_" + str(i + 1)
            bucket_tasks.append(self._cluster_helper.async_create_standard_bucket(server, name,
                11214 + i, bucket_size, self._num_replicas))
            self._buckets.append(Bucket(name=name, authType=None, saslPassword=None,
                num_replicas=self._num_replicas, bucket_size=bucket_size, master_id=server_id))

        for task in bucket_tasks:
            task.result()

    def _create_buckets(self, nodes):
        if self._dgm_run_bool:
            self._mem_quota_int = 256
        master_node = nodes[0]
        bucket_size = self._get_bucket_size(master_node, nodes, self._mem_quota_int, self._default_bucket)
        rest = RestConnection(master_node)
        master_id = rest.get_nodes_self().id

        if self._sasl_buckets > 0:
            self._create_sasl_buckets(master_node, master_id, bucket_size)
        if self._standard_buckets > 0:
            self._create_standard_buckets(master_node, master_id, bucket_size)
        if self._default_bucket:
            self._cluster_helper.create_default_bucket(master_node, bucket_size, self._num_replicas)
            self._buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                num_replicas=self._num_replicas, bucket_size=bucket_size, master_id=master_id))

    def _config_cluster(self, nodes):
        task = self._cluster_helper.async_rebalance(nodes, nodes[1:], [])
        task.result()

    def _get_bucket_size(self, master_node, nodes, mem_quota, num_buckets, ratio=2.0 / 3.0):
        for node in nodes:
            if node.ip == master_node.ip:
                return int(ratio / float(len(nodes)) / float(num_buckets) * float(mem_quota))
        return int(ratio / float(num_buckets) * float(mem_quota))


    def _poll_for_condition(self, condition):
        timeout = self._poll_timeout
        interval = self._poll_interval
        num_itr = timeout / interval
        return self._poll_for_condition_rec(condition, interval, num_itr)

    def _poll_for_condition_rec(self, condition, sleep, num_itr):
        if num_itr == 0:
            return False
        else:
            if condition():
                return True
            else:
                time.sleep(sleep)
                return self._poll_for_condition_rec(condition, sleep, (num_itr - 1))
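    # Hypothetical usage sketch: block until the destination cluster holds the
    # expected number of items, re-checking every self._poll_interval seconds
    # up to self._poll_timeout (the memcached client helper is the same one
    # used elsewhere in this document; the condition itself is illustrative):
    #
    #   def _dest_caught_up():
    #       mc = MemcachedClientHelper.direct_client(self.dest_master, "default")
    #       caught_up = int(mc.stats()["curr_items"]) == self._num_items
    #       mc.close()
    #       return caught_up
    #
    #   self.assertTrue(self._poll_for_condition(_dest_caught_up),
    #                   "destination did not catch up in time")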

    def do_a_warm_up(self, node):
        shell = RemoteMachineShellConnection(node)
        shell.stop_couchbase()
        time.sleep(5)
        shell.start_couchbase()
        shell.disconnect()

    def _get_cluster_buckets(self, master_server):
        rest = RestConnection(master_server)
        master_id = rest.get_nodes_self().id
        #verify if node_ids were changed for cluster_run
        for bucket in self._buckets:
            if ("127.0.0.1" in bucket.master_id and "127.0.0.1" not in master_id) or \
               ("localhost" in bucket.master_id and "localhost" not in master_id):
                new_ip = master_id[master_id.index("@") + 1:]
                bucket.master_id = bucket.master_id.replace("127.0.0.1", new_ip).\
                replace("localhost", new_ip)
        return [bucket for bucket in self._buckets if bucket.master_id == master_id]


    """merge 2 different kv strores from different clsusters/buckets
       assume that all elements in the second kvs are more relevant.

    Returns:
            merged kvs, that we expect to get on both clusters
    """
    def merge_keys(self, kv_store_first, kv_store_second, kvs_num=1):
        valid_keys_first, deleted_keys_first = kv_store_first[kvs_num].key_set()
        valid_keys_second, deleted_keys_second = kv_store_second[kvs_num].key_set()

        for key in valid_keys_second:
            #replace the values for each key in first kvs if the keys are presented in second one
            if key in valid_keys_first:
                partition1 = kv_store_first[kvs_num].acquire_partition(key)
                partition2 = kv_store_second[kvs_num].acquire_partition(key)
                partition1.set(key, partition2.get_key(key))
                kv_store_first[kvs_num].release_partition(key)
                kv_store_second[kvs_num].release_partition(key)
            # add keys/values to the first kvs when the key is present only in the second one
            else:
                partition1, num_part = kv_store_first[kvs_num].acquire_random_partition()
                partition2 = kv_store_second[kvs_num].acquire_partition(key)
                partition1.set(key, partition2.get_key(key))
                kv_store_first[kvs_num].release_partition(num_part)
                kv_store_second[kvs_num].release_partition(key)
            # TODO: handle the case where a key was deleted in the first kvs but re-added in the second

        for key in deleted_keys_second:
            # the same keys were deleted in both kvs
            if key in deleted_keys_first:
                pass
            # add deleted keys to the first kvs if they were deleted only in the second kvs
            else:
                partition1 = kv_store_first[kvs_num].acquire_partition(key)
                partition2 = kv_store_second[kvs_num].acquire_partition(key)
                partition1.deleted[key] = partition2.get_key(key)
                kv_store_first[kvs_num].release_partition(key)
                kv_store_second[kvs_num].release_partition(key)
        # return the merged kvs that we expect to see on both clusters
        return kv_store_first[kvs_num]
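    # Hedged illustration (not in the original suite): a dict-based sketch of the
    # merge semantics above, assuming each kv store is reduced to plain
    # {key: value/meta} mappings of its valid and deleted keys. Values from the
    # second store win on conflicts, and deletes seen only on the second side
    # are copied over.
    def _merge_keys_sketch(self, valid_first, valid_second, deleted_first, deleted_second):
        merged_valid = dict(valid_first)
        merged_valid.update(valid_second)          # the second kvs replaces/adds values
        merged_deleted = dict(deleted_first)
        for key, meta in deleted_second.items():
            if key not in deleted_first:
                merged_deleted[key] = meta         # propagate deletes made only on the second side
        return merged_valid, merged_deleted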

    def merge_buckets(self, src_master, dest_master, bidirection=True):
        if self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_CHAIN:
            self.do_merge_buckets(src_master, dest_master, bidirection)
        elif self._cluster_topology_str == XDCRConstants.CLUSTER_TOPOLOGY_TYPE_STAR:
            for i in range(1, len(self._clusters_dic)):
                dest_cluster = self._clusters_dic[i]
                self.do_merge_buckets(src_master, dest_cluster[0], bidirection)

    def do_merge_buckets(self, src_master, dest_master, bidirection):
        src_buckets = self._get_cluster_buckets(src_master)
        dest_buckets = self._get_cluster_buckets(dest_master)
        for src_bucket in src_buckets:
            for dest_bucket in dest_buckets:
                if src_bucket.name == dest_bucket.name:
                    if bidirection:
                        src_bucket.kvs[1] = self.merge_keys(src_bucket.kvs, dest_bucket.kvs, kvs_num=1)
                    dest_bucket.kvs[1] = src_bucket.kvs[1]

        """Verify the stats at the destination cluster
        1. Data Validity check - using kvstore-node key-value check
        2. Item count check on source versus destination
        3. For deleted and updated items, check the CAS/SeqNo/Expiry/Flags for same key on source/destination
        * Make sure to call expiry_pager function to flush out temp items(deleted/expired items)"""
    def verify_xdcr_stats(self, src_nodes, dest_nodes, verify_src=False):
        if self._num_items < 10000:
            timeout = 120
        elif self._num_items < 50000:
            timeout = 300
        elif self._num_items < 100000:
            timeout = 500
        else:
            timeout = 600

        # failover and rebalance runs need more time; multiply before dividing so
        # Python 2 integer division keeps the intended 1.5x factor
        if self._failover is not None or self._rebalance is not None:
            timeout = timeout * 3 / 2

        # verifying both source and destination clusters needs more time
        if verify_src:
            timeout = timeout * 3 / 2

        end_time = time.time() + timeout
        self._log.info("Verify xdcr replication stats at Destination Cluster : {0}".format(self.dest_nodes[0].ip))
        if verify_src:
            timeout = max(120, end_time - time.time())
            self._wait_for_stats_all_buckets(self.src_nodes, timeout=timeout)
        timeout = max(120, end_time - time.time())
        self._wait_for_stats_all_buckets(self.dest_nodes, timeout=timeout)
        self._expiry_pager(self.src_nodes[0])
        self._expiry_pager(self.dest_nodes[0])
        if verify_src:
            timeout = max(120, end_time - time.time())
            self._verify_stats_all_buckets(self.src_nodes, timeout=timeout)
            timeout = max(120, end_time - time.time())
            self._verify_all_buckets(self.src_master, timeout=timeout)
        timeout = max(120, end_time - time.time())
        self._verify_stats_all_buckets(self.dest_nodes, timeout=timeout)
        timeout = max(120, end_time - time.time())
        self._verify_all_buckets(self.dest_master, timeout=timeout)

        errors_caught = 0
        if self._doc_ops is not None or self._doc_ops_dest is not None:
            if "update" in self._doc_ops or (self._doc_ops_dest is not None and "update" in self._doc_ops_dest):
                errors_caught = self._verify_revIds(self.src_nodes[0], self.dest_nodes[0], "update")

            if "delete" in self._doc_ops or (self._doc_ops_dest is not None and "delete" in self._doc_ops_dest):
                errors_caught = self._verify_revIds(self.src_nodes[0], self.dest_nodes[0], "delete")

        if errors_caught > 0:
            self.fail("Mismatches on Meta Information on xdcr-replicated items!")
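    # Minimal sketch (hypothetical helper, not in the original suite) of the
    # timeout-budget pattern used above: one deadline is computed up front and
    # every later wait receives whatever budget remains, never less than a
    # 120-second floor.
    def _remaining_timeout_sketch(self, end_time, floor=120):
        return max(floor, end_time - time.time())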

    def verify_results(self, verify_src=False):
        # Checking replication at destination clusters
        dest_key_index = 1
        for key in self.ord_keys[1:]:
            if dest_key_index == self.ord_keys_len:
                break
            dest_key = self.ord_keys[dest_key_index]
            self.dest_nodes = self._clusters_dic[dest_key]

            self.verify_xdcr_stats(self.src_nodes, self.dest_nodes, verify_src)
            dest_key_index += 1

    def wait_warmup_completed(self, warmupnodes, bucket_names=["default"]):
        if isinstance(bucket_names, str):
            bucket_names = [bucket_names]
        for server in warmupnodes:
            for bucket in bucket_names:
                mc = MemcachedClientHelper.direct_client(server, bucket)
                start = time.time()
                while time.time() - start < 150:
                    stats = mc.stats()
                    if stats["ep_warmup_thread"] == "complete":
                        self._log.info("Warmed up: %s items " % (stats["curr_items_tot"]))
                        time.sleep(10)
                        break
                    elif stats["ep_warmup_thread"] == "running":
                        self._log.info(
                            "Still warming up .. curr_items_tot : %s" % (stats["curr_items_tot"]))
                        # brief pause so the stats call is not hammered in a tight loop
                        time.sleep(2)
                        continue
                    else:
                        self._log.info("ep_warmup_thread stat is missing, skipping this server")
                        break
                if mc.stats()["ep_warmup_thread"] == "running":
                    self._log.info("ERROR: ep_warmup_thread did not reach 'complete'")
                mc.close()


    def _modify_src_data(self):
        """Setting up creates/updates/deletes at source nodes"""

        if self._doc_ops is not None:
            if "create" in self._doc_ops:
                self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
            if "update" in self._doc_ops:
                self._load_all_buckets(self.src_master, self.gen_update, "update", self._expires)
            if "delete" in self._doc_ops:
                self._load_all_buckets(self.src_master, self.gen_delete, "delete", 0)
            self._wait_for_stats_all_buckets(self.src_nodes)

    def disable_compaction(self, server=None, bucket="default"):
        server = server or self.src_master
        new_config = {"viewFragmntThresholdPercentage" : None,
                      "dbFragmentThresholdPercentage" :  None,
                      "dbFragmentThreshold" : None,
                      "viewFragmntThreshold" : None}
        self._cluster_helper.modify_fragmentation_config(server, new_config, bucket)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self._default_view
        ref_view.name = prefix if prefix is not None else ref_view.name
        return [View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc) for i in xrange(count)]


    def async_create_views(self, server, design_doc_name, views, bucket="default"):
        tasks = []
        if len(views):
            for view in views:
                t_ = self._cluster_helper.async_create_view(server, design_doc_name, view, bucket)
                tasks.append(t_)
        else:
            t_ = self._cluster_helper.async_create_view(server, design_doc_name, None, bucket)
            tasks.append(t_)
        return tasks
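    # Hypothetical usage sketch (not part of the original tests): building a set
    # of default views and publishing them into one design document
    # asynchronously. "test_view" and "ddoc1" are made-up names for illustration.
    def _create_default_views_sketch(self, server, count=3):
        views = self.make_default_views("test_view", count)
        tasks = self.async_create_views(server, "ddoc1", views)
        for task in tasks:
            task.result()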
Example #8
0
class FailoverBaseTest(unittest.TestCase):

    @staticmethod
    def setUp(self):
        log = logger.Logger.get_logger()
        self._input = TestInputSingleton.input
        self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
        self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
        self.bidirectional = self._input.param("bidirectional", False)
        self.case_number = self._input.param("case_number", 0)
        self._value_size = self._input.param("value_size", 256)
        self.wait_timeout = self._input.param("wait_timeout", 60)
        self._servers = self._input.servers
        self.master = self._servers[0]
        self._failed_nodes = []
        num_buckets = 0
        self.buckets = []
        self.default_bucket = self._input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
            num_buckets += 1
        self._standard_buckets = self._input.param("standard_buckets", 0)
        self._sasl_buckets = self._input.param("sasl_buckets", 0)
        num_buckets += self._standard_buckets + self._sasl_buckets
        self.dgm_run = self._input.param("dgm_run", True)
        self.log = logger.Logger().get_logger()
        self._cluster_helper = Cluster()
        self.disabled_consistent_view = self._input.param("disabled_consistent_view", None)
        self._quota = self._initialize_nodes(self._cluster_helper, self._servers, self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = int((2.0 / 3.0) / float(num_buckets) * float(self._quota))
        self.gen_create = BlobGenerator('loadOne', 'loadOne_', self._value_size, end=self._keys_count)
        self.add_back_flag = False
        self._cleanup_nodes = []
        log.info("==============  setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        for server in self._servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        self._setup_cluster()
        self._create_buckets_()
        log.info("==============  setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))

    @staticmethod
    def tearDown(self):
        try:
            self._cluster_helper.shutdown()
            log = logger.Logger.get_logger()
            log.info("==============  tearDown was started for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
            RemoteUtilHelper.common_basic_setup(self._servers)
            log.info("10 seconds delay to wait for membase-server to start")
            time.sleep(10)
            for server in self._cleanup_nodes:
                shell = RemoteMachineShellConnection(server)
                o, r = shell.execute_command("iptables -F")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/etc/init.d/couchbase-server start")
                shell.log_command_output(o, r)
                shell.disconnect()
            BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
            ClusterOperationHelper.cleanup_cluster(self._servers)
            ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
            log.info("==============  tearDown was finished for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
        finally:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _setup_cluster(self):
        rest = RestConnection(self.master)
        credentials = self._input.membase_settings
        ClusterOperationHelper.add_all_nodes_or_assert(self.master, self._servers, credentials, self)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)

    def _create_buckets_(self):
        if self.default_bucket:
            self._cluster_helper.create_default_bucket(self.master, self.bucket_size, self._num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self._num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self._sasl_buckets)
        self._create_standard_buckets(self.master, self._standard_buckets)

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self._cluster_helper.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self._num_replicas))
            self.buckets.append(Bucket(name=name, authType="sasl", saslPassword='******',
                                       num_replicas=self._num_replicas, bucket_size=self.bucket_size));
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(self._cluster_helper.async_create_standard_bucket(server, name,
                                                                          11214 + i,
                                                                          self.bucket_size,
                                                                          self._num_replicas))

            self.buckets.append(Bucket(name=name, authType=None, saslPassword=None, num_replicas=self._num_replicas,
                                       bucket_size=self.bucket_size, port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(self._cluster_helper.async_load_gen_docs(server, bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs))
        return tasks

    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        for task in tasks:
            task.result()

    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self._cluster_helper.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self._cluster_helper.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    def _wait_for_replication(self, servers, timeout=600):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                for server_repl in list(set(servers) - set([server])):
                    tasks.append(self._cluster_helper.async_wait_for_stats([server], bucket, 'tap',
                                   'eq_tapq:replication_ns_1@' + server_repl.ip + ':idle', '==', 'true'))
                    tasks.append(self._cluster_helper.async_wait_for_stats([server], bucket, 'tap',
                                   'eq_tapq:replication_ns_1@' + server_repl.ip + ':backfill_completed', '==', 'true'))
        for task in tasks:
            task.result(timeout)


    def _verify_all_buckets(self, server, kv_store=1, timeout=180, max_verify=None, only_store_hash=True, batch_size=1):
        tasks = []
        for bucket in self.buckets:
            tasks.append(self._cluster_helper.async_verify_data(server, bucket, bucket.kvs[kv_store], max_verify, only_store_hash, batch_size))
        for task in tasks:
            task.result(timeout)

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(self._cluster_helper.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self._cluster_helper.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self._num_replicas
            if len(servers) <= self._num_replicas:
                # not enough servers to host every replica copy
                available_replicas = len(servers) - 1

            stats_tasks.append(self._cluster_helper.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self._cluster_helper.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)
Example #9
0
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        #number of the test case run by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        #initial number of nodes in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)

        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        #maximum number of items to verify in ValidateDataTask; None means verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        #avoid cleanup if the previous test has already been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1:
            #rebalance all nodes into the cluster before each test
            self.cluster.rebalance(self.servers[:self.num_servers], self.servers[1:self.num_servers], [])
        elif self.nodes_init > 1:
            self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        try:
            if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                and TestInputSingleton.input.param("stop-on-failure", False))\
                    or self.input.param("skip_cleanup", False):
                self.log.warn("CLEANUP WAS SKIPPED")
            else:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning("rebalancing is still running, test should be verified")
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        finally:
            #stop all existing task manager threads
            self.cluster.shutdown()
            self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0 / 3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self.num_replicas))
            self.buckets.append(Bucket(name=name, authType="sasl", saslPassword='******',
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size));
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_standard_bucket(server, name,
                                                                          11214 + i,
                                                                          self.bucket_size,
                                                                          self.num_replicas))

            self.buckets.append(Bucket(name=name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                       bucket_size=self.bucket_size, port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _all_buckets_delete(self, server):
        delete_tasks = []
        for bucket in self.buckets:
            delete_tasks.append(self.cluster.async_bucket_delete(server, bucket.name))

        for task in delete_tasks:
            task.result()
        self.buckets = []

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                # not enough servers to host every replica copy
                available_replicas = len(servers) - 1

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)
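    # Worked example (illustrative numbers): with items = 1000, 4 servers and
    # num_replicas = 1, available_replicas stays 1, so the checks above expect
    #   vb_replica_curr_items == 1000 * 1 = 1000
    #   curr_items_tot        == 1000 * (1 + 1) = 2000
    # cluster-wide. With only 2 servers and 2 replicas, available_replicas drops
    # to len(servers) - 1 = 1 and the expected totals shrink accordingly.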


    """Asynchronously applys load generation to all bucekts in the cluster.
 bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp
    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """
    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs))
        return tasks

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """
    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1000, pause_secs=1, timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        for task in tasks:
            task.result()
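    # Hypothetical usage sketch (not from the original tests): generate 1000
    # documents from a simple template, load them into every bucket with
    # "create", then re-read them. Names and counts are illustrative only.
    def _load_and_read_sketch(self):
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('sketch_docs', template, range(5), ['james', 'sharon'],
                                     start=0, end=1000)
        self._load_all_buckets(self.master, gen_load, "create", 0)
        self._load_all_buckets(self.master, copy.deepcopy(gen_load), "read", 0)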

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """
    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """
    def _verify_all_buckets(self, server, kv_store=1, timeout=180, max_verify=None, only_store_hash=True, batch_size=1000):
        tasks = []
        for bucket in self.buckets:
            tasks.append(self.cluster.async_verify_data(server, bucket, bucket.kvs[kv_store], max_verify, only_store_hash, batch_size))
        for task in tasks:
            task.result(timeout)


    def disable_compaction(self, server=None, bucket="default"):

        server = server or self.servers[0]
        new_config = {"viewFragmntThresholdPercentage" : None,
                      "dbFragmentThresholdPercentage" :  None,
                      "dbFragmentThreshold" : None,
                      "viewFragmntThreshold" : None}
        self.cluster.modify_fragmentation_config(server, new_config, bucket)

    def async_create_views(self, server, design_doc_name, views, bucket="default", with_query=True):
        tasks = []
        if len(views):
            for view in views:
                t_ = self.cluster.async_create_view(server, design_doc_name, view, bucket, with_query)
                tasks.append(t_)
        else:
            t_ = self.cluster.async_create_view(server, design_doc_name, None, bucket, with_query)
            tasks.append(t_)
        return tasks

    def create_views(self, server, design_doc_name, views, bucket="default", timeout=None):
        if len(views):
            for view in views:
                self.cluster.create_view(server, design_doc_name, view, bucket, timeout)
        else:
            self.cluster.create_view(server, design_doc_name, None, bucket, timeout)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self.default_view
        ref_view.name = prefix if prefix is not None else ref_view.name
        return [View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc) for i in xrange(count)]

    def _load_doc_data_all_buckets(self, data_op="create", batch_size=1000):
        #initialize the template for document generator
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs', template, age, first, start=0, end=self.num_items)

        self.log.info("%s %s documents..." % (data_op, self.num_items))
        self._load_all_buckets(self.master, gen_load, data_op, 0, batch_size=batch_size)

    def verify_cluster_stats(self, servers=None, master=None, max_verify=None):
        if servers is None:
            servers = self.servers
        if master is None:
            master = self.master
        if max_verify is None:
            max_verify = self.max_verify
        self._wait_for_stats_all_buckets(servers)
        self._verify_all_buckets(master, max_verify=max_verify)
        self._verify_stats_all_buckets(servers)
        #verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(master, bucket)
        self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total")