Example #1
def wait_for_replication(servers, cluster_helper=None, timeout=600):
    if cluster_helper is None:
        cluster = Cluster()
    else:
        cluster = cluster_helper
    tasks = []
    rest = RestConnection(servers[0])
    buckets = rest.get_buckets()
    for server in servers:
        for bucket in buckets:
            for server_repl in list(set(servers) - set([server])):
                tasks.append(
                    cluster.async_wait_for_stats(
                        [server], bucket, 'tap',
                        'eq_tapq:replication_ns_1@' + server_repl.ip +
                        ':idle', '==', 'true'))
                tasks.append(
                    cluster.async_wait_for_stats(
                        [server], bucket, 'tap',
                        'eq_tapq:replication_ns_1@' + server_repl.ip +
                        ':backfill_completed', '==', 'true'))
    try:
        for task in tasks:
            task.result(timeout)
    finally:
        if cluster_helper is None:
            # stop all newly created task manager threads
            cluster.shutdown()
    # return after the finally block so a task failure is not swallowed
    return True
Example #2
 def wait_for_replication(servers, cluster_helper=None, timeout=600):
     if cluster_helper is None:
         cluster = Cluster()
     else:
         cluster = cluster_helper
     tasks = []
     rest = RestConnection(servers[0])
     buckets = rest.get_buckets()
     for server in servers:
         for bucket in buckets:
             for server_repl in list(set(servers) - set([server])):
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server],
                         bucket,
                         "tap",
                         "eq_tapq:replication_ns_1@" + server_repl.ip + ":idle",
                         "==",
                         "true",
                     )
                 )
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server],
                         bucket,
                         "tap",
                         "eq_tapq:replication_ns_1@" + server_repl.ip + ":backfill_completed",
                         "==",
                         "true",
                     )
                 )
     try:
         for task in tasks:
             task.result(timeout)
     finally:
         if cluster_helper is None:
             # stop all newly created task manager threads
             cluster.shutdown()
    # return after the finally block so a task failure is not swallowed
    return True
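
A minimal usage sketch for wait_for_replication. The import path follows the
old Couchbase testrunner layout and is an assumption, as is the wrapper
function name; passing cluster_helper in keeps the function from shutting
down task-manager threads the caller still needs:

from couchbase.cluster import Cluster  # assumed testrunner module path

def wait_for_replication_with_shared_helper(servers):
    # reuse one task manager so repeated waits don't spawn new threads;
    # wait_for_replication leaves it running because we pass it in
    helper = Cluster()
    try:
        return wait_for_replication(servers, cluster_helper=helper,
                                    timeout=900)
    finally:
        helper.shutdown()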
Example #3
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        self.buckets = {}

        self.default_bucket = self.input.param("default_bucket", True)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)

        if not self.input.param("skip_cleanup", False):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)

        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets['default'] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
        # TODO (Mike): Create Standard buckets

    def tearDown(self):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.buckets = {}
        self.cluster.shutdown()

    def _initialize_nodes(self, cluster, servers):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server))
        for task in init_tasks:
            node_quota = task.result()
            # the cluster quota is bounded by the smallest node quota
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0/3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self.num_replicas))
            self.buckets[name] = {1 : KVStore()}
        for task in bucket_tasks:
            task.result()

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket, kv_stores in self.buckets.items():
            items = sum([len(kv_store) for kv_store in kv_stores.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)


    """Asynchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """
    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1):
        tasks = []
        for bucket, kv_stores in self.buckets.items():
            gen = copy.deepcopy(kv_gen)
            tasks.append(self.cluster.async_load_gen_docs(server, bucket, gen,
                                                          kv_stores[kv_store],
                                                          op_type, exp))
        return tasks

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """
    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """
    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """
    def _verify_all_buckets(self, server, kv_store=1):
        tasks = []
        for bucket, kv_stores in self.buckets.items():
            tasks.append(self.cluster.async_verify_data(server, bucket, kv_stores[kv_store]))
        for task in tasks:
            task.result()
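
A hedged sketch of a concrete test driving the helpers above; the subclass
and test names are hypothetical, and the generator arguments mirror the
document-generation pattern used elsewhere in this corpus:

class ExampleLoadTest(BaseTestCase):
    def test_load_and_verify(self):
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen = DocumentGenerator('test_docs', template, range(5),
                                ['james', 'sharon'],
                                start=0, end=self.num_items)
        # load every bucket, wait for queues to drain, then verify
        self._load_all_buckets(self.servers[0], gen, "create", 0)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self._verify_all_buckets(self.servers[0])
        self._verify_stats_all_buckets(self.servers[:self.num_servers])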
Example #4
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the test case run by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        # initial number of nodes in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)

        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        # max number of items to verify in ValidateDataTask; None verifies all
        self.max_verify = self.input.param("max_verify", None)
        # by default we do not change consistent_view on the server
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test has already been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1:
            #rebalance all nodes into the cluster before each test
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   self.servers[1:self.num_servers], [])
        elif self.nodes_init > 1:
            self.cluster.rebalance(self.servers[:1],
                                   self.servers[1:self.nodes_init], [])
        self.quota = self._initialize_nodes(self.cluster, self.servers,
                                            self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota,
                                                     self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        try:
            if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                and TestInputSingleton.input.param("stop-on-failure", False))\
                    or self.input.param("skip_cleanup", False):
                self.log.warn("CLEANUP WAS SKIPPED")
            else:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning(
                        "rebalancing is still running, test should be verified"
                    )
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(
                    self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        finally:
            #stop all existing task manager threads
            self.cluster.shutdown()
            self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(),
                                              self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(),
                                               self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self,
                          cluster,
                          servers,
                          disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(
                cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0 / 3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) /
                           float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(
                self.cluster.async_create_sasl_bucket(server, name, 'password',
                                                      self.bucket_size,
                                                      self.num_replicas))
            self.buckets.append(
                Bucket(name=name,
                       authType="sasl",
                       saslPassword='******',
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(
                self.cluster.async_create_standard_bucket(
                    server, name, 11214 + i, self.bucket_size,
                    self.num_replicas))

            self.buckets.append(
                Bucket(name=name,
                       authType=None,
                       saslPassword=None,
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size,
                       port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _all_buckets_delete(self, server):
        delete_tasks = []
        for bucket in self.buckets:
            delete_tasks.append(
                self.cluster.async_bucket_delete(server, bucket.name))

        for task in delete_tasks:
            task.result()
        self.buckets = []

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'curr_items', '==', items))
            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'vb_active_curr_items', '==',
                                                  items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'vb_replica_curr_items',
                                                  '==',
                                                  items * available_replicas))
            stats_tasks.append(
                self.cluster.async_wait_for_stats(
                    servers, bucket, '', 'curr_items_tot', '==',
                    items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)

    """Asynchronously applys load generation to all bucekts in the cluster.
 bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp
    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """

    def _async_load_all_buckets(self,
                                server,
                                kv_gen,
                                op_type,
                                exp,
                                kv_store=1,
                                flag=0,
                                only_store_hash=True,
                                batch_size=1,
                                pause_secs=1,
                                timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(
                self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                 bucket.kvs[kv_store], op_type,
                                                 exp, flag, only_store_hash,
                                                 batch_size, pause_secs,
                                                 timeout_secs))
        return tasks

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """

    def _load_all_buckets(self,
                          server,
                          kv_gen,
                          op_type,
                          exp,
                          kv_store=1,
                          flag=0,
                          only_store_hash=True,
                          batch_size=1000,
                          pause_secs=1,
                          timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp,
                                             kv_store, flag, only_store_hash,
                                             batch_size, pause_secs,
                                             timeout_secs)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """

    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(
                    self.cluster.async_wait_for_stats([server], bucket, '',
                                                      'ep_queue_size', '==',
                                                      0))
                tasks.append(
                    self.cluster.async_wait_for_stats([server], bucket, '',
                                                      'ep_flusher_todo', '==',
                                                      0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """

    def _verify_all_buckets(self,
                            server,
                            kv_store=1,
                            timeout=180,
                            max_verify=None,
                            only_store_hash=True,
                            batch_size=1000):
        tasks = []
        for bucket in self.buckets:
            tasks.append(
                self.cluster.async_verify_data(server, bucket,
                                               bucket.kvs[kv_store],
                                               max_verify, only_store_hash,
                                               batch_size))
        for task in tasks:
            task.result(timeout)

    def disable_compaction(self, server=None, bucket="default"):

        server = server or self.servers[0]
        new_config = {
            "viewFragmntThresholdPercentage": None,
            "dbFragmentThresholdPercentage": None,
            "dbFragmentThreshold": None,
            "viewFragmntThreshold": None
        }
        self.cluster.modify_fragmentation_config(server, new_config, bucket)

    def async_create_views(self,
                           server,
                           design_doc_name,
                           views,
                           bucket="default",
                           with_query=True):
        tasks = []
        if len(views):
            for view in views:
                t_ = self.cluster.async_create_view(server, design_doc_name,
                                                    view, bucket, with_query)
                tasks.append(t_)
        else:
            t_ = self.cluster.async_create_view(server, design_doc_name, None,
                                                bucket, with_query)
            tasks.append(t_)
        return tasks

    def create_views(self,
                     server,
                     design_doc_name,
                     views,
                     bucket="default",
                     timeout=None):
        if len(views):
            for view in views:
                self.cluster.create_view(server, design_doc_name, view, bucket,
                                         timeout)
        else:
            self.cluster.create_view(server, design_doc_name, None, bucket,
                                     timeout)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self.default_view
        # keep the reference view's name when prefix is None
        ref_view.name = (prefix, ref_view.name)[prefix is None]
        return [
            View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc)
            for i in xrange(count)
        ]

    def _load_doc_data_all_buckets(self, data_op="create", batch_size=1000):
        #initialize the template for document generator
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs',
                                     template,
                                     age,
                                     first,
                                     start=0,
                                     end=self.num_items)

        self.log.info("%s %s documents..." % (data_op, self.num_items))
        self._load_all_buckets(self.master,
                               gen_load,
                               data_op,
                               0,
                               batch_size=batch_size)

    def verify_cluster_stats(self, servers=None, master=None, max_verify=None):
        if servers is None:
            servers = self.servers
        if master is None:
            master = self.master
        if max_verify is None:
            max_verify = self.max_verify
        self._wait_for_stats_all_buckets(servers)
        self._verify_all_buckets(master, max_verify=max_verify)
        self._verify_stats_all_buckets(servers)
        #verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(
                master, bucket)
        self.assertTrue(
            verified,
            "Lost items!!! Replication completed but sum(curr_items) doesn't match curr_items_tot"
        )
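
A hedged sketch of the call sequence a rebalance-style test built on this
class might use; async_rebalance is assumed to exist on the Cluster helper
(it is not shown in this excerpt) and the server slices are illustrative:

class ExampleRebalanceTest(BaseTestCase):
    def test_rebalance_in_with_load(self):
        self._load_doc_data_all_buckets("create")
        # add one node, then run the combined wait/verify pipeline above
        rebalance = self.cluster.async_rebalance(self.servers[:1],
                                                 self.servers[1:2], [])
        rebalance.result()
        self.verify_cluster_stats(self.servers[:2])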
Example #5
class CheckpointTests(unittest.TestCase):
    def setUp(self):
        self.cluster = Cluster()

        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.num_servers = self.input.param("servers", 1)

        master = self.servers[0]
        num_replicas = self.input.param("replicas", 1)
        self.bucket = 'default'

        # Start: Should be in a before class function
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([master], self)
        # End: Should be in a before class function

        self.quota = self.cluster.init_node(master)
        self.old_vbuckets = self._get_vbuckets(master)
        ClusterOperationHelper.set_vbuckets(master, 1)
        self.cluster.create_default_bucket(master, self.quota, num_replicas)
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])

    def tearDown(self):
        master = self.servers[0]
        ClusterOperationHelper.set_vbuckets(master, self.old_vbuckets)
        rest = RestConnection(master)
        rest.stop_rebalance()
        self.cluster.rebalance(self.servers[:self.num_servers], [],
                               self.servers[1:self.num_servers])
        self.cluster.bucket_delete(master, self.bucket)
        self.cluster.shutdown()

    def checkpoint_create_items(self):
        param = 'checkpoint'
        stat_key = 'vb_0:open_checkpoint_id'
        num_items = 6000

        master = self._get_server_by_state(self.servers[:self.num_servers],
                                           self.bucket, ACTIVE)
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket,
                                  '5000')
        chk_stats = StatsCommon.get_stats(self.servers[:self.num_servers],
                                          self.bucket, param, stat_key)
        load_thread = self.generate_load(master, self.bucket, num_items)
        load_thread.join()
        tasks = []
        for server, value in chk_stats.items():
            tasks.append(
                self.cluster.async_wait_for_stats([server], self.bucket, param,
                                                  stat_key, '>', value))
        for task in tasks:
            try:
                # scale the wait with the number of items, 30s minimum
                timeout = max(30, num_items * .001)
                task.result(timeout)
            except TimeoutError:
                self.fail("New checkpoint not created")

    def checkpoint_create_time(self):
        param = 'checkpoint'
        timeout = 60
        stat_key = 'vb_0:open_checkpoint_id'

        master = self._get_server_by_state(self.servers[:self.num_servers],
                                           self.bucket, ACTIVE)
        self._set_checkpoint_timeout(self.servers[:self.num_servers],
                                     self.bucket, str(timeout))
        chk_stats = StatsCommon.get_stats(self.servers[:self.num_servers],
                                          self.bucket, param, stat_key)
        load_thread = self.generate_load(master, self.bucket, 1)
        load_thread.join()
        log.info("Sleeping for {0} seconds)".format(timeout))
        time.sleep(timeout)
        tasks = []
        for server, value in chk_stats.items():
            tasks.append(
                self.cluster.async_wait_for_stats([server], self.bucket, param,
                                                  stat_key, '>', value))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("New checkpoint not created")
        self._set_checkpoint_timeout(self.servers[:self.num_servers],
                                     self.bucket, str(600))

    def checkpoint_collapse(self):
        param = 'checkpoint'
        chk_size = 5000
        num_items = 25000
        stat_key = 'vb_0:last_closed_checkpoint_id'
        stat_chk_itms = 'vb_0:num_checkpoint_items'

        master = self._get_server_by_state(self.servers[:self.num_servers],
                                           self.bucket, ACTIVE)
        slave1 = self._get_server_by_state(self.servers[:self.num_servers],
                                           self.bucket, REPLICA1)
        slave2 = self._get_server_by_state(self.servers[:self.num_servers],
                                           self.bucket, REPLICA2)
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket,
                                  str(chk_size))
        m_stats = StatsCommon.get_stats([master], self.bucket, param, stat_key)
        self._stop_replication(slave2, self.bucket)
        load_thread = self.generate_load(master, self.bucket, num_items)
        load_thread.join()

        tasks = []
        chk_pnt = str(int(m_stats[m_stats.keys()[0]]) + (num_items / chk_size))
        tasks.append(
            self.cluster.async_wait_for_stats([master], self.bucket, param,
                                              stat_key, '==', chk_pnt))
        tasks.append(
            self.cluster.async_wait_for_stats([slave1], self.bucket, param,
                                              stat_key, '==', chk_pnt))
        tasks.append(
            self.cluster.async_wait_for_stats([slave1], self.bucket, param,
                                              stat_chk_itms, '>=',
                                              str(num_items)))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("Checkpoint not collapsed")

        tasks = []
        self._start_replication(slave2, self.bucket)
        chk_pnt = str(int(m_stats[m_stats.keys()[0]]) + (num_items / chk_size))
        tasks.append(
            self.cluster.async_wait_for_stats([slave2], self.bucket, param,
                                              stat_key, '==', chk_pnt))
        tasks.append(
            self.cluster.async_wait_for_stats([slave1], self.bucket, param,
                                              stat_chk_itms, '<', num_items))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("Checkpoints not replicated to secondary slave")

    def checkpoint_deduplication(self):
        param = 'checkpoint'
        stat_key = 'vb_0:num_checkpoint_items'

        master = self._get_server_by_state(self.servers[:self.num_servers],
                                           self.bucket, ACTIVE)
        slave1 = self._get_server_by_state(self.servers[:self.num_servers],
                                           self.bucket, REPLICA1)
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket,
                                  '5000')
        self._stop_replication(slave1, self.bucket)
        load_thread = self.generate_load(master, self.bucket, 4500)
        load_thread.join()
        load_thread = self.generate_load(master, self.bucket, 1000)
        load_thread.join()
        self._start_replication(slave1, self.bucket)

        tasks = []
        tasks.append(
            self.cluster.async_wait_for_stats([master], self.bucket, param,
                                              stat_key, '==', 4501))
        tasks.append(
            self.cluster.async_wait_for_stats([slave1], self.bucket, param,
                                              stat_key, '==', 4501))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("Items weren't deduplicated")

    def _set_checkpoint_size(self, servers, bucket, size):
        ClusterOperationHelper.flushctl_set(servers[0], 'chk_max_items', size,
                                            bucket)

    def _set_checkpoint_timeout(self, servers, bucket, time):
        ClusterOperationHelper.flushctl_set(servers[0], 'chk_period', time,
                                            bucket)

    def _stop_replication(self, server, bucket):
        ClusterOperationHelper.flushctl_set_per_node(server,
                                                     'tap_throttle_queue_cap',
                                                     0, bucket)

    def _start_replication(self, server, bucket):
        ClusterOperationHelper.flushctl_set_per_node(server,
                                                     'tap_throttle_queue_cap',
                                                     1000000, bucket)

    def _get_vbuckets(self, server):
        rest = RestConnection(server)
        command = "ns_config:search(couchbase_num_vbuckets_default)"
        status, content = rest.diag_eval(command)

        try:
            vbuckets = int(re.sub(r'[^\d]', '', content))
        except (ValueError, TypeError):
            # fall back to the default number of vbuckets
            vbuckets = 1024
        return vbuckets

    def _get_server_by_state(self, servers, bucket, vb_state):
        rest = RestConnection(servers[0])
        vbuckets = rest.get_vbuckets(self.bucket)[0]
        addr = None
        if vb_state == ACTIVE:
            addr = vbuckets.master
        elif vb_state == REPLICA1:
            addr = vbuckets.replica[0].encode("ascii", "ignore")
        elif vb_state == REPLICA2:
            addr = vbuckets.replica[1].encode("ascii", "ignore")
        elif vb_state == REPLICA3:
            addr = vbuckets.replica[2].encode("ascii", "ignore")
        else:
            return None

        addr = addr.split(':', 1)[0]
        for server in servers:
            if addr == server.ip:
                return server
        return None

    def generate_load(self, server, bucket, num_items):
        class LoadGen(Thread):
            def __init__(self, server, bucket, num_items):
                Thread.__init__(self)
                self.server = server
                self.bucket = bucket
                self.num_items = num_items

            def run(self):
                client = MemcachedClientHelper.direct_client(self.server,
                                                             self.bucket)
                for i in range(self.num_items):
                    key = "key-{0}".format(i)
                    value = "value-{0}".format(str(uuid.uuid4())[:7])
                    client.set(key, 0, 0, value, 0)
                log.info("Loaded {0} keys".format(self.num_items))

        load_thread = LoadGen(server, bucket, num_items)
        load_thread.start()
        return load_thread
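
The checkpoint tests above share one pattern: snapshot a checkpoint stat,
apply load, then wait for every node to move past the snapshot. A hedged
sketch of that pattern as a helper method; the method name and constants
are illustrative, not part of the original class:

    def _wait_for_checkpoint_to_advance(self, num_items=1000, timeout=60):
        param, stat_key = 'checkpoint', 'vb_0:open_checkpoint_id'
        servers = self.servers[:self.num_servers]
        # snapshot the stat on every node before loading
        before = StatsCommon.get_stats(servers, self.bucket, param, stat_key)
        self.generate_load(servers[0], self.bucket, num_items).join()
        tasks = [self.cluster.async_wait_for_stats([server], self.bucket,
                                                   param, stat_key, '>', value)
                 for server, value in before.items()]
        for task in tasks:
            task.result(timeout)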
Example #6
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the test case run by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        # max number of items to verify in ValidateDataTask; None verifies all
        self.max_verify = self.input.param("max_verify", None)
        # by default we do not change consistent_view on the server
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test has already been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        self.quota = self._initialize_nodes(self.cluster, self.servers,
                                            self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota,
                                                     self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        if not self.input.param("skip_cleanup", False):
            try:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning(
                        "rebalancing is still running, test should be verified"
                    )
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(
                    self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            finally:
                #stop all existing task manager threads
                self.cluster.shutdown()
                self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(),
                                              self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(),
                                               self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self,
                          cluster,
                          servers,
                          disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(
                cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0 / 3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) /
                           float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(
                self.cluster.async_create_sasl_bucket(server, name, 'password',
                                                      self.bucket_size,
                                                      self.num_replicas))
            self.buckets.append(
                Bucket(name=name,
                       authType="sasl",
                       saslPassword='******',
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(
                self.cluster.async_create_standard_bucket(
                    server, name, 11214 + i, self.bucket_size,
                    self.num_replicas))

            self.buckets.append(
                Bucket(name=name,
                       authType=None,
                       saslPassword=None,
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size,
                       port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _all_buckets_delete(self, server):
        delete_tasks = []
        for bucket in self.buckets:
            delete_tasks.append(
                self.cluster.async_bucket_delete(server, bucket.name))

        for task in delete_tasks:
            task.result()
        self.buckets = []

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'curr_items', '==', items))
            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'vb_active_curr_items', '==',
                                                  items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(
                self.cluster.async_wait_for_stats(servers, bucket, '',
                                                  'vb_replica_curr_items',
                                                  '==',
                                                  items * available_replicas))
            stats_tasks.append(
                self.cluster.async_wait_for_stats(
                    servers, bucket, '', 'curr_items_tot', '==',
                    items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)

    """Asynchronously applys load generation to all bucekts in the cluster.
 bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp
    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """

    def _async_load_all_buckets(self,
                                server,
                                kv_gen,
                                op_type,
                                exp,
                                kv_store=1,
                                flag=0,
                                only_store_hash=True,
                                batch_size=1,
                                pause_secs=1,
                                timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(
                self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                 bucket.kvs[kv_store], op_type,
                                                 exp, flag, only_store_hash,
                                                 batch_size, pause_secs,
                                                 timeout_secs))
        return tasks

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """

    def _load_all_buckets(self,
                          server,
                          kv_gen,
                          op_type,
                          exp,
                          kv_store=1,
                          flag=0,
                          only_store_hash=True,
                          batch_size=1,
                          pause_secs=1,
                          timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp,
                                             kv_store, flag, only_store_hash,
                                             batch_size, pause_secs,
                                             timeout_secs)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """

    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(
                    self.cluster.async_wait_for_stats([server], bucket, '',
                                                      'ep_queue_size', '==',
                                                      0))
                tasks.append(
                    self.cluster.async_wait_for_stats([server], bucket, '',
                                                      'ep_flusher_todo', '==',
                                                      0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """

    def _verify_all_buckets(self,
                            server,
                            kv_store=1,
                            timeout=180,
                            max_verify=None,
                            only_store_hash=True,
                            batch_size=1):
        tasks = []
        for bucket in self.buckets:
            tasks.append(
                self.cluster.async_verify_data(server, bucket,
                                               bucket.kvs[kv_store],
                                               max_verify, only_store_hash,
                                               batch_size))
        for task in tasks:
            task.result(timeout)

    def disable_compaction(self, server=None, bucket="default"):

        server = server or self.servers[0]
        new_config = {
            "viewFragmntThresholdPercentage": None,
            "dbFragmentThresholdPercentage": None,
            "dbFragmentThreshold": None,
            "viewFragmntThreshold": None
        }
        self.cluster.modify_fragmentation_config(server, new_config, bucket)

    def async_create_views(self,
                           server,
                           design_doc_name,
                           views,
                           bucket="default"):
        tasks = []
        if len(views):
            for view in views:
                t_ = self.cluster.async_create_view(server, design_doc_name,
                                                    view, bucket)
                tasks.append(t_)
        else:
            t_ = self.cluster.async_create_view(server, design_doc_name, None,
                                                bucket)
            tasks.append(t_)
        return tasks

    def create_views(self,
                     server,
                     design_doc_name,
                     views,
                     bucket="default",
                     timeout=None):
        if len(views):
            for view in views:
                self.cluster.create_view(server, design_doc_name, view, bucket,
                                         timeout)
        else:
            self.cluster.create_view(server, design_doc_name, None, bucket,
                                     timeout)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self.default_view
        ref_view.name = (prefix, ref_view.name)[prefix is None]
        return [
            View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc)
            for i in xrange(count)
        ]

    def _load_doc_data_all_buckets(self, data_op="create"):
        #initialize the template for document generator
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs',
                                     template,
                                     age,
                                     first,
                                     start=0,
                                     end=self.num_items)

        self.log.info("%s %s documents..." % (data_op, self.num_items))
        self._load_all_buckets(self.master, gen_load, data_op, 0)

    # returns True if warmup completed within wait_time seconds,
    # otherwise returns False
    @staticmethod
    def _wait_warmup_completed(self, servers, bucket_name, wait_time=300):
        warmed_up = False
        log = logger.Logger.get_logger()
        for server in servers:
            mc = None
            start = time.time()
            # Try to get the stats for up to wait_time seconds, else bail out.
            while time.time() - start < wait_time:
                # Get the warmup time for each server
                try:
                    mc = MemcachedClientHelper.direct_client(
                        server, bucket_name)
                    stats = mc.stats()
                    if stats is not None:
                        warmup_time = int(stats["ep_warmup_time"])
                        log.info("ep_warmup_time is %s " % warmup_time)
                        log.info(
                            "Collected the stats %s for server %s:%s" %
                            (stats["ep_warmup_time"], server.ip, server.port))
                        break
                    else:
                        log.info(
                            " Did not get the stats from the server yet, trying again....."
                        )
                        time.sleep(2)
                except Exception as e:
                    log.error(
                        "Could not get warmup_time stats from server %s:%s, exception %s"
                        % (server.ip, server.port, e))
            else:
                self.fail(
                    "Fail! Unable to get the warmup-stats from server %s:%s after trying for %s seconds."
                    % (server.ip, server.port, wait_time))

            # Waiting for warm-up
            start = time.time()
            warmed_up = False
            while time.time() - start < wait_time and not warmed_up:
                if mc.stats()["ep_warmup_thread"] == "complete":
                    log.info(
                        "warmup completed, awesome!!! Warmed up. %s items " %
                        (mc.stats()["curr_items_tot"]))
                    warmed_up = True
                    continue
                elif mc.stats()["ep_warmup_thread"] == "running":
                    log.info("still warming up .... curr_items_tot : %s" %
                             (mc.stats()["curr_items_tot"]))
                else:
                    self.fail(
                        "Value of ep warmup thread does not exist, exiting from this server"
                    )
                time.sleep(5)
            mc.close()
        return warmed_up
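
A hedged usage sketch for the warmup helper above (not part of the original example; the class and method names below are illustrative). Because _wait_warmup_completed is declared @staticmethod yet still takes the test instance as its first parameter, callers pass self explicitly:

class WarmupUsageExample(BaseTestCase):
    def test_warmup(self):
        # server restart elided; poll until ep_warmup_thread == "complete"
        warmed = BaseTestCase._wait_warmup_completed(
            self, self.servers, "default", wait_time=300)
        self.assertTrue(warmed, "warmup did not complete within 300 seconds")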
Example #7
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        #number of the test case run from testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        #max number of items to verify in ValidateDataTask; None - verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        #avoid cleanup if the previous test has already been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        if not self.input.param("skip_cleanup", False):
            try:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning("rebalancing is still running, test should be verified")
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            finally:
                #stop all existing task manager threads
                self.cluster.shutdown()
                self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0 / 3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))
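
    # Worked example for the sizing formula above (illustrative numbers, not
    # from the source): with quota=1000 MB, num_servers=4 and num_buckets=2
    # the matched branch returns int(2.0/3.0 / 4 / 2 * 1000) = 83 MB per
    # bucket; the fallback branch would return int(2.0/3.0 / 2 * 1000) = 333 MB.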

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self.num_replicas))
            self.buckets.append(Bucket(name=name, authType="sasl", saslPassword='******',
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_standard_bucket(server, name,
                                                                          11214 + i,
                                                                          self.bucket_size,
                                                                          self.num_replicas))

            self.buckets.append(Bucket(name=name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                       bucket_size=self.bucket_size, port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _all_buckets_delete(self, server):
        delete_tasks = []
        for bucket in self.buckets:
            delete_tasks.append(self.cluster.async_bucket_delete(server, bucket.name))

        for task in delete_tasks:
            task.result()
        self.buckets = []

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)


    """Asynchronously applys load generation to all bucekts in the cluster.
 bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp
    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """
    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs))
        return tasks

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """
    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """
    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """
    def _verify_all_buckets(self, server, kv_store=1, timeout=180, max_verify=None, only_store_hash=True, batch_size=1):
        tasks = []
        for bucket in self.buckets:
            tasks.append(self.cluster.async_verify_data(server, bucket, bucket.kvs[kv_store], max_verify, only_store_hash, batch_size))
        for task in tasks:
            task.result(timeout)


    def disable_compaction(self, server=None, bucket="default"):
        server = server or self.servers[0]
        new_config = {"viewFragmntThresholdPercentage" : None,
                      "dbFragmentThresholdPercentage" :  None,
                      "dbFragmentThreshold" : None,
                      "viewFragmntThreshold" : None}
        self.cluster.modify_fragmentation_config(server, new_config, bucket)

    def async_create_views(self, server, design_doc_name, views, bucket="default"):
        tasks = []
        if len(views):
            for view in views:
                t_ = self.cluster.async_create_view(server, design_doc_name, view, bucket)
                tasks.append(t_)
        else:
            t_ = self.cluster.async_create_view(server, design_doc_name, None, bucket)
            tasks.append(t_)
        return tasks

    def create_views(self, server, design_doc_name, views, bucket="default", timeout=None):
        if len(views):
            for view in views:
                self.cluster.create_view(server, design_doc_name, view, bucket, timeout)
        else:
            self.cluster.create_view(server, design_doc_name, None, bucket, timeout)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self.default_view
        ref_view.name = (prefix, ref_view.name)[prefix is None]
        return [View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc) for i in xrange(count)]

    def _load_doc_data_all_buckets(self, data_op="create"):
        #initialize the template for document generator
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs', template, age, first, start=0, end=self.num_items)

        self.log.info("%s %s documents..." % (data_op, self.num_items))
        self._load_all_buckets(self.master, gen_load, data_op, 0)

    #returns True if warmup is completed within wait_time seconds,
    #otherwise returns False
    @staticmethod
    def _wait_warmup_completed(self, servers, bucket_name, wait_time=300):
        warmed_up = False
        log = logger.Logger.get_logger()
        for server in servers:
            mc = None
            start = time.time()
            # Try to get the stats for 5 minutes, otherwise bail out.
            while time.time() - start < wait_time:
                # Get the warmup time for each server
                try:
                    mc = MemcachedClientHelper.direct_client(server, bucket_name)
                    stats = mc.stats()
                    if stats is not None:
                        warmup_time = int(stats["ep_warmup_time"])
                        log.info("ep_warmup_time is %s " % warmup_time)
                        log.info(
                            "Collected the stats %s for server %s:%s" % (stats["ep_warmup_time"], server.ip,
                                server.port))
                        break
                    else:
                        log.info(" Did not get the stats from the server yet, trying again.....")
                        time.sleep(2)
                except Exception as e:
                    log.error(
                        "Could not get warmup_time stats from server %s:%s, exception %s" % (server.ip,
                            server.port, e))
            else:
                self.fail(
                    "Fail! Unable to get the warmup-stats from server %s:%s after trying for %s seconds." % (
                        server.ip, server.port, wait_time))

            # Waiting for warm-up
            start = time.time()
            warmed_up = False
            while time.time() - start < wait_time and not warmed_up:
                if mc.stats()["ep_warmup_thread"] == "complete":
                    log.info("warmup completed, awesome!!! Warmed up. %s items " % (mc.stats()["curr_items_tot"]))
                    warmed_up = True
                    continue
                elif mc.stats()["ep_warmup_thread"] == "running":
                    log.info("still warming up .... curr_items_tot : %s" % (mc.stats()["curr_items_tot"]))
                else:
                    fail("Value of ep warmup thread does not exist, exiting from this server")
                time.sleep(5)
            mc.close()
        return warmed_up
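
A hedged end-to-end sketch showing how the helpers in this example compose (the class name is illustrative, and BlobGenerator is borrowed from the failover examples below, not from this class): load every bucket, wait for the write queues to drain, then verify values and item counts.

class LoadVerifyExample(BaseTestCase):
    def test_load_and_verify(self):
        # write self.num_items docs into every bucket
        gen = BlobGenerator('example', 'example_', 256, end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)
        # block until ep_queue_size and ep_flusher_todo drain to 0
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        # compare stored values/hashes against kv_store index 1, then
        # cross-check curr_items / replica item counts
        self._verify_all_buckets(self.master)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])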
Example #8
class FailoverBaseTest(unittest.TestCase):
    @staticmethod
    def setUp(self):
        log = logger.Logger.get_logger()
        self._input = TestInputSingleton.input
        self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
        self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
        self.bidirectional = self._input.param("bidirectional", False)
        self.case_number = self._input.param("case_number", 0)
        self._value_size = self._input.param("value_size", 256)
        self.wait_timeout = self._input.param("wait_timeout", 60)
        self._servers = self._input.servers
        self.master = self._servers[0]
        self._failed_nodes = []
        num_buckets = 0
        self.buckets = []
        self.default_bucket = self._input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
            num_buckets += 1
        self._standard_buckets = self._input.param("standard_buckets", 0)
        self._sasl_buckets = self._input.param("sasl_buckets", 0)
        num_buckets += self._standard_buckets + self._sasl_buckets
        self.dgm_run = self._input.param("dgm_run", True)
        self.log = logger.Logger().get_logger()
        self._cluster_helper = Cluster()
        self.disabled_consistent_view = self._input.param(
            "disabled_consistent_view", None)
        self._quota = self._initialize_nodes(self._cluster_helper,
                                             self._servers,
                                             self.disabled_consistent_view)
        if self.dgm_run:
            self._quota = 256
        self.bucket_size = int(
            (2.0 / 3.0) / float(num_buckets) * float(self._quota))
        self.gen_create = BlobGenerator('loadOne',
                                        'loadOne_',
                                        self._value_size,
                                        end=self._keys_count)
        self.add_back_flag = False
        self._cleanup_nodes = []
        log.info("==============  setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        for server in self._servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        self._setup_cluster()
        self._create_buckets_()
        log.info("==============  setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))

    @staticmethod
    def tearDown(self):
        try:
            self._cluster_helper.shutdown()
            log = logger.Logger.get_logger()
            log.info("==============  tearDown was started for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
            RemoteUtilHelper.common_basic_setup(self._servers)
            log.info("10 seconds delay to wait for membase-server to start")
            time.sleep(10)
            for server in self._cleanup_nodes:
                shell = RemoteMachineShellConnection(server)
                o, r = shell.execute_command("iptables -F")
                shell.log_command_output(o, r)
                o, r = shell.execute_command(
                    "/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT"
                )
                shell.log_command_output(o, r)
                o, r = shell.execute_command(
                    "/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT"
                )
                shell.log_command_output(o, r)
                o, r = shell.execute_command(
                    "/etc/init.d/couchbase-server start")
                shell.log_command_output(o, r)
                shell.disconnect()
            BucketOperationHelper.delete_all_buckets_or_assert(
                self._servers, self)
            ClusterOperationHelper.cleanup_cluster(self._servers)
            ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
            log.info("==============  tearDown was finished for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
        finally:
            pass

    def _initialize_nodes(self,
                          cluster,
                          servers,
                          disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(
                cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _setup_cluster(self):
        rest = RestConnection(self.master)
        credentials = self._input.membase_settings
        ClusterOperationHelper.add_all_nodes_or_assert(self.master,
                                                       self._servers,
                                                       credentials, self)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)

    def _create_buckets_(self):
        if self.default_bucket:
            self._cluster_helper.create_default_bucket(self.master,
                                                       self.bucket_size,
                                                       self._num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self._num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self._sasl_buckets)
        self._create_standard_buckets(self.master, self._standard_buckets)

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(
                self._cluster_helper.async_create_sasl_bucket(
                    server, name, 'password', self.bucket_size,
                    self._num_replicas))
            self.buckets.append(
                Bucket(name=name,
                       authType="sasl",
                       saslPassword='******',
                       num_replicas=self._num_replicas,
                       bucket_size=self.bucket_size))
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(
                self._cluster_helper.async_create_standard_bucket(
                    server, name, 11214 + i, self.bucket_size,
                    self._num_replicas))

            self.buckets.append(
                Bucket(name=name,
                       authType=None,
                       saslPassword=None,
                       num_replicas=self._num_replicas,
                       bucket_size=self.bucket_size,
                       port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _async_load_all_buckets(self,
                                server,
                                kv_gen,
                                op_type,
                                exp,
                                kv_store=1,
                                flag=0,
                                only_store_hash=True,
                                batch_size=1,
                                pause_secs=1,
                                timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(
                self._cluster_helper.async_load_gen_docs(
                    server, bucket.name, gen, bucket.kvs[kv_store], op_type,
                    exp, flag, only_store_hash, batch_size, pause_secs,
                    timeout_secs))
        return tasks

    def _load_all_buckets(self,
                          server,
                          kv_gen,
                          op_type,
                          exp,
                          kv_store=1,
                          flag=0,
                          only_store_hash=True,
                          batch_size=1,
                          pause_secs=1,
                          timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp,
                                             kv_store, flag, only_store_hash,
                                             batch_size, pause_secs,
                                             timeout_secs)
        for task in tasks:
            task.result()

    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(
                    self._cluster_helper.async_wait_for_stats([server], bucket,
                                                              '',
                                                              'ep_queue_size',
                                                              '==', 0))
                tasks.append(
                    self._cluster_helper.async_wait_for_stats(
                        [server], bucket, '', 'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    def _wait_for_replication(self, servers, timeout=600):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                for server_repl in list(set(servers) - set([server])):
                    tasks.append(
                        self._cluster_helper.async_wait_for_stats(
                            [server], bucket, 'tap',
                            'eq_tapq:replication_ns_1@' + server_repl.ip +
                            ':idle', '==', 'true'))
                    tasks.append(
                        self._cluster_helper.async_wait_for_stats(
                            [server], bucket, 'tap',
                            'eq_tapq:replication_ns_1@' + server_repl.ip +
                            ':backfill_completed', '==', 'true'))
        for task in tasks:
            task.result(timeout)

    def _verify_all_buckets(self,
                            server,
                            kv_store=1,
                            timeout=180,
                            max_verify=None,
                            only_store_hash=True,
                            batch_size=1):
        tasks = []
        for bucket in self.buckets:
            tasks.append(
                self._cluster_helper.async_verify_data(server, bucket,
                                                       bucket.kvs[kv_store],
                                                       max_verify,
                                                       only_store_hash,
                                                       batch_size))
        for task in tasks:
            task.result(timeout)

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(
                self._cluster_helper.async_wait_for_stats(
                    servers, bucket, '', 'curr_items', '==', items))
            stats_tasks.append(
                self._cluster_helper.async_wait_for_stats(
                    servers, bucket, '', 'vb_active_curr_items', '==', items))

            available_replicas = self._num_replicas
            if len(servers) <= self._num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(
                self._cluster_helper.async_wait_for_stats(
                    servers, bucket, '', 'vb_replica_curr_items', '==',
                    items * available_replicas))
            stats_tasks.append(
                self._cluster_helper.async_wait_for_stats(
                    servers, bucket, '', 'curr_items_tot', '==',
                    items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)
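
The replica arithmetic in _verify_stats_all_buckets above deserves a worked example (numbers illustrative, not from the source):

# items=1000, _num_replicas=1, len(servers)=4:
#   vb_replica_curr_items expected == 1000 * 1       = 1000
#   curr_items_tot expected        == 1000 * (1 + 1) = 2000
# With a single server, available_replicas is capped at len(servers) - 1 = 0,
# so only curr_items_tot == 1000 is expected.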
Example #9
class CheckpointTests(unittest.TestCase):

    def setUp(self):
        self.cluster = Cluster()

        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.num_servers = self.input.param("servers", 1)

        master = self.servers[0]
        num_replicas = self.input.param("replicas", 1)
        self.bucket = 'default'

        # Start: Should be in a before class function
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([master], self)
        # End: Should be in a before class function

        self.quota = self.cluster.init_node(master)
        self.old_vbuckets = self._get_vbuckets(master)
        ClusterOperationHelper.set_vbuckets(master, 1)
        self.cluster.create_default_bucket(master, self.quota, num_replicas)
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])

    def tearDown(self):
        master = self.servers[0]
        ClusterOperationHelper.set_vbuckets(master, self.old_vbuckets)
        rest = RestConnection(master)
        rest.stop_rebalance()
        self.cluster.rebalance(self.servers[:self.num_servers], [],
                               self.servers[1:self.num_servers])
        self.cluster.bucket_delete(master, self.bucket)
        self.cluster.shutdown()

    def checkpoint_create_items(self):
        param = 'checkpoint'
        stat_key = 'vb_0:open_checkpoint_id'
        num_items = 6000

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, '5000')
        chk_stats = StatsCommon.get_stats(self.servers[:self.num_servers], self.bucket,
                                          param, stat_key)
        load_thread = self.generate_load(master, self.bucket, num_items)
        load_thread.join()
        tasks = []
        for server, value in chk_stats.items():
            tasks.append(self.cluster.async_wait_for_stats([server], self.bucket, param,
                                                           stat_key, '>', value))
        for task in tasks:
            try:
                timeout = 30 if (num_items * .001) < 30 else num_items * .001
                task.result(timeout)
            except TimeoutError:
                self.fail("New checkpoint not created")

    def checkpoint_create_time(self):
        param = 'checkpoint'
        timeout = 60
        stat_key = 'vb_0:open_checkpoint_id'

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        self._set_checkpoint_timeout(self.servers[:self.num_servers], self.bucket, str(timeout))
        chk_stats = StatsCommon.get_stats(self.servers[:self.num_servers], self.bucket,
                                          param, stat_key)
        load_thread = self.generate_load(master, self.bucket, 1)
        load_thread.join()
        log.info("Sleeping for {0} seconds)".format(timeout))
        time.sleep(timeout)
        tasks = []
        for server, value in chk_stats.items():
            tasks.append(self.cluster.async_wait_for_stats([server], self.bucket, param,
                                                           stat_key, '>', value))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("New checkpoint not created")
        self._set_checkpoint_timeout(self.servers[:self.num_servers], self.bucket, str(600))

    def checkpoint_collapse(self):
        param = 'checkpoint'
        chk_size = 5000
        num_items = 25000
        stat_key = 'vb_0:last_closed_checkpoint_id'
        stat_chk_itms = 'vb_0:num_checkpoint_items'

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        slave1 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA1)
        slave2 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA2)
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, str(chk_size))
        m_stats = StatsCommon.get_stats([master], self.bucket, param, stat_key)
        self._stop_replication(slave2, self.bucket)
        load_thread = self.generate_load(master, self.bucket, num_items)
        load_thread.join()

        tasks = []
        chk_pnt = str(int(m_stats[m_stats.keys()[0]]) + (num_items / chk_size))
        tasks.append(self.cluster.async_wait_for_stats([master], self.bucket, param, stat_key,
                                                       '==', chk_pnt))
        tasks.append(self.cluster.async_wait_for_stats([slave1], self.bucket, param, stat_key,
                                                       '==', chk_pnt))
        tasks.append(self.cluster.async_wait_for_stats([slave1], self.bucket, param,
                                                       stat_chk_itms, '>=', str(num_items)))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("Checkpoint not collapsed")

        tasks = []
        self._start_replication(slave2, self.bucket)
        chk_pnt = str(int(m_stats[m_stats.keys()[0]]) + (num_items / chk_size))
        tasks.append(self.cluster.async_wait_for_stats([slave2], self.bucket, param, stat_key,
                                                       '==', chk_pnt))
        tasks.append(self.cluster.async_wait_for_stats([slave1], self.bucket, param,
                                                       stat_chk_itms, '<', num_items))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("Checkpoints not replicated to secondary slave")

    def checkpoint_deduplication(self):
        param = 'checkpoint'
        stat_key = 'vb_0:num_checkpoint_items'

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        slave1 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA1)
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, '5000')
        self._stop_replication(slave1, self.bucket)
        load_thread = self.generate_load(master, self.bucket, 4500)
        load_thread.join()
        load_thread = self.generate_load(master, self.bucket, 1000)
        load_thread.join()
        self._start_replication(slave1, self.bucket)

        tasks = []
        tasks.append(self.cluster.async_wait_for_stats([master], self.bucket, param,
                                                       stat_key, '==', 4501))
        tasks.append(self.cluster.async_wait_for_stats([slave1], self.bucket, param,
                                                       stat_key, '==', 4501))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("Items weren't deduplicated")

    def _set_checkpoint_size(self, servers, bucket, size):
        ClusterOperationHelper.flushctl_set(servers[0], 'chk_max_items', size, bucket)

    def _set_checkpoint_timeout(self, servers, bucket, time):
        ClusterOperationHelper.flushctl_set(servers[0], 'chk_period', time, bucket)

    def _stop_replication(self, server, bucket):
        ClusterOperationHelper.flushctl_set_per_node(server, 'tap_throttle_queue_cap', 0, bucket)

    def _start_replication(self, server, bucket):
        ClusterOperationHelper.flushctl_set_per_node(server, 'tap_throttle_queue_cap', 1000000, bucket)

    def _get_vbuckets(self, server):
        rest = RestConnection(server)
        command = "ns_config:search(couchbase_num_vbuckets_default)"
        status, content = rest.diag_eval(command)

        try:
            vbuckets = int(re.sub('[^\d]', '', content))
        except:
            vbuckets = 1024
        return vbuckets

    def _get_server_by_state(self, servers, bucket, vb_state):
        rest = RestConnection(servers[0])
        vbuckets = rest.get_vbuckets(self.bucket)[0]
        addr = None
        if vb_state == ACTIVE:
            addr = vbuckets.master
        elif vb_state == REPLICA1:
            addr = vbuckets.replica[0].encode("ascii", "ignore")
        elif vb_state == REPLICA2:
            addr = vbuckets.replica[1].encode("ascii", "ignore")
        elif vb_state == REPLICA3:
            addr = vbuckets.replica[2].encode("ascii", "ignore")
        else:
            return None

        addr = addr.split(':', 1)[0]
        for server in servers:
            if addr == server.ip:
                return server
        return None

    def generate_load(self, server, bucket, num_items):
        class LoadGen(Thread):
            def __init__(self, server, bucket, num_items):
                Thread.__init__(self)
                self.server = server
                self.bucket = bucket
                self.num_items = num_items

            def run(self):
                client = MemcachedClientHelper.direct_client(server, bucket)
                for i in range(num_items):
                    key = "key-{0}".format(i)
                    value = "value-{0}".format(str(uuid.uuid4())[:7])
                    client.set(key, 0, 0, value, 0)
                log.info("Loaded {0} key".format(num_items))

        load_thread = LoadGen(server, bucket, num_items)
        load_thread.start()
        return load_thread
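
The per-task timeout in checkpoint_create_items scales with the load size. A behaviorally identical but arguably clearer spelling of that expression (a sketch, not a change to the source):

# 30 if (num_items * .001) < 30 else num_items * .001  is equivalent to:
timeout = max(30, num_items * 0.001)   # at least 30 s, else 1 ms per item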
Example #10
class FailoverBaseTest(unittest.TestCase):

    @staticmethod
    def setUp(self):
        log = logger.Logger.get_logger()
        self._input = TestInputSingleton.input
        self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
        self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
        self.bidirectional = self._input.param("bidirectional", False)
        self.case_number = self._input.param("case_number", 0)
        self._value_size = self._input.param("value_size", 256)
        self.wait_timeout = self._input.param("wait_timeout", 60)
        self._servers = self._input.servers
        self.master = self._servers[0]
        self._failed_nodes = []
        num_buckets = 0
        self.buckets = []
        self.default_bucket = self._input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
            num_buckets += 1
        self._standard_buckets = self._input.param("standard_buckets", 0)
        self._sasl_buckets = self._input.param("sasl_buckets", 0)
        num_buckets += self._standard_buckets + self._sasl_buckets
        self.dgm_run = self._input.param("dgm_run", True)
        self.log = logger.Logger().get_logger()
        self._cluster_helper = Cluster()
        self.disabled_consistent_view = self._input.param("disabled_consistent_view", None)
        self._quota = self._initialize_nodes(self._cluster_helper, self._servers, self.disabled_consistent_view)
        if self.dgm_run:
            self._quota = 256
        self.bucket_size = int((2.0 / 3.0) / float(num_buckets) * float(self._quota))
        self.gen_create = BlobGenerator('loadOne', 'loadOne_', self._value_size, end=self._keys_count)
        self.add_back_flag = False
        self._cleanup_nodes = []
        log.info("==============  setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        for server in self._servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        self._setup_cluster()
        self._create_buckets_()
        log.info("==============  setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))

    @staticmethod
    def tearDown(self):
        try:
            self._cluster_helper.shutdown()
            log = logger.Logger.get_logger()
            log.info("==============  tearDown was started for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
            RemoteUtilHelper.common_basic_setup(self._servers)
            log.info("10 seconds delay to wait for membase-server to start")
            time.sleep(10)
            for server in self._cleanup_nodes:
                shell = RemoteMachineShellConnection(server)
                o, r = shell.execute_command("iptables -F")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/etc/init.d/couchbase-server start")
                shell.log_command_output(o, r)
                shell.disconnect()
            BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
            ClusterOperationHelper.cleanup_cluster(self._servers)
            ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
            log.info("==============  tearDown was finished for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
        finally:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _setup_cluster(self):
        rest = RestConnection(self.master)
        credentials = self._input.membase_settings
        ClusterOperationHelper.add_all_nodes_or_assert(self.master, self._servers, credentials, self)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)

    def _create_buckets_(self):
        if self.default_bucket:
            self._cluster_helper.create_default_bucket(self.master, self.bucket_size, self._num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self._num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self._sasl_buckets)
        self._create_standard_buckets(self.master, self._standard_buckets)

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self._cluster_helper.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self._num_replicas))
            self.buckets.append(Bucket(name=name, authType="sasl", saslPassword='******',
                                       num_replicas=self._num_replicas, bucket_size=self.bucket_size))
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(self._cluster_helper.async_create_standard_bucket(server, name,
                                                                          11214 + i,
                                                                          self.bucket_size,
                                                                          self._num_replicas))

            self.buckets.append(Bucket(name=name, authType=None, saslPassword=None, num_replicas=self._num_replicas,
                                       bucket_size=self.bucket_size, port=11214 + i))
        for task in bucket_tasks:
            task.result()

    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(self._cluster_helper.async_load_gen_docs(server, bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs))
        return tasks

    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        for task in tasks:
            task.result()

    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self._cluster_helper.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self._cluster_helper.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    def _wait_for_replication(self, servers, timeout=600):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                for server_repl in list(set(servers) - set([server])):
                    tasks.append(self._cluster_helper.async_wait_for_stats([server], bucket, 'tap',
                                   'eq_tapq:replication_ns_1@' + server_repl.ip + ':idle', '==', 'true'))
                    tasks.append(self._cluster_helper.async_wait_for_stats([server], bucket, 'tap',
                                   'eq_tapq:replication_ns_1@' + server_repl.ip + ':backfill_completed', '==', 'true'))
        for task in tasks:
            task.result(timeout)


    def _verify_all_buckets(self, server, kv_store=1, timeout=180, max_verify=None, only_store_hash=True, batch_size=1):
        tasks = []
        for bucket in self.buckets:
            tasks.append(self._cluster_helper.async_verify_data(server, bucket, bucket.kvs[kv_store], max_verify, only_store_hash, batch_size))
        for task in tasks:
            task.result(timeout)

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(self._cluster_helper.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self._cluster_helper.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self._num_replicas
            if len(servers) <= self._num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(self._cluster_helper.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self._cluster_helper.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)
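
_wait_for_replication above polls per-peer TAP queue stats until replication is quiescent. A hedged illustration of the stat keys it constructs (IP addresses invented for the example):

# For a two-node cluster 10.0.0.1 / 10.0.0.2, the keys polled on 10.0.0.1 are:
#   eq_tapq:replication_ns_1@10.0.0.2:idle               == "true"
#   eq_tapq:replication_ns_1@10.0.0.2:backfill_completed == "true"
# i.e. the replication stream toward each peer is idle and its backfill
# has finished before the wait returns.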
Example #11
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        #number of the test case run from testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        #initial number of nodes in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)

        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        #max number of items to verify in ValidateDataTask; None - verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        #avoid cleanup if the previous test has already been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1:
            #rebalance all nodes into the cluster before each test
            self.cluster.rebalance(self.servers[:self.num_servers], self.servers[1:self.num_servers], [])
        elif self.nodes_init > 1:
            self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        try:
            if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                and TestInputSingleton.input.param("stop-on-failure", False))\
                    or self.input.param("skip_cleanup", False):
                self.log.warn("CLEANUP WAS SKIPPED")
            else:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning("rebalancing is still running, test should be verified")
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        finally:
            #stop all existing task manager threads
            self.cluster.shutdown()
            self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0 / 3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self.num_replicas))
            self.buckets.append(Bucket(name=name, authType="sasl", saslPassword='password',
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_standard_bucket(server, name,
                                                                          11214 + i,
                                                                          self.bucket_size,
                                                                          self.num_replicas))

            self.buckets.append(Bucket(name=name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                       bucket_size=self.bucket_size, port=11214 + i))
        for task in bucket_tasks:
            task.result()
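
    # Both bucket helpers follow the same fire-then-join pattern: all
    # creations are started asynchronously and task.result() is called
    # afterwards, so a failure on any bucket surfaces before the test
    # proceeds. Illustrative use (names follow the helpers above):
    #
    #     self._create_sasl_buckets(self.master, 2)      # bucket0, bucket1
    #     self._create_standard_buckets(self.master, 1)  # standard_bucket0 on port 11214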

    def _all_buckets_delete(self, server):
        delete_tasks = []
        for bucket in self.buckets:
            delete_tasks.append(self.cluster.async_bucket_delete(server, bucket.name))

        for task in delete_tasks:
            task.result()
        self.buckets = []

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                # with no more servers than replicas, only len(servers) - 1
                # replica copies can actually be placed
                available_replicas = len(servers) - 1
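            # Worked example (illustrative): with num_replicas=2 but only
            # two servers, at most one replica copy per item can be placed,
            # so the checks below expect items * 1 replica items and
            # items * 2 for curr_items_tot (active + replica).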

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)

    """Asynchronously applies load generation to all buckets in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """
    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs))
        return tasks
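
    # Illustrative async usage (hypothetical test flow): start loads on
    # every bucket, do other work such as a rebalance, then join:
    #
    #     tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
    #     # ... trigger a rebalance or failover here ...
    #     for task in tasks:
    #         task.result()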

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """
    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1000, pause_secs=1, timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """
    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()
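
    # Once ep_queue_size and ep_flusher_todo both reach zero on every
    # server, the disk-write queue has fully drained, i.e. everything
    # loaded so far has been persisted before verification starts.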

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """
    def _verify_all_buckets(self, server, kv_store=1, timeout=180, max_verify=None, only_store_hash=True, batch_size=1000):
        tasks = []
        for bucket in self.buckets:
            tasks.append(self.cluster.async_verify_data(server, bucket, bucket.kvs[kv_store], max_verify, only_store_hash, batch_size))
        for task in tasks:
            task.result(timeout)

    def disable_compaction(self, server=None, bucket="default"):
        server = server or self.servers[0]
        # setting each threshold to None clears the auto-compaction triggers
        # for the given bucket (the key names are passed through verbatim)
        new_config = {"viewFragmntThresholdPercentage": None,
                      "dbFragmentThresholdPercentage": None,
                      "dbFragmentThreshold": None,
                      "viewFragmntThreshold": None}
        self.cluster.modify_fragmentation_config(server, new_config, bucket)

    def async_create_views(self, server, design_doc_name, views, bucket="default", with_query=True):
        tasks = []
        if len(views):
            for view in views:
                t_ = self.cluster.async_create_view(server, design_doc_name, view, bucket, with_query)
                tasks.append(t_)
        else:
            t_ = self.cluster.async_create_view(server, design_doc_name, None, bucket, with_query)
            tasks.append(t_)
        return tasks
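
    # Illustrative use (hypothetical names): create several views under one
    # design document and join on the returned tasks:
    #
    #     tasks = self.async_create_views(self.master, "ddoc1", views)
    #     for task in tasks:
    #         task.result()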

    def create_views(self, server, design_doc_name, views, bucket="default", timeout=None):
        if len(views):
            for view in views:
                self.cluster.create_view(server, design_doc_name, view, bucket, timeout)
        else:
            self.cluster.create_view(server, design_doc_name, None, bucket, timeout)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self.default_view
        # fall back to the reference view's own name when no prefix is given
        ref_view.name = ref_view.name if prefix is None else prefix
        return [View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc) for i in xrange(count)]
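
    # Worked example: make_default_views("dev_test", 3) returns views named
    # dev_test0, dev_test1 and dev_test2, all sharing self.default_view's
    # map function; with prefix=None the reference view keeps its own name.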

    def _load_doc_data_all_buckets(self, data_op="create", batch_size=1000):
        # initialize the template for the document generator
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs', template, age, first, start=0, end=self.num_items)

        self.log.info("%s %s documents..." % (data_op, self.num_items))
        self._load_all_buckets(self.master, gen_load, data_op, 0, batch_size=batch_size)

    def verify_cluster_stats(self, servers=None, master=None, max_verify=None):
        if servers is None:
            servers = self.servers
        if master is None:
            master = self.master
        if max_verify is None:
            max_verify = self.max_verify
        self._wait_for_stats_all_buckets(servers)
        self._verify_all_buckets(master, max_verify=max_verify)
        self._verify_stats_all_buckets(servers)
        # verify that curr_items_tot corresponds to the sum of curr_items across all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(master, bucket)
        self.assertTrue(verified,
                        "Lost items!!! Replication was completed, but sum(curr_items) doesn't match curr_items_tot")
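
    # A minimal end-to-end sketch built on the helpers above (a hypothetical
    # example, not part of the original suite; named without the test_ prefix
    # so the runner does not pick it up). The DocumentGenerator arguments
    # follow the pattern used in _load_doc_data_all_buckets:
    def example_load_and_verify(self):
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen = DocumentGenerator('test_docs', template, age, first,
                                start=0, end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)
        self.verify_cluster_stats()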