def setUp(self):
     super(AutoFailoverAbortsRebalance, self).setUp()
     self.master = self.servers[0]
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
     self.num_buckets = self.num_buckets - 1  # the base class already creates the default bucket
     if self.num_buckets:
         BucketOperationHelper.create_multiple_buckets(self.master, self.num_replicas, node_ram_ratio * (2.0 / 3.0),
                                                       howmany=self.num_buckets, bucket_storage=self.bucket_storage)
     self.buckets = self.rest.get_buckets()
     for bucket in self.buckets:
         ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
         self.assertTrue(ready, "wait_for_memcached failed")
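     # key generators: one for the initial create load, two for the update/delete workloads started in the background below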
     self.initial_load_gen = BlobGenerator('auto-failover',
                                           'auto-failover-',
                                           self.value_size,
                                           end=self.num_items)
     self.update_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.update_items)
     self.delete_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          start=self.update_items,
                                          end=self.delete_items)
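     # load the initial items synchronously, then run the update and delete workloads asynchronously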
     self._load_all_buckets(self.servers[0], self.initial_load_gen,
                            "create", 0)
     self._async_load_all_buckets(self.orchestrator,
                                  self.update_load_gen, "update", 0)
     self._async_load_all_buckets(self.orchestrator,
                                  self.delete_load_gen, "delete", 0)
Example #2
    def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
        log = logger.Logger.get_logger()
        servers = input.servers
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
        serverInfo = servers[0]

        log.info('picking server : {0} as the master'.format(serverInfo))
        # if all nodes are on the same machine, scale bucket_ram_ratio by 1 / len(servers)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
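        # initialize the master node and size its memory quota from the per-node RAM ratio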
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
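        # the 'ascii' test param selects plain (non-SASL) buckets; otherwise SASL buckets are created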
        if "ascii" in TestInputSingleton.input.test_params \
                and TestInputSingleton.input.test_params["ascii"].lower() == "true":
            BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                          howmany=1, sasl=False)
        else:
            BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                          howmany=1, sasl=True)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            testcase.assertTrue(ready, "wait_for_memcached failed")
Example #3
 def create_buckets(servers, testcase, howmany=1, replica=1, bucket_ram_ratio=(2.0 / 3.0)):
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
     master = servers[0]
     BucketOperationHelper.create_multiple_buckets(master, replica, node_ram_ratio * bucket_ram_ratio, howmany=howmany)
     rest = RestConnection(master)
     buckets = rest.get_buckets()
     for bucket in buckets:
         ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
         testcase.assertTrue(ready, "wait_for_memcached failed")
Example #4
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        master = self.servers[0]
        rest = RestConnection(master)

        # Cleanup previous state
        self.task_manager = None
        rest.stop_rebalance()
        RebalanceBaseTest.reset(self)

        # Initialize test params
        self.replica = self.input.param("replica", 1)

        # By default we use keys-count for LoadTask
        # Use keys-count=-1 to use load-ratio
        self.keys_count = self.input.param("keys-count", 30000)
        self.load_ratio = self.input.param("load-ratio", 6)
        self.expiry_ratio = self.input.param("expiry-ratio", 0.1)
        self.delete_ratio = self.input.param("delete-ratio", 0.1)
        self.access_ratio = self.input.param("access-ratio", 0.8)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.num_rebalance = self.input.param("num-rebalance", 1)
        self.do_ascii = self.input.param("ascii", False)
        self.do_verify = self.input.param("do-verify", True)
        self.repeat = self.input.param("repeat", 1)
        self.max_ops_per_second = self.input.param("max_ops_per_second", 500)
        self.min_item_size = self.input.param("min_item_size", 128)
        self.do_stop = self.input.param("do-stop", False)
        self.skip_cleanup = self.input.param("skip-cleanup", False)

        self.checkResidentRatio = self.input.param("checkResidentRatio", False)
        self.activeRatio = self.input.param("activeRatio", 50)
        self.replicaRatio = self.input.param("replicaRatio", 50)
        self.case_number = self.input.param("case_number", 0)

        self.log.info('picking server : {0} as the master'.format(master))

        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
            password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
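        # create the test buckets from the per-node RAM ratio; SASL unless the ascii param is set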
        BucketOperationHelper.create_multiple_buckets(master, self.replica, node_ram_ratio * (2.0 / 3.0),
                howmany=self.num_buckets, sasl=not self.do_ascii)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
            self.assertTrue(ready, "wait_for_memcached failed")

        # Initialize and start the taskManager
        self.task_manager = taskmanager.TaskManager()
        self.task_manager.start()
Example #5
    def _cluster_setup(self):
        replicas = self.input.param("replicas", 1)
        keys_count = self.input.param("keys-count", 0)
        num_buckets = self.input.param("num-buckets", 1)

        bucket_name = "default"
        master = self.servers[0]
        credentials = self.input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
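        # add all servers to the cluster and rebalance before creating buckets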
        ClusterOperationHelper.add_and_rebalance(self.servers, True)

        if num_buckets == 1:
            bucket_ram = info.memoryQuota * 2 // 3
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi)
        else:
            created = BucketOperationHelper.create_multiple_buckets(self.master, replicas, howmany=num_buckets)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(self.master, bucket.name, keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))
Example #6
    def setUp(self):
        self.log = logger.Logger().get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.ip = self.master.ip
        self.finished = False
        self.keys = []
        self.keycount = 0
        self.failure_string = ""

        self.cleanup()

        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        self.port = info.moxi + 1

        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
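        # create ten non-SASL buckets and wait for the first one ('bucket-0') to come up in memcached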
        created = BucketOperationHelper.create_multiple_buckets(
            self.master,
            replica=1,
            bucket_ram_ratio=(2.0 / 3.0),
            howmany=10,
            sasl=False)
        self.assertTrue(created, "bucket creation failed")

        ready = BucketOperationHelper.wait_for_memcached(
            self.master, "bucket-0")
        self.assertTrue(ready, "wait_for_memcached failed")
Example #7
 def common_setUp(self, with_buckets):
     ClusterOperationHelper.cleanup_cluster(self.servers)
     server = self.servers[0]
     if with_buckets:
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
         ok = BucketOperationHelper.create_multiple_buckets(server, 1)
         if not ok:
             self.fail("unable to create multiple buckets on this node : {0}".format(server))
Example #8
    def _cluster_setup(self):
        keys_count = self.input.param("keys-count", 0)
        num_buckets = self.input.param("num-buckets", 1)
        bucketType = self.input.param("bucketType", "ephemeral")
        evictionPolicy = self.input.param("evictionPolicy",
                                          "noEviction")  # fullEviction
        self.bucket_storage = self.input.param("bucket_storage", 'couchstore')

        # master = self.servers[0]
        # credentials = self.input.membase_settings
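        # size the memory quota as the smaller of the node's reserved memory and the kv_memory param, then reset auto-reprovision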
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        memory = min(info.mcdMemoryReserved,
                     self.input.param("kv_memory", 1000))
        rest.init_cluster_memoryQuota(memoryQuota=memory)
        rest.reset_autoreprovision()
        self._add_and_rebalance(self.servers, True)

        if num_buckets == 1:
            bucket_name = "default"
            bucket_ram = info.memoryQuota * 2 // 3
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=self.replicas,
                               proxyPort=info.moxi,
                               bucketType=bucketType,
                               evictionPolicy=evictionPolicy,
                               storageBackend=self.bucket_storage)
        else:
            created = BucketOperationHelper.create_multiple_buckets(
                self.master,
                self.replicas,
                howmany=num_buckets,
                bucketType=bucketType,
                evictionPolicy=evictionPolicy,
                storageBackend=self.bucket_storage)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(
                self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")

        for bucket in buckets:
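            # value sizes in bytes mapped to the fraction of items created at each size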
            distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
            inserted_count, rejected_count = self.load_bucket_and_return_the_keys(
                servers=[self.master],
                name=bucket.name,
                # ram_load_ratio=0.02,
                value_size_distribution=distribution,
                write_only=True,
                moxi=True,
                number_of_threads=2,
                number_of_items=keys_count)
            self.loaded_items[bucket.name] = inserted_count
Example #9
    def _create_multiple_buckets(self, replica=1):
        master = self.servers[0]
        bucket_type = self.input.param('bucket-type', None)
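        # a bucket-type of 'sasl' creates password-protected buckets; anything else creates buckets with an empty SASL password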
        if bucket_type == 'sasl':
            created = BucketOperationHelper.create_multiple_buckets(master, replica,
                                                                    howmany=self.num_buckets,
                                                                    saslPassword="******")
        else:
            created = BucketOperationHelper.create_multiple_buckets(master, replica,
                                                                    howmany=self.num_buckets,
                                                                    saslPassword="")
        self.assertTrue(created, "unable to create multiple buckets")

        rest = RestConnection(master)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")
Example #10
    def _create_multiple_buckets(self, replica=1):
        master = self.servers[0]
        created = BucketOperationHelper.create_multiple_buckets(master, replica, howmany=self.num_buckets)
        self.assertTrue(created, "unable to create multiple buckets")

        rest = RestConnection(master)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")
Example #11
    def _cluster_setup(self):
        log = logger.Logger.get_logger()

        replicas = self._input.param("replicas", 1)
        keys_count = self._input.param("keys-count", 0)
        num_buckets = self._input.param("num-buckets", 1)

        bucket_name = "default"
        master = self._servers[0]
        credentials = self._input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
        ClusterOperationHelper.add_all_nodes_or_assert(self.master,
                                                       self._servers,
                                                       credentials, self)
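        # create one bucket directly over REST or several via the helper, rebalancing the newly added nodes once memcached reports them ready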
        bucket_ram = info.memoryQuota * 2 // 3

        if num_buckets == 1:
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi)
            ready = BucketOperationHelper.wait_for_memcached(
                self.master, bucket_name)
            nodes = rest.node_statuses()
            rest.rebalance(otpNodes=[node.id for node in nodes],
                           ejectedNodes=[])
            buckets = rest.get_buckets()
        else:
            created = BucketOperationHelper.create_multiple_buckets(
                self.master, replicas, howmany=num_buckets)
            self.assertTrue(created, "unable to create multiple buckets")
            buckets = rest.get_buckets()
            for bucket in buckets:
                ready = BucketOperationHelper.wait_for_memcached(
                    self.master, bucket.name)
                self.assertTrue(ready, msg="wait_for_memcached failed")
                nodes = rest.node_statuses()
                rest.rebalance(otpNodes=[node.id for node in nodes],
                               ejectedNodes=[])

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(self.master, bucket.name,
                                               keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))

        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)
        self.assertTrue(ready, "wait_for_memcached failed")
Example #12
    def _cluster_setup(self):
        keys_count = self.input.param("keys-count", 0)
        num_buckets = self.input.param("num-buckets", 1)
        bucketType = self.input.param("bucketType", "ephemeral")
        evictionPolicy = self.input.param("evictionPolicy", "noEviction")  # fullEviction

        # master = self.servers[0]
        # credentials = self.input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        memory = min(info.mcdMemoryReserved, self.input.param("kv_memory", 1000))
        rest.init_cluster_memoryQuota(memoryQuota=memory)
        rest.reset_autoreprovision()
        self._add_and_rebalance(self.servers, True)
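        # a single bucket gets two thirds of the memory quota; otherwise the helper creates several buckets with the requested type and eviction policy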

        if num_buckets == 1:
            bucket_name = "default"
            bucket_ram = info.memoryQuota * 2 // 3
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=self.replicas,
                               proxyPort=info.moxi,
                               bucketType=bucketType,
                               evictionPolicy=evictionPolicy)
        else:
            created = BucketOperationHelper.create_multiple_buckets(
                self.master, self.replicas, howmany=num_buckets,
                bucketType=bucketType, evictionPolicy=evictionPolicy)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")

        for bucket in buckets:
            distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
            inserted_keys, rejected_keys = self.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                name=bucket.name,
                                                                                # ram_load_ratio=0.02,
                                                                                value_size_distribution=distribution,
                                                                                write_only=True,
                                                                                moxi=True,
                                                                                number_of_threads=2,
                                                                                number_of_items=keys_count)
            self.loaded_items[bucket.name] = inserted_keys
Example #13
    def _cluster_setup(self):
        log = logger.Logger.get_logger()

        replicas = self._input.param("replicas", 1)
        keys_count = self._input.param("keys-count", 0)
        num_buckets = self._input.param("num-buckets", 1)

        bucket_name = "default"
        master = self._servers[0]
        credentials = self._input.membase_settings
        rest = RestConnection(master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
        ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers, credentials, self)
        bucket_ram = info.memoryQuota * 2 // 3

        if num_buckets == 1:
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi)
            ready = BucketOperationHelper.wait_for_memcached(master, bucket_name)
            nodes = rest.node_statuses()
            rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
            buckets = rest.get_buckets()
        else:
            created = BucketOperationHelper.create_multiple_buckets(master, replicas, howmany=num_buckets)
            self.assertTrue(created, "unable to create multiple buckets")
            buckets = rest.get_buckets()
            for bucket in buckets:
                ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
                self.assertTrue(ready, msg="wait_for_memcached failed")
                nodes = rest.node_statuses()
                rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])

#        self.load_data(master, bucket_name, keys_count)

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(master, bucket.name, keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))

        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)
        self.assertTrue(ready, "wait_for_memcached failed")
Example #14
    def _cluster_setup(self):
        replicas = self.input.param("replicas", 1)
        keys_count = self.input.param("keys-count", 0)
        num_buckets = self.input.param("num-buckets", 1)
        bucket_storage = self.input.param("bucket_storage", 'couchstore')

        bucket_name = "default"
        master = self.servers[0]
        credentials = self.input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
        ClusterOperationHelper.add_and_rebalance(self.servers, True)
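        # a single bucket gets two thirds of the quota; multiple buckets are created by the helper with a smaller per-bucket RAM ratio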

        if num_buckets == 1:
            bucket_ram = info.memoryQuota * 2 // 3
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi,
                               storageBackend=bucket_storage)
        else:
            created = BucketOperationHelper.create_multiple_buckets(
                self.master,
                replicas,
                howmany=num_buckets,
                bucket_ram_ratio=(1.0 / 4.0),
                bucket_storage=bucket_storage)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(
                self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(self.master, bucket.name,
                                               keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))
Example #15
def _create_load_multiple_bucket(self, server, bucket_data, howmany=2):
    created = BucketOperationHelper.create_multiple_buckets(server, 1, howmany=howmany)
    self.assertTrue(created, "unable to create multiple buckets")
    rest = RestConnection(server)
    buckets = rest.get_buckets()
    for bucket in buckets:
        bucket_data[bucket.name] = {}
        ready = BucketOperationHelper.wait_for_memcached(server, bucket.name)
        self.assertTrue(ready, "wait_for_memcached failed")
        #let's insert some data
        distribution = {2 * 1024: 0.5, 20: 0.5}
        bucket_data[bucket.name]["inserted_keys"], bucket_data[bucket.name]["reject_keys"] =\
        MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[server], name=bucket.name,
                                                              ram_load_ratio=2.0,
                                                              number_of_threads=2,
                                                              value_size_distribution=distribution,
                                                              write_only=True,
                                                              moxi=True)
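        # wait for the disk write queue (ep_queue_size) and flusher backlog (ep_flusher_todo) to drain so the load is persisted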
        RebalanceHelper.wait_for_stats(server, bucket.name, 'ep_queue_size', 0)
        RebalanceHelper.wait_for_stats(server, bucket.name, 'ep_flusher_todo', 0)