def getr_negative_corrupted_vbucket_test(self):
    """Read replica data from a corrupted vbucket and verify the expected
    error (``self.error``) is raised.

    Driven by the "vbucket_state" test param: when set, the correct
    replica vbucket is forced into that state before the read; when
    empty, the read is sent to a replica vbucket that does NOT own the
    key.  Fails if an expected error never appears.
    """
    vbucket_state = self.input.param("vbucket_state", '')
    gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                            start=0, end=self.num_items)
    self.perform_docs_ops(self.master, [gen], 'create')
    self.log.info("Checking replica read")
    client = VBucketAwareMemcached(RestConnection(self.master),
                                   self.default_bucket_name)
    while gen.has_next():
        try:
            key, _ = gen.next()
            vBucketId = client._get_vBucket_id(key)
            mem = client.memcached_for_replica_vbucket(vBucketId)
            if vbucket_state:
                # corrupt the state of the key's own replica vbucket
                mem.set_vbucket_state(vBucketId, vbucket_state)
                msg = "Vbucket %s set to pending state" % vBucketId
                mem_to_read = mem
            else:
                # pick any replica vbucket that lives on a different
                # server than the key's correct replica
                wrong_vbucket = [
                    v for v in client.vBucketMapReplica
                    if mem.host != client.vBucketMapReplica[v][0].split(':')[0] or
                    str(mem.port) != client.vBucketMapReplica[v][0].split(':')[1]][0]
                mem_to_read = client.memcached_for_replica_vbucket(wrong_vbucket)
                msg = "Key: %s. Correct host is %s, test try to get from %s host. " % (
                    key, mem.host, mem_to_read.host)
                msg += "Correct vbucket %s, wrong vbucket %s" % (vBucketId, wrong_vbucket)
            self.log.info(msg)
            client._send_op(mem_to_read.getr, key)
        except Exception as ex:
            if self.error and str(ex).find(self.error) != -1:
                self.log.info("Expected error %s appeared as expected" % self.error)
            else:
                raise ex
        else:
            if self.error:
                self.fail("Expected error %s didn't appear as expected" % self.error)
def test_vbucket_id_option(self):
    """Load docs directly into each vbucket of the origin cluster, run
    cbtransfer with the ``-i`` (single vbucket) option, and verify that
    the keys of that vbucket arrive on the destination with their
    original values."""
    bucket = RestConnection(self.server_origin).get_bucket(self.buckets[0])
    # trim so the items divide evenly among the vbuckets
    self.num_items = self.num_items - (self.num_items % len(bucket.vbuckets))
    num_items_per_vb = self.num_items / len(bucket.vbuckets)
    template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
    gen_load = DocumentGenerator('cbtransfer', template, range(5),
                                 ['james', 'john'],
                                 start=0, end=self.num_items)
    client = MemcachedClient(self.server_origin.ip,
                             int(bucket.vbuckets[0].master.split(':')[1]))
    kv_value_dict = {}
    # we only transfer (and later verify) the last vbucket
    vb_id_to_check = bucket.vbuckets[-1].id
    for vb_id in xrange(len(bucket.vbuckets)):
        cur_items_per_vb = 0
        while cur_items_per_vb < num_items_per_vb:
            key, value = gen_load.next()
            client.set(key, 0, 0, value, vb_id)
            if vb_id_to_check == vb_id:
                kv_value_dict[key] = value
            cur_items_per_vb += 1
    transfer_source = 'http://%s:%s' % (self.server_origin.ip,
                                        self.server_origin.port)
    transfer_destination = 'http://%s:%s' % (self.server_recovery.ip,
                                             self.server_recovery.port)
    self.shell.execute_cbtransfer(
        transfer_source, transfer_destination,
        "-b %s -B %s -i %s" % (bucket.name, bucket.name, vb_id_to_check))
    client = MemcachedClient(self.server_recovery.ip,
                             int(bucket.vbuckets[0].master.split(':')[1]))
    for key, value in kv_value_dict.iteritems():
        _, _, d = client.get(key, vbucket=vb_id_to_check)
        self.assertEquals(d, value,
                          'Key: %s expected. Value expected %s. Value actual %s' % (
                              key, value, d))
def getr_negative_test(self):
    """Load docs, then drive the replica-read path and verify the
    expected error (``self.error``) is raised; fails if an expected
    error never appears (consistent with the other negative getr
    tests)."""
    gen_1 = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                              start=0, end=self.num_items / 2)
    gen_2 = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                              start=self.num_items / 2, end=self.num_items)
    self.log.info("LOAD PHASE")
    if not self.skipload:
        self.perform_docs_ops(self.master, [gen_1, gen_2], self.data_ops)
    if self.wait_expiration:
        # let the loaded docs expire before reading them back
        self.sleep(self.expiration)
    self.log.info("READ REPLICA PHASE")
    self.log.info("Checking replica read")
    try:
        self._load_all_buckets(self.master, gen_1, 'read_replica',
                               self.expiration, batch_size=1)
    except Exception as ex:
        if self.error and str(ex).find(self.error) != -1:
            self.log.info("Expected error %s appeared as expected" % self.error)
        else:
            raise ex
    else:
        # an expected error that never surfaced is a test failure
        if self.error:
            self.fail("Expected error %s didn't appear as expected" % self.error)
def setup_doc_gens(self):
    """Build the json/blob document generators used by the ES XDCR tests
    and store them on self (gen_create/recreate/update/delete/blob)."""
    total = self._num_items
    ordering = range(total / 4)
    sites1 = ['google', 'bing', 'yahoo', 'wiki']
    sites2 = ['mashable', 'techcrunch', 'hackernews', 'slashdot']
    template = '{{ "ordering": {0}, "site_name": "{1}" }}'
    # boundaries for the partial update/delete generators
    update_end = int(total * float(self._percent_update) / 100)
    delete_start = int(total * float(100 - self._percent_delete) / 100)
    self.gen_create = DocumentGenerator('es_xdcr_docs', template, ordering,
                                        sites1, start=0, end=total)
    self.gen_recreate = DocumentGenerator('es_xdcr_docs', template, ordering,
                                          sites2, start=0, end=total)
    self.gen_update = DocumentGenerator('es_xdcr_docs', template, ordering,
                                        sites1, start=0, end=update_end)
    self.gen_delete = DocumentGenerator('es_xdcr_docs', template, ordering,
                                        sites1, start=delete_start, end=total)
    self.gen_blob = BlobGenerator('loadOne', 'loadOne', self._value_size,
                                  end=total)
def _load_by_vbuckets(self, bucket):
    """Load ``self.num_items`` docs, distributing them evenly across all
    vbuckets of *bucket*; records which keys went to which vbucket in
    ``self.keys_per_vbuckets_dict``."""
    bucket = RestConnection(self.master).get_bucket(bucket)
    vb_count = len(bucket.vbuckets)
    # trim so the items divide evenly among the vbuckets
    self.num_items -= self.num_items % vb_count
    per_vb = self.num_items / vb_count
    template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
    gen_load = DocumentGenerator('vbuckettool', template, range(5),
                                 ['james', 'john'],
                                 start=0, end=self.num_items)
    self._get_clients(bucket)
    for vb in bucket.vbuckets:
        loaded_keys = []
        while len(loaded_keys) < per_vb:
            key, value = gen_load.next()
            self.clients[vb.master].set(key, 0, 0, value, vb.id)
            loaded_keys.append(key)
        self.keys_per_vbuckets_dict[vb] = loaded_keys
def getr_rebalance_test(self):
    """Perform a replica read for every loaded key while a rebalance-in
    runs in the background."""
    gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                            start=0, end=self.num_items)
    self.perform_docs_ops(self.master, [gen], 'create')
    self.log.info("Checking replica read")
    client = VBucketAwareMemcached(RestConnection(self.master),
                                   self.default_bucket_name)
    incoming = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
    rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                             incoming, [])
    try:
        while gen.has_next():
            key, _ = gen.next()
            o, c, d = client.getr(key)
    finally:
        # always wait for the background rebalance to finish
        rebalance.result()
def test_getr_bucket_ops(self):
    """Run replica-read verification concurrently with a bucket delete
    and check that the expected error (``self.error``), if any, is
    raised.

    Test param "bucket_to_delete_same_read": when True the bucket being
    read is also the one deleted, otherwise a different bucket is
    deleted.
    """
    bucket_to_delete_same_read = self.input.param(
        "bucket_to_delete_same_read", True)
    gen_1 = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                              start=0, end=self.num_items)
    self.log.info("LOAD PHASE")
    self.perform_docs_ops(self.master, [gen_1], self.data_ops)
    self.log.info("Start bucket ops")
    bucket_read = self.buckets[0]
    bucket_delete = (self.buckets[1], self.buckets[0])[bucket_to_delete_same_read]
    # predefine so the except handler can't hit a NameError when the
    # failure happens before the delete task is created
    task_delete_bucket = None
    try:
        self.log.info("READ REPLICA PHASE")
        self.log.info("Checking replica read")
        task_verify = self.cluster.async_verify_data(
            self.master, bucket_read, bucket_read.kvs[1],
            only_store_hash=False, replica_to_read=self.replica_to_read)
        task_delete_bucket = self.cluster.async_bucket_delete(
            self.master, bucket_delete.name)
        task_verify.result()
        task_delete_bucket.result()
    except Exception as ex:
        # let the delete finish so the cluster is left in a clean state,
        # but never let a secondary failure mask the original exception
        if task_delete_bucket is not None:
            try:
                task_delete_bucket.result()
            except Exception:
                pass
        if self.error and str(ex).find(self.error) != -1:
            self.log.info("Expected error %s appeared as expected" % self.error)
        else:
            raise ex
def test_docs_int_big_values(self):
    """Create docs whose "number" field is 2**degree and verify creation
    succeeds or fails according to the "error" test param."""
    degree = self.input.param("degree", 53)
    expect_failure = self.input.param("error", False)
    number = 2 ** degree
    template = '{{ "number": {0}, "first_name": "{1}" }}'
    gen_load = DocumentGenerator('test_docs', template, [number],
                                 ['james', 'sharon'],
                                 start=0, end=self.num_items)
    self.log.info("create %s documents..." % (self.num_items))
    try:
        self._load_all_buckets(self.master, gen_load, "create", 0)
        self._verify_stats_all_buckets([self.master])
    except Exception as e:
        if not expect_failure:
            raise e
        self.log.info("Unable to create documents as expected: %s" % str(e))
    else:
        if expect_failure:
            self.fail("Able to create documents with value: %s" % str(number))
def load_in_with_ops(self):
    """Run the create/update/delete doc ops selected by ``self.doc_ops``
    against the cluster, then verify cluster stats and the data
    replicated to elasticsearch."""
    self.log.warning("before test2")
    age = range(5)
    first = ['james', 'sharon']
    template = '{{ "age": {0}, "first_name": "{1}" }}'
    gen_delete = DocumentGenerator('test_docs', template, age, first,
                                   start=self.num_items / 2,
                                   end=self.num_items)
    gen_create = DocumentGenerator('test_docs', template, age, first,
                                   start=self.num_items + 1,
                                   end=self.num_items * 3 / 2)
    self.gen_update = DocumentGenerator('test_docs', template, age, first,
                                        end=(self.num_items / 2 - 1))
    if (self.doc_ops is not None):
        tasks = []
        # define which doc's ops will be performed during rebalancing
        # allows multiple of them but one by one
        if ("update" in self.doc_ops):
            tasks += self._async_load_all_buckets(self.master,
                                                  self.gen_update, "update", 0)
        if ("create" in self.doc_ops):
            tasks += self._async_load_all_buckets(self.master,
                                                  gen_create, "create", 0)
        if ("delete" in self.doc_ops):
            tasks += self._async_load_all_buckets(self.master,
                                                  gen_delete, "delete", 0)
        for task in tasks:
            task.result()
    self.verify_cluster_stats(self.servers[:1])
    super(ElasticSearchRebalanceInTests, self)._wait_for_elasticsearch(self.servers[:1])
    super(ElasticSearchRebalanceInTests, self)._verify_elasticsearch(self.servers[:1])
    self.log.warning("after test2")
def getr_negative_corrupted_vbucket_test(self):
    """Read replica data from a corrupted vbucket and verify the
    expected error (``self.error``) is raised.

    With the "vbucket_state" test param set, the key's own replica
    vbucket is forced into that state before the read; otherwise the
    read is routed to a replica vbucket that does not own the key.
    Fails if an expected error never appears.
    """
    vbucket_state = self.input.param("vbucket_state", '')
    gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                            start=0, end=self.num_items)
    self.perform_docs_ops(self.master, [gen], 'create')
    self.log.info("Checking replica read")
    client = VBucketAwareMemcached(RestConnection(self.master),
                                   self.default_bucket_name)
    while gen.has_next():
        try:
            key, _ = gen.next()
            vBucketId = client._get_vBucket_id(key)
            mem = client.memcached_for_replica_vbucket(vBucketId)
            if vbucket_state:
                # corrupt the state of the key's own replica vbucket
                mem.set_vbucket_state(vBucketId, vbucket_state)
                msg = "Vbucket %s set to pending state" % vBucketId
                mem_to_read = mem
            else:
                # pick any replica vbucket hosted on a different server
                wrong_vbucket = [
                    v for v in client.vBucketMapReplica
                    if mem.host != client.vBucketMapReplica[v][0].split(':')[0] or
                    str(mem.port) != client.vBucketMapReplica[v][0].split(':')[1]][0]
                mem_to_read = client.memcached_for_replica_vbucket(
                    wrong_vbucket)
                msg = "Key: %s. Correct host is %s, test try to get from %s host. " % (
                    key, mem.host, mem_to_read.host)
                msg += "Correct vbucket %s, wrong vbucket %s" % (
                    vBucketId, wrong_vbucket)
            self.log.info(msg)
            client._send_op(mem_to_read.getr, key)
        except Exception as ex:
            if self.error and str(ex).find(self.error) != -1:
                self.log.info("Expected error %s appeared as expected" % self.error)
            else:
                raise ex
        else:
            if self.error:
                self.fail("Expected error %s didn't appear as expected" % self.error)
def getr_rebalance_test(self):
    """Issue a replica read for each created key while nodes are being
    rebalanced into the cluster."""
    gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                            start=0, end=self.num_items)
    self.perform_docs_ops(self.master, [gen], 'create')
    self.log.info("Checking replica read")
    client = VBucketAwareMemcached(RestConnection(self.master),
                                   self.default_bucket_name)
    new_nodes = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
    rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                             new_nodes, [])
    try:
        while gen.has_next():
            key, _ = gen.next()
            o, c, d = client.getr(key)
    finally:
        # ensure the background rebalance completes before the test ends
        rebalance.result()
def _init_data_gen(self, key="dockey"):
    """Return a DocumentGenerator producing ``self.num_items`` json docs
    keyed with the *key* prefix."""
    template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
    return DocumentGenerator(key, template, range(5), ['james', 'sharon'],
                             start=0, end=self.num_items)
def _load_doc_data_all_buckets(self, data_op="create", batch_size=1000,
                               gen_load=None):
    """Apply *data_op* with json docs to every bucket.

    Builds a default generator when none is supplied and returns the
    generator that was used.
    """
    if gen_load is None:
        template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs', template, range(5),
                                     ['james', 'sharon'],
                                     start=0, end=self.num_items)
    self.log.info("%s %s documents..." % (data_op, self.num_items))
    self._load_all_buckets(self.master, gen_load, data_op, 0,
                           batch_size=batch_size)
    return gen_load
def _load_doc_data_all_buckets(self, data_op="create"):
    """Load ``self.num_items`` json documents into all buckets using
    *data_op* (create/update/delete/...)."""
    template = '{{ "age": {0}, "first_name": "{1}" }}'
    docs = DocumentGenerator('test_docs', template, range(5),
                             ['james', 'sharon'],
                             start=0, end=self.num_items)
    self.log.info("%s %s documents..." % (data_op, self.num_items))
    self._load_all_buckets(self.master, docs, data_op, 0)
def getr_dgm_test(self):
    # Drive the default bucket into DGM (active resident ratio below the
    # "resident_ratio" test param) by loading batches of delta_items docs,
    # then verify the data through the replica-read path.
    resident_ratio = self.input.param("resident_ratio", 50)
    gens = []
    delta_items = 200000
    self.num_items = 0
    mc = MemcachedClientHelper.direct_client(self.master, self.default_bucket_name)
    self.log.info("LOAD PHASE")
    end_time = time.time() + self.wait_timeout * 30
    # keep loading until the resident ratio drops below the target
    # (a reading of 0 presumably means the stat is not populated yet -
    # TODO confirm) or the deadline passes
    while (int(mc.stats()["vb_active_perc_mem_resident"]) == 0 or\
           int(mc.stats()["vb_active_perc_mem_resident"]) > resident_ratio) and\
          time.time() < end_time:
        self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
        gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                                start=self.num_items,
                                end=(self.num_items + delta_items))
        # keep a pristine copy of each generator (loading consumes it)
        gens.append(copy.deepcopy(gen))
        self._load_all_buckets(self.master, gen, 'create', self.expiration,
                               kv_store=1, flag=self.flags,
                               only_store_hash=False, batch_size=1)
        self.num_items += delta_items
        self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
    # the loop may also exit on timeout, so re-check the ratio explicitly
    self.assertTrue(
        int(mc.stats()["vb_active_perc_mem_resident"]) < resident_ratio,
        "Resident ratio is not reached")
    self.verify_cluster_stats(self.servers[:self.nodes_init],
                              only_store_hash=False, batch_size=1)
    self.log.info("Currently loaded items: %s" % self.num_items)
    self.log.info("READ REPLICA PHASE")
    # second verification pass reads from the replica copies
    self.verify_cluster_stats(self.servers[:self.nodes_init],
                              only_store_hash=False,
                              replica_to_read=self.replica_to_read,
                              batch_size=1)
def _big_int_test_setup(self, num_items):
    """Insert *num_items* json docs containing very large integer fields
    and create the view used by the big-int view tests."""
    timestamp = [13403751757202, 13403751757402, 13403751757302]
    docId = ['0830c075-2a81-448a-80d6-85214ee3ad64',
             '0830c075-2a81-448a-80d6-85214ee3ad65',
             '0830c075-2a81-448a-80d6-85214ee3ad66']
    conversationId = [929342299234203]
    msg = ['msg1', 'msg2']
    template = '{{ "docId": "{0}", "conversationId": {1}, "timestamp": {2}, "msg": "{3}" }}'
    gen_load = DocumentGenerator('test_docs', template, docId,
                                 conversationId, timestamp, msg,
                                 start=0, end=num_items)
    self.log.info("Inserting json data into bucket")
    self._load_all_buckets(self.master, gen_load, "create", 0)
    self._wait_for_stats_all_buckets([self.master])
    map_fn = 'function (doc) {emit([doc.conversationId, doc.timestamp], doc);}'
    self.create_views(self.master, 'ddoc_big_int',
                      [View('view_big_int', map_fn, dev_view=False)])
def getr_negative_corrupted_keys_test(self):
    """Attempt a replica read with a corrupted/empty key (the "key" test
    param) and verify the expected error (``self.error``) is raised;
    fails if an expected error never appears (consistent with the other
    negative getr tests)."""
    key = self.input.param("key", '')
    gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                            start=0, end=self.num_items)
    self.perform_docs_ops(self.master, [gen], 'create')
    self.log.info("Checking replica read")
    client = VBucketAwareMemcached(RestConnection(self.master),
                                   self.default_bucket_name)
    try:
        o, c, d = client.getr(key)
    except Exception as ex:
        if self.error and str(ex).find(self.error) != -1:
            self.log.info("Expected error %s appeared as expected" % self.error)
        else:
            raise ex
    else:
        # an expected error that never surfaced is a test failure
        if self.error:
            self.fail("Expected error %s didn't appear as expected" % self.error)
def memory_quota_default_bucket(self):
    # Load the default bucket into DGM (active resident ratio below the
    # "resident_ratio" test param) and verify that memcached's total
    # allocated memory stays within self.quota (MB).
    resident_ratio = self.input.param("resident_ratio", 50)
    delta_items = 200000
    mc = MemcachedClientHelper.direct_client(self.master, self.default_bucket_name)
    self.log.info("LOAD PHASE")
    end_time = time.time() + self.wait_timeout * 30
    # keep loading batches until the resident ratio drops below the target
    # (a reading of 0 presumably means the stat is not populated yet -
    # TODO confirm) or the deadline passes
    while (int(mc.stats()["vb_active_perc_mem_resident"]) == 0 or\
           int(mc.stats()["vb_active_perc_mem_resident"]) > resident_ratio) and\
          time.time() < end_time:
        self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
        gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                                start=self.num_items,
                                end=(self.num_items + delta_items))
        self._load_all_buckets(self.master, gen, 'create', 0)
        self.num_items += delta_items
        self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
    # convert bytes to megabytes for comparison against the quota
    memory_mb = int(mc.stats("memory")["total_allocated_bytes"])/(1024 * 1024)
    self.log.info("total_allocated_bytes is %s" % memory_mb)
    self.assertTrue(memory_mb <= self.quota,
                    "total_allocated_bytes %s should be within %s" %(
                        memory_mb, self.quota))
def getr_test(self):
    # End-to-end replica-read test: load docs, optionally run a cluster
    # operation (rebalance / swap rebalance / warmup / failover /
    # expiration wait), then verify the data via replica reads,
    # expecting self.error if it is set.
    if self.nodes_init > len(self.servers):
        # not enough VMs to build the requested cluster - skip the test
        result = unittest.TextTestRunner(verbosity=2)._makeResult()
        result.skipped = [('getr_test', "There is not enough VMs!!!")]
        return result
    gen_1 = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                              start=0, end=self.num_items / 2)
    gen_2 = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                              start=self.num_items / 2, end=self.num_items)
    if self.value_size:
        # override with fixed-size string values when value_size is set
        gen_1 = DocumentGenerator('test_docs', '{{"name": "{0}"}}',
                                  [self.value_size * 'a'],
                                  start=0, end=self.num_items / 2)
        gen_2 = DocumentGenerator('test_docs', '{{"name": "{0}"}}',
                                  [self.value_size * 'a'],
                                  start=self.num_items / 2,
                                  end=self.num_items)
    self.log.info("LOAD PHASE")
    if not self.skipload:
        self.perform_docs_ops(self.master, [gen_1, gen_2], self.data_ops)
    self.log.info("CLUSTER OPS PHASE")
    if self.rebalance == GetrTests.AFTER_REBALANCE:
        self.cluster.rebalance(self.servers[:self.nodes_init],
                               self.servers[self.nodes_init:], [])
    if self.rebalance == GetrTests.DURING_REBALANCE:
        # NOTE(review): this task is never awaited with .result() in this
        # test - the replica reads below run while the rebalance is in
        # flight; confirm that is intentional
        rebalance = self.cluster.async_rebalance(
            self.servers[:self.nodes_init],
            self.servers[self.nodes_init:self.nodes_init + self.nodes_in],
            [])
    if self.rebalance == GetrTests.SWAP_REBALANCE:
        self.cluster.rebalance(
            self.servers[:self.nodes_init],
            self.servers[self.nodes_init:self.nodes_init + self.nodes_in],
            self.servers[self.nodes_init - self.nodes_in:self.nodes_init])
    if self.warmup_nodes:
        self.perform_warm_up()
    if self.failover:
        self.perform_failover()
    if self.wait_expiration:
        self.sleep(self.expiration)
    try:
        self.log.info("READ REPLICA PHASE")
        # work out which servers are still part of the cluster after the
        # cluster op above
        servrs = self.servers[:self.nodes_init]
        if self.failover in [
            GetrTests.FAILOVER_NO_REBALANCE, GetrTests.FAILOVER_REBALANCE
        ]:
            servrs = self.servers[:self.nodes_init - self.failover_factor]
        if self.rebalance == GetrTests.AFTER_REBALANCE:
            servrs = self.servers
        if self.rebalance == GetrTests.SWAP_REBALANCE:
            servrs = self.servers[:self.nodes_init - self.nodes_in]
            servrs.extend(self.servers[self.nodes_init:self.nodes_init +
                                       self.nodes_in])
        self.log.info("Checking replica read")
        if self.failover == GetrTests.FAILOVER_NO_REBALANCE:
            # after a failover without rebalance, cluster-wide stats
            # can't settle - verify bucket data only
            self._verify_all_buckets(self.master, only_store_hash=False,
                                     replica_to_read=self.replica_to_read,
                                     batch_size=1)
        else:
            self.verify_cluster_stats(servrs, only_store_hash=False,
                                      replica_to_read=self.replica_to_read,
                                      batch_size=1,
                                      timeout=(self.wait_timeout * 10))
    except Exception, ex:
        if self.error and str(ex).find(self.error) != -1:
            self.log.info("Expected error %s appeared as expected" % self.error)
        else:
            raise ex