def test_full_eviction_changed_to_value_eviction(self):
    """Switch buckets from full eviction to value eviction and verify that a
    subsequent write fails while the policy change is taking effect."""
    KEY_NAME = 'key1'
    gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
    gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)

    # Seed the cluster and wait for stats to settle before changing policy.
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])

    remote = RemoteMachineShellConnection(self.master)
    for bucket in self.buckets:
        output, _ = remote.execute_couchbase_cli(
            cli_command='bucket-edit',
            cluster_host="localhost:8091",
            user=self.master.rest_username,
            password=self.master.rest_password,
            options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
        self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                        'Eviction policy wasn\'t changed')

    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        self.servers[:self.nodes_init], self,
        wait_time=self.wait_timeout, wait_if_warmup=True)
    self.sleep(10, 'Wait some time before next load')
    # self._load_all_buckets(self.master, gen_create2, "create", 0)

    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)
    try:
        rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        self.fail('Bucket is incorrectly functional')
    except MemcachedError as e:
        pass  # this is the exception we are hoping for
def insert_docs(self, num_of_docs, prefix, extra_values=None, wait_for_persistence=True, return_docs=False):
    """Insert `num_of_docs` JSON point documents into self.bucket.

    Retries each set on transient memcached errors (status 132/133,
    presumably temporary engine/not-my-vbucket conditions — TODO confirm)
    for up to 60 attempts.

    Returns the list of doc names, or the doc values when return_docs=True.
    """
    # BUGFIX: mutable default argument replaced with a None sentinel.
    if extra_values is None:
        extra_values = {}
    rest = RestConnection(self.master)
    smart = VBucketAwareMemcached(rest, self.bucket)
    doc_names = []
    for i in range(0, num_of_docs):
        key = doc_name = "{0}-{1}".format(prefix, i)
        geom = {"type": "Point",
                "coordinates": [random.randrange(-180, 180),
                                random.randrange(-90, 90)]}
        value = {"name": doc_name, "age": random.randrange(1, 1000), "geometry": geom}
        value.update(extra_values)
        if not return_docs:
            doc_names.append(doc_name)
        else:
            doc_names.append(value)
        # loop till value is set
        fail_count = 0
        while True:
            try:
                smart.set(key, 0, 0, json.dumps(value))
                break
            except MemcachedError as e:
                fail_count += 1
                if (e.status == 133 or e.status == 132) and fail_count < 60:
                    if i == 0:
                        self.log.error("waiting 5 seconds. error {0}".format(e))
                        time.sleep(5)
                    else:
                        self.log.error(e)
                        time.sleep(1)
                else:
                    raise e
    if wait_for_persistence:
        self.wait_for_persistence()
    self.log.info("inserted {0} json documents".format(num_of_docs))
    return doc_names
def _check_cas(self, check_conflict_resolution=False, master=None, bucket=None, time_sync=None):
    """Verify that every key's CAS equals its vbucket's max_cas, and optionally
    that the extended metadata reflects the expected conflict-resolution mode."""
    self.log.info(' Verifying cas and max cas for the keys')
    # select_count = 20  # Verifying top 20 keys
    if master:
        self.rest = RestConnection(master)
        self.client = VBucketAwareMemcached(self.rest, bucket)

    for idx in range(self.items):
        key = "{0}{1}".format(self.prefix, idx)
        mc_active = self.client.memcached(key)
        cas = mc_active.getMeta(key)[4]
        stat_name = 'vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'
        max_cas = int(mc_active.stats('vbucket-details')[stat_name])
        self.assertTrue(cas == max_cas,
                        '[ERROR]Max cas is not 0 it is {0}'.format(cas))
        if check_conflict_resolution:
            get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)
            if time_sync == 'enabledWithoutDrift':
                self.assertTrue(
                    get_meta_resp[5] == 1,
                    msg='[ERROR] Metadata indicate conflict resolution is not set')
            elif time_sync == 'disabled':
                self.assertTrue(
                    get_meta_resp[5] == 0,
                    msg='[ERROR] Metadata indicate conflict resolution is set')
def _verify_es_values(self, src_server, dest_server, kv_store=1, verification_count=10000):
    """Compare the 'site_name' field of documents indexed in elastic search
    against the corresponding documents in the couchbase source buckets."""
    cb_rest = RestConnection(src_server)
    es_rest = RestConnection(dest_server)
    for bucket in self.xd_ref._get_cluster_buckets(src_server):
        mc = VBucketAwareMemcached(cb_rest, bucket)
        es_valid = es_rest.all_docs(indices=[bucket.name], size=verification_count)
        # compare values of es documents to documents in couchbase
        for row in es_valid[:verification_count]:
            key = str(row['meta']['id'])
            try:
                _, _, doc = mc.get(key)
                val_src = str(json.loads(doc)['site_name'])
                val_dest = str(row['doc']['site_name'])
                if val_src != val_dest:
                    self.xd_ref.fail("Document %s has unexpected value (%s) expected (%s)" % \
                                     (key, val_src, val_dest))
            except MemcachedError as e:
                self.xd_ref.fail(
                    "Error during verification. Index contains invalid key: %s" % key)
        self._log.info("Verified doc values in couchbase bucket (%s) match values in elastic search" % \
                       (bucket.name))
def verify_single_node(self, server, kv_store=1):
    """This is the verification function for single node backup.

    Args:
      server: the master server in the cluster as self.master.
      kv_store: default value is 1. This is the key of the kv_store of each bucket.

    If --single-node flag appears in backup command line, we backup all items
    from a single node (the master node). For each bucket we request the
    vBucketMap; every key is hashed to its vBucketId, and keys whose vbucket
    does not live on the master node are deleted from the kvstore before
    verification."""
    rest = RestConnection(server)
    for bucket in self.buckets:
        VBucketAware = VBucketAwareMemcached(rest, bucket.name)
        memcacheds, vBucketMap, vBucketMapReplica = VBucketAware.request_map(rest, bucket.name)
        valid_keys, deleted_keys = bucket.kvs[kv_store].key_set()
        for key in valid_keys:
            vBucketId = VBucketAware._get_vBucket_id(key)
            which_server = vBucketMap[vBucketId]
            sub = which_server.find(":")
            which_server_ip = which_server[:sub]
            # Drop keys owned by other nodes — single-node backup only holds
            # the master's vbuckets.
            if which_server_ip != server.ip:
                partition = bucket.kvs[kv_store].acquire_partition(key)
                partition.delete(key)
                bucket.kvs[kv_store].release_partition(key)
    self._verify_all_buckets(server, kv_store, self.wait_timeout * 50, self.max_verify, True, 1)
def do_get_random_key(self):
    """MB-31548: get_random_key sometimes hangs — hammer it and fail on error."""
    self.log.info("Creating few docs in the bucket")
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    key = "test_docs-"
    for index in range(1000):
        doc_key = key + str(index)
        client.memcached(doc_key).set(doc_key, 0, 0, json.dumps({'value': 'value1'}))

    self.log.info("Performing random_gets")
    mc = MemcachedClient(self.master.ip, 11210)
    mc.sasl_auth_plain(self.master.rest_username, self.master.rest_password)
    mc.bucket_select('default')

    count = 0
    while count < 1000000:
        count += 1
        try:
            mc.get_random_key()
        except MemcachedError as error:
            self.fail("<MemcachedError #%d ``%s''>" % (error.status, error.message))
        if count % 1000 == 0:
            self.log.info('The number of iteration is {}'.format(count))
def verify_single_node(self, server, kv_store=1):
    """This is the verification function for single node backup.

    Args:
      server: the master server in the cluster as self.master.
      kv_store: default value is 1. This is the key of the kv_store of each bucket.

    With the --single-node backup flag only the master node's items are backed
    up, so for each bucket we fetch the vBucketMap, hash every kvstore key to
    its vBucketId, and delete from the kvstore any key whose vbucket lives on
    a different node. Then verify the remaining keys."""
    rest = RestConnection(server)
    for bucket in self.buckets:
        aware = VBucketAwareMemcached(rest, bucket.name)
        memcacheds, vb_map, vb_map_replica = aware.request_map(rest, bucket.name)
        valid_keys, deleted_keys = bucket.kvs[kv_store].key_set()
        for key in valid_keys:
            owner = vb_map[aware._get_vBucket_id(key)]
            owner_ip = owner[:owner.find(":")]
            if owner_ip == server.ip:
                continue  # key lives on the master node; keep it
            partition = bucket.kvs[kv_store].acquire_partition(key)
            partition.delete(key)
            bucket.kvs[kv_store].release_partition(key)
    self._verify_all_buckets(server, kv_store, self.wait_timeout * 50, self.max_verify, True, 1)
def _verify_es_values(self, src_server, dest_server, kv_store=1, verification_count=10000):
    """Check that 'site_name' in each elastic-search document matches the value
    stored in the corresponding couchbase document."""
    cb_rest = RestConnection(src_server)
    es_rest = RestConnection(dest_server)
    buckets = self.xd_ref._get_cluster_buckets(src_server)
    for bucket in buckets:
        mc = VBucketAwareMemcached(cb_rest, bucket)
        es_valid = es_rest.all_docs(indices=[bucket.name], size=verification_count)
        # compare values of es documents to documents in couchbase
        for row in es_valid[:verification_count]:
            key = str(row["meta"]["id"])
            try:
                _, _, doc = mc.get(key)
                val_src = str(json.loads(doc)["site_name"])
                val_dest = str(row["doc"]["site_name"])
                if val_src != val_dest:
                    self.xd_ref.fail(
                        "Document %s has unexpected value (%s) expected (%s)"
                        % (key, val_src, val_dest)
                    )
            except MemcachedError as e:
                self.xd_ref.fail("Error during verification. Index contains invalid key: %s" % key)
        self._log.info(
            "Verified doc values in couchbase bucket (%s) match values in elastic search"
            % (bucket.name)
        )
def load_docs(self, node, num_docs, bucket='default', password='', exp=0, flags=0):
    """Store `num_docs` trivial string values under keys key0..key{n-1}."""
    client = VBucketAwareMemcached(RestConnection(node), bucket)
    for i in range(num_docs):
        key = "key%s" % i
        rc = client.set(key, 0, 0, "value")
def test_full_eviction_changed_to_value_eviction(self):
    """Switch buckets from full eviction to value eviction and verify that a
    subsequent write fails while the policy change takes effect.

    BUGFIX: `except MemcachedError, e:` is Python-2-only syntax and is a
    SyntaxError on Python 3 — changed to `except MemcachedError as e:` to
    match the other py3 code in this file. Also dropped the commented-out
    pdb breakpoint.
    """
    KEY_NAME = 'key1'
    gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
    gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
    remote = RemoteMachineShellConnection(self.master)
    for bucket in self.buckets:
        output, _ = remote.execute_couchbase_cli(
            cli_command='bucket-edit',
            cluster_host="localhost",
            user=self.master.rest_username,
            password=self.master.rest_password,
            options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
        self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                        'Eviction policy wasn\'t changed')
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        self.servers[:self.nodes_init], self,
        wait_time=self.wait_timeout, wait_if_warmup=True)
    self.sleep(10, 'Wait some time before next load')
    # self._load_all_buckets(self.master, gen_create2, "create", 0)
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)
    try:
        rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        self.fail('Bucket is incorrectly functional')
    except MemcachedError as e:
        pass  # this is the exception we are hoping for
def verify_one_node_has_time_sync_and_one_does_not(self):
    """Enable the time-drift counter on the active node only, then check that
    getMeta's extended metadata reports conflict resolution as set."""
    # need to explicitly enable and disable sync when it is supported
    self.log.info('\n\nStarting verify_one_node_has_time_sync_and_one_does_not')

    client = VBucketAwareMemcached(RestConnection(self.master), 'default')
    vbucket_id = client._get_vBucket_id(LWW_EP_Engine.TEST_KEY)
    mc_master = client.memcached_for_vbucket(vbucket_id)
    mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

    # set for master but not for the replica
    result = mc_master.set_time_drift_counter_state(vbucket_id, 0, 1)
    mc_master.set(LWW_EP_Engine.TEST_KEY, 0, 0, LWW_EP_Engine.TEST_VALUE)
    get_meta_resp = mc_master.getMeta(LWW_EP_Engine.TEST_KEY,
                                      request_extended_meta_data=True)
    self.assertTrue(get_meta_resp[5] == 1,
                    msg='Metadata indicates conflict resolution is not set')

    self.log.info('\n\nEnding verify_one_node_has_time_sync_and_one_does_not')
def _load_ops(self, ops=None, mutations=1, master=None, bucket=None):
    """Apply `mutations` repetitions of the named op (set/add/replace/delete/
    expiry/touch) to each of self.items keys."""
    if master:
        self.rest = RestConnection(master)
    if bucket:
        self.client = VBucketAwareMemcached(self.rest, bucket)

    payload = MemcachedClientHelper.create_value('*', self.value_size)
    for idx in range(self.items):
        key = "{0}{1}".format(self.prefix, idx)
        for _ in range(mutations):
            mc = self.client.memcached(key)
            if ops == 'set':
                mc.set(key, 0, 0, payload)
            elif ops == 'add':
                mc.add(key, 0, 0, payload)
            elif ops == 'replace':
                mc.replace(key, 0, 0, payload)
            elif ops == 'delete':
                mc.delete(key)
            elif ops == 'expiry':
                # set with a TTL so the key expires later
                mc.set(key, self.expire_time, 0, payload)
            elif ops == 'touch':
                mc.touch(key, 10)
    self.log.info("Done with specified {0} ops".format(ops))
def insert_docs(self, num_of_docs, prefix='doc', extra_values=None,
                return_docs=False, scope=None, collection=None):
    """Insert `num_of_docs` JSON documents (point geometry plus tree-like
    attributes) into self.bucket, retrying each set on transient memcached
    errors (status 132/133/134 — presumably temporary engine conditions,
    TODO confirm) for up to 60 attempts.

    Returns the list of doc names, or the doc values when return_docs=True.
    """
    # BUGFIX: mutable default argument replaced with a None sentinel.
    if extra_values is None:
        extra_values = {}
    random.seed(12345)
    rest = RestConnection(self.master)
    smart = VBucketAwareMemcached(rest, self.bucket)
    doc_names = []
    for i in range(0, num_of_docs):
        key = doc_name = "{0}-{1}".format(prefix, i)
        geom = {
            "type": "Point",
            "coordinates": [random.randrange(-180, 180),
                            random.randrange(-90, 90)]
        }
        value = {
            "name": doc_name,
            "age": random.randrange(1, 1000),
            "geometry": geom,
            "height": random.randrange(1, 13000),
            "bloom": random.randrange(1, 6),
            "shed_leaves": random.randrange(6, 13)
        }
        value.update(extra_values)
        if not return_docs:
            doc_names.append(doc_name)
        else:
            doc_names.append(value)
        # loop till value is set
        fail_count = 0
        while True:
            try:
                smart.set(key, 0, 0, json.dumps(value), scope=scope, collection=collection)
                break
            except MemcachedError as e:
                fail_count += 1
                if (e.status == 133 or e.status == 132 or e.status == 134) and fail_count < 60:
                    if i == 0:
                        self.log.error(
                            "waiting 5 seconds. error {0}".format(e))
                        time.sleep(5)
                    else:
                        self.log.error(e)
                        time.sleep(1)
                else:
                    raise e
    self.log.info("Inserted {0} json documents".format(num_of_docs))
    return doc_names
def insert_key(serverInfo, bucket_name, count, size):
    """Insert count*1000 JSON documents of roughly `size` bytes into the bucket.

    BUGFIX: `xrange` does not exist on Python 3 — replaced with `range`,
    consistent with the py3 code elsewhere in this file.
    """
    rest = RestConnection(serverInfo)
    smart = VBucketAwareMemcached(rest, bucket_name)
    for i in range(count * 1000):
        key = "key_" + str(i)
        # flag is never used, but the randint call is kept so the module-level
        # RNG stream is unchanged for any caller that depends on it.
        flag = random.randint(1, 999)
        value = {"value": MemcachedClientHelper.create_value("*", size)}
        smart.memcached(key).set(key, 0, 0, json.dumps(value))
def insert_key(serverInfo, bucket_name, count, size):
    """Insert count*1000 JSON documents of roughly `size` bytes into the bucket.

    BUGFIX: `xrange` does not exist on Python 3 — replaced with `range`,
    consistent with the py3 code elsewhere in this file.
    """
    rest = RestConnection(serverInfo)
    smart = VBucketAwareMemcached(rest, bucket_name)
    for i in range(count * 1000):
        key = "key_" + str(i)
        # flag is never used, but the randint call is kept so the module-level
        # RNG stream is unchanged for any caller that depends on it.
        flag = random.randint(1, 999)
        value = {"value": MemcachedClientHelper.create_value("*", size)}
        smart.memcached(key).set(key, 0, 0, json.dumps(value))
def get_rev_info(rest_conn, bucket, keys):
    """Return the revision info of each key, fetched from its owning node."""
    vbmc = VBucketAwareMemcached(rest_conn, bucket)
    return [vbmc.memcached(k).getRev(k) for k in keys]
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300, log_msg=''):
    """Poll every memcached node until all vbuckets of `bucket` report a ready
    (active) state, or until the timeout expires.

    Returns True when every vbucket became ready within the timeout.

    BUGFIX: the "Not my vbucket" check tested `log_msg` (the caller-supplied
    label) instead of `ex_msg` (the exception text that actually carries the
    marker and the huge vBucketMap dump) — now truncates the exception text.
    Also: `exceptions.EOFError` is Python-2-only; the builtin `EOFError` is
    used instead.
    """
    log = logger.Logger.get_logger()
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    ready_vbuckets = {}
    rest = RestConnection(node)
    servers = rest.get_nodes()
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vbucket_count = len(rest.get_vbuckets(bucket))
    vbuckets = rest.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(rest, bucket)
    # Create dictionary with key:"ip:port" and value: a list of vbuckets
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            # Retrieve memcached ip and port
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = rest.get_bucket(bucket)
            client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                   bucket_info.saslPassword.encode('ascii'))
            for i in server_dict[every_ip_port]:
                try:
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    ex_msg = str(e)
                    if "Not my vbucket" in ex_msg:
                        # Trim the huge vBucketMap dump out of the message.
                        ex_msg = ex_msg[:ex_msg.find("vBucketMap") + 12] + "..."
                    if e.status == memcacheConstants.ERR_NOT_MY_VBUCKET:
                        # May receive this while waiting for vbuckets, continue and retry...
                        continue
                    log.error("%s: %s" % (log_msg, ex_msg))
                    continue
                except EOFError:
                    # The client was disconnected for some reason. This can
                    # happen just after the bucket REST API is returned (before
                    # the buckets are created in each of the memcached processes.)
                    # See here for some details: http://review.couchbase.org/#/c/49781/
                    # Longer term when we don't disconnect clients in this state we
                    # should probably remove this code.
                    log.error("got disconnected from the server, reconnecting")
                    client.reconnect()
                    client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                           bucket_info.saslPassword.encode('ascii'))
                    continue
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    log.warning("vbucket state changed from active to {0}".format(c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
def setUp(self):
    """Initialise test parameters and a vbucket-aware client for the bucket."""
    super(OpsChangeCasTests, self).setUp()
    self.prefix = "test_"
    self.items = self.input.param("items", 20)
    self.value_size = self.input.param("value_size", 256)
    self.item_flag = self.input.param("item_flag", 0)
    self.expire_time = self.input.param("expire_time", 35)
    self.rest = RestConnection(self.master)
    self.client = VBucketAwareMemcached(self.rest, self.bucket)
def test_meta_failover(self):
    """After failing over and re-adding the active node, the healed active CAS
    must equal the replica CAS, and conflict-resolution metadata must be set."""
    KEY_NAME = 'key2'
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, self.bucket)

    for i in range(10):
        # set a key
        value = 'value' + str(i)
        client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value': value}))
        vbucket_id = client._get_vBucket_id(KEY_NAME)
        # print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = client.memcached(KEY_NAME)
        mc_master = client.memcached_for_vbucket(vbucket_id)
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)
        cas_active = mc_active.getMeta(KEY_NAME)[4]
        # print 'cas_a {0} '.format(cas_active)
        max_cas = int(mc_active.stats('vbucket-details')
                      ['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'])
        self.assertTrue(cas_active == max_cas,
                        '[ERROR]Max cas is not 0 it is {0}'.format(cas_active))

    # failover that node
    self.log.info('Failing over node with active data {0}'.format(self.master))
    self.cluster.failover(self.servers, [self.master])
    self.log.info('Remove the node with active data {0}'.format(self.master))
    rebalance = self.cluster.async_rebalance(self.servers[:], [], [self.master])
    rebalance.result()
    time.sleep(60)
    replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
    # print 'replica CAS {0}'.format(replica_CAS)

    # add the node back
    self.log.info('Add the node back, the max_cas should be healed')
    rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])
    rebalance.result()

    # verify the CAS is good
    client = VBucketAwareMemcached(rest, self.bucket)
    mc_active = client.memcached(KEY_NAME)
    active_CAS = mc_active.getMeta(KEY_NAME)[4]
    get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)
    self.assertTrue(replica_CAS == active_CAS,
                    'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
    self.assertTrue(get_meta_resp[5] == 1,
                    msg='Metadata indicate conflict resolution is not set')
def connect_host_port(self, host, port, user, pswd):
    """Open a vbucket-aware connection to host:port, defaulting credentials,
    and reset transfer counters."""
    from membase.api.rest_client import RestConnection
    from memcached.helper.data_helper import VBucketAwareMemcached
    info = {
        "ip": host,
        "port": port,
        'username': user or 'Administrator',
        'password': pswd or 'password'
    }
    rest = RestConnection(info)
    self.awareness = VBucketAwareMemcached(rest, user or 'default', info)
    self.backoff = 0
    self.xfer_sent = 0
    self.xfer_recv = 0
def _verify_es_results(self, bucket='default'):
    """Assert every doc indexed in elastic search matches its couchbase copy."""
    es_docs = self.esrest_conn.all_docs()
    self.log.info("Retrieved ES Docs")
    memcached_conn = VBucketAwareMemcached(self.rest, bucket)
    self.log.info("Comparing CB and ES data")
    for doc in es_docs:
        es_data = doc['doc']
        mc_active = memcached_conn.memcached(str(es_data['_id']))
        cb_flags, cb_cas, cb_data = mc_active.get(str(es_data['_id']))
        self.assertDictEqual(
            es_data, json.loads(cb_data),
            "Data mismatch found - es data: {0} cb data: {1}".format(str(es_data), str(cb_data)))
    self.log.info("Data verified")
def load_docs(self, node, num_docs, bucket='default', password='', exp=0, flags=0):
    """Store `num_docs` trivial string values under keys key0..key{n-1}."""
    client = VBucketAwareMemcached(RestConnection(node), bucket)
    for idx in range(num_docs):
        rc = client.set("key%s" % idx, 0, 0, "value")
def setUp(self):
    """Initialise the upgrade test fixture with a vbucket-aware client.

    BUGFIX: the diagnostic print used 'checking for self.servers '.format(...)
    with no placeholder, so the server was never printed — added '{0}'.
    """
    super(Upgrade_EpTests, self).setUp()
    print(self.master)
    self.rest = RestConnection(self.master)
    self.bucket = 'default'  # temp fix
    self.client = VBucketAwareMemcached(self.rest, self.bucket)
    self.time_synchronization = 'disabled'
    print('checking for self.servers {0}'.format(self.servers[1]))
    self.prefix = "test_"
    self.expire_time = 5
    self.item_flag = 0
    self.value_size = 256
def insert_nested_docs(self, num_of_docs, prefix='doc', levels=16, size=512,
                       return_docs=False, long_path=False, scope=None, collection=None):
    """Insert `num_of_docs` deeply nested JSON documents, retrying each set on
    transient memcached errors for up to 60 attempts.

    Returns the list of doc names, or the doc values when return_docs=True."""
    rest = RestConnection(self.master)
    smart = VBucketAwareMemcached(rest, self.bucket)
    doc_names = []
    # renamed from `dict` to avoid shadowing the builtin
    doc_spec = {'doc': {}, 'levels': levels}
    for i in range(0, num_of_docs):
        key = doc_name = "{0}-{1}".format(prefix, i)
        if long_path:
            self._createNestedJson_longPath(key, doc_spec)
        else:
            self._createNestedJson(key, doc_spec)
        value = doc_spec['doc']
        if return_docs:
            doc_names.append(value)
        else:
            doc_names.append(doc_name)
        # keep retrying until the value is stored
        fail_count = 0
        while True:
            try:
                smart.set(key, 0, 0, json.dumps(value), scope=scope, collection=collection)
                break
            except MemcachedError as e:
                fail_count += 1
                if (e.status == 133 or e.status == 132) and fail_count < 60:
                    if i == 0:
                        self.log.error(
                            "waiting 5 seconds. error {0}".format(e))
                        time.sleep(5)
                    else:
                        self.log.error(e)
                        time.sleep(1)
                else:
                    raise e
    self.log.info("Inserted {0} json documents".format(num_of_docs))
    return doc_names
def load(self, path, bucket, prefix='test'):
    """Load every JSON file under `path` into `bucket`, keyed by file name.

    BUGFIX: `except Exception, ex:` and the `print` statements are
    Python-2-only syntax — modernized to py3. The file handle is now closed
    via a `with` block (the original leaked it on a read error), and the
    local `file` no longer shadows the builtin.
    """
    client = VBucketAwareMemcached(RestConnection(self.master), bucket)
    for file_name in os.listdir(path):
        with open(path + '/' + file_name, 'r') as f:
            rq_s = f.read()
        rq_json = json.loads(rq_s)
        key = str(file_name)
        try:
            o, c, d = client.set(key, 0, 0, json.dumps(rq_json))
        except Exception as ex:
            # best-effort load: log the failure and continue with the next file
            print('WARN=======================')
            print(ex)
def insert_docs(self, num_of_docs, prefix, extra_values=None, wait_for_persistence=True, return_docs=False):
    """Insert `num_of_docs` JSON point documents into self.bucket, retrying
    each set on transient memcached errors (status 132/133 — presumably
    temporary engine conditions, TODO confirm) for up to 60 attempts.

    Returns the list of doc names, or the doc values when return_docs=True.
    """
    # BUGFIX: mutable default argument replaced with a None sentinel.
    if extra_values is None:
        extra_values = {}
    rest = RestConnection(self.master)
    smart = VBucketAwareMemcached(rest, self.bucket)
    doc_names = []
    for i in range(0, num_of_docs):
        key = doc_name = "{0}-{1}".format(prefix, i)
        geom = {
            "type": "Point",
            "coordinates": [random.randrange(-180, 180),
                            random.randrange(-90, 90)]
        }
        value = {
            "name": doc_name,
            "age": random.randrange(1, 1000),
            "geometry": geom
        }
        value.update(extra_values)
        if not return_docs:
            doc_names.append(doc_name)
        else:
            doc_names.append(value)
        # loop till value is set
        fail_count = 0
        while True:
            try:
                smart.set(key, 0, 0, json.dumps(value))
                break
            except MemcachedError as e:
                fail_count += 1
                if (e.status == 133 or e.status == 132) and fail_count < 60:
                    if i == 0:
                        self.log.error(
                            "waiting 5 seconds. error {0}".format(e))
                        time.sleep(5)
                    else:
                        self.log.error(e)
                        time.sleep(1)
                else:
                    raise e
    if wait_for_persistence:
        self.wait_for_persistence()
    self.log.info("inserted {0} json documents".format(num_of_docs))
    return doc_names
def corrupt_cas_is_healed_on_rebalance_out_in(self):
    """Corrupt the max CAS on a key's active node, rebalance the node out and
    back in, and verify the active CAS matches the replica CAS afterwards."""
    self.log.info('Start corrupt_cas_is_healed_on_rebalance_out_in')
    KEY_NAME = 'key1'
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')

    # set a key
    client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value': 'value1'}))

    # figure out which node it is on
    mc_active = client.memcached(KEY_NAME)
    mc_replica = client.memcached(KEY_NAME, replica_index=0)

    # set the CAS to -2 and then mutate to increment to -1 and then it should stop there
    self._corrupt_max_cas(mc_active, KEY_NAME)

    # CAS should be 0 now, do some gets and sets to verify that nothing bad happens
    resp = mc_active.get(KEY_NAME)
    self.log.info('get for {0} is {1}'.format(KEY_NAME, resp))

    # remove that node
    self.log.info('Remove the node with -1 max cas')
    rebalance = self.cluster.async_rebalance(self.servers[-1:], [], [self.master])
    # rebalance = self.cluster.async_rebalance([self.master], [], self.servers[-1:])
    rebalance.result()
    replica_CAS = mc_replica.getMeta(KEY_NAME)[4]

    # add the node back
    self.log.info('Add the node back, the max_cas should be healed')
    rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])
    # rebalance = self.cluster.async_rebalance([self.master], self.servers[-1:],[])
    rebalance.result()

    # verify the CAS is good
    client = VBucketAwareMemcached(rest, 'default')
    mc_active = client.memcached(KEY_NAME)
    active_CAS = mc_active.getMeta(KEY_NAME)[4]
    self.assertTrue(replica_CAS == active_CAS,
                    'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
def _test_view_on_multiple_docs(self, num_docs, params=None, delay=10):
    """Create a view, insert `num_docs` documents, then poll the view until it
    returns all of them (bounded retry loop) or fail listing the missing keys.

    BUGFIX: the default `params={"stale": "update_after"}` was a mutable
    default argument — replaced with a None sentinel (behavior unchanged
    for all callers).
    """
    if params is None:
        params = {"stale": "update_after"}
    self.log.info("description : create a view on {0} documents".format(num_docs))
    master = self.servers[0]
    rest = RestConnection(master)
    bucket = "default"
    view_name = "dev_test_view_on_{1}_docs-{0}".format(str(uuid.uuid4())[:7], self.num_docs)
    map_fn = "function (doc) {if(doc.name.indexOf(\"" + view_name + "\") != -1) { emit(doc.name, doc);}}"
    rest.create_view(view_name, bucket, [View(view_name, map_fn, dev_view=False)])
    self.created_views[view_name] = bucket
    rest = RestConnection(self.servers[0])
    smart = VBucketAwareMemcached(rest, bucket)
    doc_names = []
    prefix = str(uuid.uuid4())[:7]
    total_time = 0
    self.log.info("inserting {0} json objects".format(num_docs))
    for i in range(0, num_docs):
        key = doc_name = "{0}-{1}-{2}".format(view_name, prefix, i)
        doc_names.append(doc_name)
        value = {"name": doc_name, "age": 1000}
        smart.set(key, 0, 0, json.dumps(value))
    self.log.info("inserted {0} json documents".format(len(doc_names)))
    time.sleep(10)
    results = ViewBaseTests._get_view_results(self, rest, bucket, view_name,
                                              len(doc_names), extra_params=params)
    view_time = results['view_time']
    keys = ViewBaseTests._get_keys(self, results)
    RebalanceHelper.wait_for_persistence(master, bucket, 0)
    total_time = view_time
    # Keep trying this for maximum 5 minutes
    start_time = time.time()
    # increase timeout to 600 seconds for windows testing
    # NOTE(review): the loop actually allows 900 seconds — the comments above
    # look stale; confirm the intended bound.
    while (len(keys) != len(doc_names)) and (time.time() - start_time < 900):
        msg = "view returned {0} items , expected to return {1} items"
        self.log.info(msg.format(len(keys), len(doc_names)))
        self.log.info("trying again in {0} seconds".format(delay))
        time.sleep(delay)
        results = ViewBaseTests._get_view_results(self, rest, bucket, view_name,
                                                  len(doc_names), extra_params=params)
        view_time = results['view_time']
        total_time += view_time
        keys = ViewBaseTests._get_keys(self, results)
    self.log.info("View time: {0} secs".format(total_time))
    # Only if the lengths are not equal, look for missing keys
    if len(keys) != len(doc_names):
        not_found = list(set(doc_names) - set(keys))
        ViewBaseTests._print_keys_not_found(self, not_found, 10)
        self.fail("map function did not return docs for {0} keys".format(len(not_found)))
def corrupt_cas_is_healed_on_reboot(self):
    """Corrupt a key's max CAS, restart the server, and verify the CAS value
    survives the reboot unchanged."""
    self.log.info('Start corrupt_cas_is_healed_on_reboot')
    KEY_NAME = 'key1'
    rest = RestConnection(self.master)

    # set a key
    client = VBucketAwareMemcached(rest, 'default')
    client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value': 'value1'}))
    # client.memcached(KEY_NAME).set('k2', 0, 0,json.dumps({'value':'value2'}))

    # figure out which node it is on
    mc_active = client.memcached(KEY_NAME)

    # set the CAS to -2 and then mutate to increment to -1 and then it should stop there
    self._corrupt_max_cas(mc_active, KEY_NAME)
    corrupt_cas = mc_active.getMeta(KEY_NAME)[4]

    # self._restart_memcache('default')
    remote = RemoteMachineShellConnection(self.master)
    remote.stop_server()
    time.sleep(30)
    remote.start_server()
    time.sleep(30)

    client = VBucketAwareMemcached(rest, 'default')
    mc_active = client.memcached(KEY_NAME)
    curr_cas = mc_active.getMeta(KEY_NAME)[4]
    self.assertTrue(
        curr_cas == corrupt_cas,
        'Corrupted cas (%s) != curr_cas (%s)' % (corrupt_cas, curr_cas))
def corrupt_cas_is_healed_on_reboot(self):
    """Corrupt a key's max CAS, restart the server, and verify the max CAS
    reads back as 0 after the reboot."""
    self.log.info('Start corrupt_cas_is_healed_on_reboot')
    KEY_NAME = 'key1'
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')

    # set a key
    client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value': 'value1'}))
    # client.memcached(KEY_NAME).set('k2', 0, 0,json.dumps({'value':'value2'}))

    # figure out which node it is on
    mc_active = client.memcached(KEY_NAME)

    # set the CAS to -2 and then mutate to increment to -1 and then it should stop there
    self._corrupt_max_cas(mc_active, KEY_NAME)
    # print 'max cas k2', mc_active.getMeta('k2')[4]

    # CAS should be 0 now, do some gets and sets to verify that nothing bad happens
    # self._restart_memcache('default')
    remote = RemoteMachineShellConnection(self.master)
    remote.stop_server()
    time.sleep(30)
    remote.start_server()
    time.sleep(30)

    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mc_active = client.memcached(KEY_NAME)
    maxCas = mc_active.getMeta(KEY_NAME)[4]
    self.assertTrue(maxCas == 0,
                    'max cas after reboot is not 0 it is {0}'.format(maxCas))
def key_not_exists_test(self):
    """A deleted key must report not-found on both get and replace."""
    self.assertTrue(len(self.buckets) > 0, 'at least 1 bucket required')
    bucket = self.buckets[0].name
    client = VBucketAwareMemcached(RestConnection(self.master), bucket)
    KEY_NAME = 'key'
    for i in range(1500):
        client.set(KEY_NAME, 0, 0, "x")
        # delete and verify get fails
        client.delete(KEY_NAME)
        err = None
        try:
            rc = client.get(KEY_NAME)
        except MemcachedError as error:
            # It is expected to raise MemcachedError because the key is deleted.
            err = error.status
        self.assertTrue(err == ERR_NOT_FOUND,
                        'expected key to be deleted {0}'.format(KEY_NAME))
        # cas errors do not sleep the test for 10 seconds, plus we need to
        # check that the correct error is being thrown
        err = None
        try:
            # For some reason replace instead of cas would not reproduce the bug
            mc_active = client.memcached(KEY_NAME)
            mc_active.replace(KEY_NAME, 0, 10, "value")
        except MemcachedError as error:
            err = error.status
        self.assertTrue(
            err == ERR_NOT_FOUND,
            'was able to replace cas on removed key {0}'.format(KEY_NAME))
def _verify_es_results(self, bucket='default'):
    """Assert every doc indexed in elastic search matches the corresponding
    document in the couchbase source cluster."""
    esrest_conn = EsRestConnection(self.dest_master)
    es_docs = esrest_conn.all_docs()
    self.log.info("Retrieved ES Docs")
    rest_conn = RestConnection(self.src_master)
    memcached_conn = VBucketAwareMemcached(rest_conn, bucket)
    self.log.info("Comparing CB and ES data")
    for doc in es_docs:
        es_data = doc['doc']
        mc_active = memcached_conn.memcached(str(es_data['_id']))
        cb_flags, cb_cas, cb_data = mc_active.get(str(es_data['_id']))
        self.assertDictEqual(
            es_data, json.loads(cb_data),
            "Data mismatch found - es data: {0} cb data: {1}".format(str(es_data), str(cb_data)))
    self.log.info("Data verified")
def _verify_data_all_buckets(self, gen_check):
    """Verify that every key/value pair produced by the generator is present
    in each bucket on the recovery server."""
    for bucket in self.buckets:
        self.log.info("Check bucket %s" % bucket.name)
        gen = copy.deepcopy(gen_check)
        rest = RestConnection(self.server_recovery)
        client = VBucketAwareMemcached(rest, bucket)
        while gen.has_next():
            key, value = next(gen)
            try:
                _, _, d = client.get(key)
                self.assertEqual(
                    d.decode("utf-8"), value,
                    'Key: %s expected. Value expected %s. Value actual %s' % (key, value, d))
            except Exception as ex:
                raise Exception('Key %s not found %s' % (key, str(ex)))
def getr_negative_corrupted_keys_test(self):
    """Replica read of a corrupted/bad key must raise the configured error.

    BUGFIX: `xrange` and `except Exception, ex:` are Python-2-only —
    modernized to `range` and `except ... as ex`, consistent with the py3
    code elsewhere in this file.
    """
    key = self.input.param("key", '')
    gen = DocumentGenerator('test_docs', '{{"age": {0}}}', range(5),
                            start=0, end=self.num_items)
    self.perform_docs_ops(self.master, [gen], 'create')
    self.log.info("Checking replica read")
    client = VBucketAwareMemcached(RestConnection(self.master), self.default_bucket_name)
    try:
        o, c, d = client.getr(key)
    except Exception as ex:
        if self.error and str(ex).find(self.error) != -1:
            self.log.info("Expected error %s appeared as expected" % self.error)
        else:
            raise ex
def _verify_data_all_buckets(self, gen_check):
    """Verify that every key/value pair produced by the generator is present
    in each bucket on the recovery server.

    BUGFIX: `gen.next()` and `except Exception, ex:` are Python-2-only —
    replaced with `next(gen)` and `except ... as ex` to match the py3
    variant of this method elsewhere in the file; the deprecated
    `assertEquals` alias is replaced with `assertEqual`.
    """
    for bucket in self.buckets:
        self.log.info("Check bucket %s" % bucket.name)
        gen = copy.deepcopy(gen_check)
        rest = RestConnection(self.server_recovery)
        client = VBucketAwareMemcached(rest, bucket)
        while gen.has_next():
            key, value = next(gen)
            try:
                _, _, d = client.get(key)
                self.assertEqual(d, value, 'Key: %s expected. Value expected %s. Value actual %s' % (
                    key, value, d))
            except Exception as ex:
                raise Exception('Key %s not found %s' % (key, str(ex)))
def do_basic_ops(self):
    """Exercise incr, set, observe (wait for persistence), evict and
    del_with_meta (regression checks for MB-17231 and MB-17289).

    Replaces Python-2 `print` statements with self.log.info calls."""
    KEY_NAME = 'key1'
    KEY_NAME2 = 'key2'
    CAS = 1234
    self.log.info('Starting basic ops')
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)
    # MB-17231 - incr with full eviction
    rc = mcd.incr(KEY_NAME, 1)
    self.log.info('rc for incr {0}'.format(rc))
    # MB-17289 del with meta
    rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
    self.log.info('set is {0}'.format(rc))
    cas = rc[1]
    # wait for the mutation to persist before attempting eviction
    persisted = 0
    while persisted == 0:
        opaque, rep_time, persist_time, persisted, cas = client.observe(KEY_NAME)
    try:
        rc = mcd.evict_key(KEY_NAME)
    except MemcachedError as exp:
        self.fail("Exception with evict meta - {0}".format(exp))
    CAS = 0xabcd
    # key, value, exp, flags, seqno, remote_cas
    try:
        # key, exp, flags, seqno, cas
        rc = mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
    except MemcachedError as exp:
        self.fail("Exception with del_with meta - {0}".format(exp))
def getr_rebalance_test(self):
    """Verify replica reads keep working while a rebalance-in is running.

    Fixes Python-2-only `xrange` and `gen.next()`."""
    gen = DocumentGenerator('test_docs', '{{"age": {0}}}', range(5),
                            start=0, end=self.num_items)
    self.perform_docs_ops(self.master, [gen], 'create')
    self.log.info("Checking replica read")
    client = VBucketAwareMemcached(RestConnection(self.master),
                                   self.default_bucket_name)
    rebalance = self.cluster.async_rebalance(
        self.servers[:self.nodes_init],
        self.servers[self.nodes_init:self.nodes_init + self.nodes_in], [])
    try:
        while gen.has_next():
            key, _ = next(gen)
            o, c, d = client.getr(key)
    finally:
        # always wait for the rebalance so a failed read doesn't leak state
        rebalance.result()
def test_capi_with_checkpointing(self):
    """Start an ES (capi) replication with a 60s checkpoint interval, load
    docs while paused, resume, then read back the vb0 checkpoint record.

    Fixes Python-2-only `xrange`."""
    repl_id = self._start_es_replication(xdcr_params={"checkpointInterval": "60"})
    rest_conn = RestConnection(self.src_master)
    rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')
    gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',
                            range(100), start=0, end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(gen)
    rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')
    # allow at least one checkpoint interval to elapse
    self.sleep(120)
    vb0_node = None
    nodes = self.src_cluster.get_nodes()
    # the node owning active vbucket 0 holds its checkpoint document
    ip = VBucketAwareMemcached(rest_conn, 'default').vBucketMap[0].split(':')[0]
    for node in nodes:
        if ip == node.ip:
            vb0_node = node
    if not vb0_node:
        raise XDCRCheckpointException("Error determining the node containing active vb0")
    vb0_conn = RestConnection(vb0_node)
    try:
        checkpoint_record = vb0_conn.get_recent_xdcr_vb_ckpt(repl_id)
        self.log.info("Checkpoint record : {0}".format(checkpoint_record))
    except Exception as e:
        raise XDCRCheckpointException("Error retrieving last checkpoint document - {0}".format(e))
    self._verify_es_results()
def common_setup(self, replica):
    """Clean the cluster, create a replicated default bucket, rebalance all
    nodes in, and build a vbucket-aware client for the new bucket."""
    self._input = TestInputSingleton.input
    self._servers = self._input.servers
    first = self._servers[0]
    self.log = logger.Logger().get_logger()
    self.log.info(self._input)
    rest = RestConnection(first)
    for server in self._servers:
        RestHelper(RestConnection(server)).is_ns_server_running()
    ClusterOperationHelper.cleanup_cluster(self._servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
    ClusterOperationHelper.add_all_nodes_or_assert(
        self._servers[0], self._servers, self._input.membase_settings, self)
    nodes = rest.node_statuses()
    otpNodeIds = []
    for node in nodes:
        otpNodeIds.append(node.id)
    info = rest.get_nodes_self()
    # give the bucket 3/4 of the node's memcached quota
    bucket_ram = info.mcdMemoryReserved * 3 / 4
    rest.create_bucket(bucket="default",
                       ramQuotaMB=int(bucket_ram),
                       replicaNumber=replica,
                       proxyPort=rest.get_nodes_self().moxi)
    msg = "wait_for_memcached fails"
    # BUG FIX: the original line ended with a stray comma, which made `ready`
    # a one-element tuple (always truthy), so the assert could never fail
    ready = BucketOperationHelper.wait_for_memcached(first, "default")
    self.assertTrue(ready, msg)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(first.ip))
    self.log.info('started rebalance operation on master node {0}'.format(first.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    # without a bucket this seems to fail
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    self.awareness = VBucketAwareMemcached(rest, "default")
def test_capi_with_checkpointing(self):
    """Pause replication, load documents, resume, wait for catch-up, then
    fetch the most recent XDCR checkpoint record from the node that owns
    active vbucket 0."""
    self.setup_xdcr()
    self.src_cluster.pause_all_replications()
    gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',
                            range(100), start=0, end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(gen)
    self.src_cluster.resume_all_replications()
    self._wait_for_replication_to_catchup()
    self.sleep(120)
    # locate the cluster node hosting active vbucket 0
    vb0_ip = VBucketAwareMemcached(self.rest, 'default').vBucketMap[0].split(':')[0]
    vb0_node = None
    for node in self.src_cluster.get_nodes():
        if node.ip == vb0_ip:
            vb0_node = node
    if not vb0_node:
        raise XDCRCheckpointException("Error determining the node containing active vb0")
    vb0_conn = RestConnection(vb0_node)
    try:
        repl = vb0_conn.get_replication_for_buckets('default', 'default')
        checkpoint_record = vb0_conn.get_recent_xdcr_vb_ckpt(repl['id'])
        self.log.info("Checkpoint record : {0}".format(checkpoint_record))
    except Exception as e:
        raise XDCRCheckpointException("Error retrieving last checkpoint document - {0}".format(e))
    self._verify_es_results()
def verify_missing_keys(self, prefix, count):
    """
    verify that none of the keys exist with specified prefix.
    """
    client = VBucketAwareMemcached(RestConnection(self.servers[0]), self.buckets[0])
    for idx in range(count):
        key = "{0}{1}".format(prefix, idx)
        try:
            client.get(key)
        except MemcachedError as error:
            # an expired/removed document must come back as NOT_FOUND
            self.assertEqual(
                error.status, constants.ERR_NOT_FOUND,
                "expected error NOT_FOUND, got {0}".format(error.status))
        else:
            self.fail("Able to retrieve expired document")
def _load_ops(self, ops=None, mutations=1, master=None, bucket=None):
    """Run the requested memcached operation (*ops*) against self.items keys,
    repeating each key *mutations* times."""
    if master:
        self.rest = RestConnection(master)
    if bucket:
        self.client = VBucketAwareMemcached(self.rest, bucket)
    payload = MemcachedClientHelper.create_value('*', self.value_size)
    for idx in range(self.items):
        key = "{0}{1}".format(self.prefix, idx)
        for _ in range(mutations):
            mc = self.client.memcached(key)
            if ops == 'set':
                mc.set(key, 0, 0, payload)
            elif ops == 'add':
                mc.add(key, 0, 0, payload)
            elif ops == 'replace':
                mc.replace(key, 0, 0, payload)
            elif ops == 'delete':
                mc.delete(key)
            elif ops == 'expiry':
                # store with a TTL so the key expires later
                mc.set(key, self.expire_time, 0, payload)
            elif ops == 'touch':
                mc.touch(key, 10)
    self.log.info("Done with specified {0} ops".format(ops))
def verify_meta_data_when_not_enabled(self):
    """With time sync (LWW) not enabled, getMeta's extended meta data must
    report conflict resolution as unset (0)."""
    self.log.info('\n\nStarting verify_meta_data_when_not_enabled')
    client = VBucketAwareMemcached(RestConnection(self.master), 'default')
    vbucket_id = client._get_vBucket_id(LWW_EP_Engine.TEST_KEY)
    mc_master = client.memcached_for_vbucket(vbucket_id)
    mc_master.set(LWW_EP_Engine.TEST_KEY, 0, 0, LWW_EP_Engine.TEST_VALUE)
    meta = mc_master.getMeta(LWW_EP_Engine.TEST_KEY,
                             request_extended_meta_data=True)
    # field 5 of the extended meta response carries the conflict-resolution flag
    self.assertTrue(meta[5] == 0,
                    msg='Metadata indicate conflict resolution is set')
    self.log.info('\n\nEnding verify_meta_data_when_not_enabled')
def verify_meta_data_when_not_enabled(self):
    """Verify the extended meta data shows conflict resolution disabled when
    time sync (LWW) has not been enabled on the bucket."""
    self.log.info('\n\nStarting verify_meta_data_when_not_enabled')
    client = VBucketAwareMemcached(RestConnection(self.master), 'default')
    # route to the memcached node owning the test key's active vbucket
    vbucket_id = client._get_vBucket_id(LWW_EP_Engine.TEST_KEY)
    mc_master = client.memcached_for_vbucket( vbucket_id )
    mc_master.set(LWW_EP_Engine.TEST_KEY, 0, 0, LWW_EP_Engine.TEST_VALUE)
    get_meta_resp = mc_master.getMeta(LWW_EP_Engine.TEST_KEY, request_extended_meta_data=True)
    # index 5 of the extended meta response carries the conflict-resolution
    # flag; 0 means conflict resolution (time sync) is not set
    self.assertTrue( get_meta_resp[5] == 0, msg='Metadata indicate conflict resolution is set')
    self.log.info('\n\nEnding verify_meta_data_when_not_enabled')
def run_load(rest, bucket, task, kv_store):
    """Drive the mixed set/get/delete/expiration workload described by *task*
    against *bucket*, recording touched doc ids in *doc_ids*.

    Fixes: json.loads(..., encoding=...) was deprecated and removed in
    Python 3.9 (bytes input is decoded automatically); bare `except:` is
    narrowed to `except Exception:` so KeyboardInterrupt isn't swallowed."""
    smart = VBucketAwareMemcached(rest, bucket)
    docs_iterators = task["docs"]
    do_sets, do_gets, do_deletes, do_with_expiration = \
        RebalanceDataGenerator.decode_ops(task)
    doc_ids = []
    expiration = 0
    if do_with_expiration:
        expiration = task["expiration"]
    for docs_iterator in docs_iterators:
        for value in docs_iterator:
            _value = value.encode("ascii", "ignore")
            _json = json.loads(_value)
            _id = _json["meta"]["id"].encode("ascii", "ignore")
            _value = json.dumps(_json["json"]).encode("ascii", "ignore")
            # _value = json.dumps(_json)
            try:
                RebalanceDataGenerator.do_mc(rest, smart, _id, _value,
                                             kv_store, doc_ids, do_sets,
                                             do_gets, do_deletes,
                                             do_with_expiration, expiration)
            except Exception:
                # best-effort load: log the failure and continue with the
                # next document
                traceback.print_exc(file=sys.stdout)
    # post the results into the queue
    return
def _check_cas(self, check_conflict_resolution=False, master=None, bucket=None, time_sync=None):
    """Verify each key's CAS equals its vbucket's max_cas high-water mark
    and, optionally, that extended meta data matches the expected
    time-sync (conflict resolution) setting.

    Fixes the misleading assertion message ("Max cas is not 0") to state
    what the check actually compares."""
    self.log.info(' Verifying cas and max cas for the keys')
    if master:
        self.rest = RestConnection(master)
        self.client = VBucketAwareMemcached(self.rest, bucket)
    k = 0
    while k < self.items:
        key = "{0}{1}".format(self.prefix, k)
        k += 1
        mc_active = self.client.memcached(key)
        cas = mc_active.getMeta(key)[4]
        # stats values are strings, hence the int() conversion
        max_cas = int(mc_active.stats('vbucket-details')[
            'vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
        self.assertTrue(cas == max_cas,
                        '[ERROR] cas {0} does not match vbucket max_cas {1}'.format(cas, max_cas))
        if check_conflict_resolution:
            get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)
            # field 5 of the meta response carries the conflict-resolution flag
            if time_sync == 'enabledWithoutDrift':
                self.assertTrue(get_meta_resp[5] == 1,
                                msg='[ERROR] Metadata indicate conflict resolution is not set')
            elif time_sync == 'disabled':
                self.assertTrue(get_meta_resp[5] == 0,
                                msg='[ERROR] Metadata indicate conflict resolution is set')
def insert_docs(self, num_of_docs, prefix='doc', extra_values={}, return_docs=False):
    """Insert *num_of_docs* JSON documents named "<prefix>-<i>".

    Returns the document names, or the full document bodies when
    *return_docs* is True. Seeds the RNG so generated values are
    reproducible across runs."""
    random.seed(12345)
    smart = VBucketAwareMemcached(RestConnection(self.master), self.bucket)
    doc_names = []
    for i in range(num_of_docs):
        key = doc_name = "{0}-{1}".format(prefix, i)
        geom = {"type": "Point",
                "coordinates": [random.randrange(-180, 180),
                                random.randrange(-90, 90)]}
        value = {
            "name": doc_name,
            "age": random.randrange(1, 1000),
            "geometry": geom,
            "array": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20],
            "isDict": True,
            "dict_value": {"name": "abc", "age": 1},
            "height": random.randrange(1, 13000),
            "bloom": random.randrange(1, 6),
            "shed_leaves": random.randrange(6, 13)}
        value.update(extra_values)
        doc_names.append(value if return_docs else doc_name)
        # retry while the set fails with status 132/133 (treated here as
        # transient); give up after 60 consecutive failures
        fail_count = 0
        while True:
            try:
                smart.set(key, 0, 0, json.dumps(value))
                break
            except MemcachedError as e:
                fail_count += 1
                if e.status in (132, 133) and fail_count < 60:
                    if i == 0:
                        self.log.error("waiting 5 seconds. error {0}".format(e))
                        time.sleep(5)
                    else:
                        self.log.error(e)
                        time.sleep(1)
                else:
                    raise e
    self.log.info("Inserted {0} json documents".format(num_of_docs))
    return doc_names
def _poxi(self):
    """Build and return a vbucket-aware memcached client for the configured
    server and bucket."""
    server = TestInputServer()
    server.ip = self.server_ip
    server.rest_username = "******"
    server.rest_password = "******"
    server.port = 8091
    return VBucketAwareMemcached(RestConnection(server), self.bucket_name)
def delete_docs(self, num_of_docs, prefix='doc'):
    """Delete documents "<prefix>-0" .. "<prefix>-<num_of_docs-1>".

    Keys that do not exist are skipped silently; returns the list of keys
    that were actually deleted."""
    client = VBucketAwareMemcached(RestConnection(self.master), self.bucket)
    deleted = []
    for i in range(num_of_docs):
        key = "{0}-{1}".format(prefix, i)
        try:
            client.delete(key)
        except MemcachedError as e:
            # Don't care if we try to delete a document that doesn't exist
            if e.status != memcacheConstants.ERR_NOT_FOUND:
                raise
            continue
        deleted.append(key)
    self.log.info("deleted {0} json documents".format(len(deleted)))
    return deleted
def delete_docs(self, num_of_docs, prefix):
    """Delete documents "<prefix>-0" .. "<prefix>-<num_of_docs-1>".

    Keys that do not exist are skipped silently; returns the list of keys
    that were actually deleted."""
    smart = VBucketAwareMemcached(RestConnection(self.master), self.bucket)
    doc_names = []
    for i in range(0, num_of_docs):
        key = "{0}-{1}".format(prefix, i)
        try:
            smart.delete(key)
        except MemcachedError as e:
            # Don't care if we try to delete a document that doesn't exist
            if e.status == memcacheConstants.ERR_NOT_FOUND:
                continue
            else:
                raise
        doc_names.append(key)
    self.log.info("deleted {0} json documents".format(len(doc_names)))
    return doc_names
def _load_docs(self, num_docs, prefix, verify=True, bucket='default', expire=0, flag=0):
    """Store *num_docs* JSON documents, wait for persistence, and optionally
    verify them through the view layer. Returns the document names."""
    master = self.servers[0]
    rest = RestConnection(master)
    smart = VBucketAwareMemcached(rest, bucket)
    doc_names = []
    for i in range(num_docs):
        key = doc_name = "{0}-{1}".format(prefix, i)
        doc_names.append(doc_name)
        smart.set(key, expire, flag, json.dumps({"name": doc_name, "age": i}))
    # ensure everything hit disk before any verification step
    RebalanceHelper.wait_for_persistence(master, bucket)
    self.log.info("inserted {0} json documents".format(num_docs))
    if verify:
        ViewBaseTests._verify_keys(self, doc_names, prefix)
    return doc_names
def key_not_exists_test(self):
    """After deleting a key, both get and replace must fail with NOT_FOUND;
    repeated many times to catch intermittent cas-handling bugs."""
    self.assertTrue(len(self.buckets) > 0, 'at least 1 bucket required')
    bucket = self.buckets[0].name
    client = VBucketAwareMemcached(RestConnection(self.master), bucket)
    KEY_NAME = 'key'
    for _ in range(1500):
        client.set(KEY_NAME, 0, 0, "x")
        # delete and verify get fails
        client.delete(KEY_NAME)
        err = None
        try:
            rc = client.get(KEY_NAME)
        except MemcachedError as error:
            # It is expected to raise MemcachedError because the key is deleted.
            err = error.status
        self.assertTrue(err == ERR_NOT_FOUND,
                        'expected key to be deleted {0}'.format(KEY_NAME))
        # cas errors do not sleep the test for 10 seconds, plus we need to
        # check that the correct error is being thrown
        err = None
        try:
            # For some reason replace instead of cas would not reproduce the bug
            mc_active = client.memcached(KEY_NAME)
            mc_active.replace(KEY_NAME, 0, 10, "value")
        except MemcachedError as error:
            err = error.status
        self.assertTrue(err == ERR_NOT_FOUND,
                        'was able to replace cas on removed key {0}'.format(KEY_NAME))
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300, log_msg=''):
    """Poll every vbucket of *bucket* until all report a ready state or the
    timeout elapses; returns True when every vbucket became ready."""
    log = logger.Logger.get_logger()
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    ready_vbuckets = {}
    rest = RestConnection(node)
    servers = rest.get_nodes()
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vbucket_count = len(rest.get_vbuckets(bucket))
    vbuckets = rest.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(rest, bucket)
    #Create dictionary with key:"ip:port" and value: a list of vbuckets
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            #Retrieve memcached ip and port
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = rest.get_bucket(bucket)
            # authenticate with the bucket's SASL credentials before polling
            client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                   bucket_info.saslPassword.encode('ascii'))
            for i in server_dict[every_ip_port]:
                try:
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    ex_msg = str(e)
                    # truncate noisy "Not my vbucket" payloads before logging
                    if "Not my vbucket" in log_msg:
                        log_msg = log_msg[:log_msg.find("vBucketMap") + 12] + "..."
                    if "Not my vbucket" in ex_msg:
                        #reduce output
                        ex_msg = str(e)[:str(e).find('Not my vbucket') + 14] + "..."
                    log.error("%s: %s" % (log_msg, ex_msg))
                    continue
                # NOTE(review): looks like bytes \x01/\x02 in the state blob
                # mark a ready vbucket -- confirm against get_vbucket_state
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    # a previously-ready vbucket regressed; drop it and re-poll
                    log.warning("vbucket state changed from active to {0}".format(c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
def verify_missing_keys(self, prefix, count):
    """Verify that none of the keys with the given prefix still exist.

    Python 3 port: `xrange` -> `range`, deprecated `assertEquals` ->
    `assertEqual`."""
    client = VBucketAwareMemcached(
        RestConnection(self.servers[0]), self.buckets[0])
    for i in range(count):
        key = "{0}{1}".format(prefix, i)
        try:
            client.get(key)
        except MemcachedError as error:
            # an expired/removed document must come back as NOT_FOUND
            self.assertEqual(
                error.status, constants.ERR_NOT_FOUND,
                "expected error NOT_FOUND, got {0}".format(error.status))
        else:
            self.fail("Able to retrieve expired document")
def setUp(self):
    """Read test parameters and build the REST + vbucket-aware clients used
    by all cas tests."""
    super(OpsChangeCasTests, self).setUp()
    # key prefix shared by all generated documents
    self.prefix = "test_"
    # TTL (seconds) used by expiry operations
    self.expire_time = self.input.param("expire_time", 35)
    self.item_flag = self.input.param("item_flag", 0)
    self.value_size = self.input.param("value_size", 256)
    # number of keys each test operates on
    self.items = self.input.param("items", 20)
    self.rest = RestConnection(self.master)
    self.client = VBucketAwareMemcached(self.rest, self.bucket)
def test_monotonic_cas(self):
    """Verify the vbucket max_cas strictly increases after each mutation.

    Fix: stats() returns string values (the sibling _check_cas converts
    them with int()), so comparing them with `>` was lexicographic, e.g.
    '9' > '10'; convert to int before comparing."""
    self.log.info('\n\nStarting test_monotonic_cas')
    client = VBucketAwareMemcached(RestConnection(self.master), 'default')
    vbucket_id = client._get_vBucket_id(LWW_EP_Engine.TEST_KEY)
    mc_master = client.memcached_for_vbucket(vbucket_id)
    result = mc_master.set_time_drift_counter_state(vbucket_id, 0, 1)
    old_cas = int(mc_master.stats('vbucket-details')['vb_' + str(vbucket_id) + ':max_cas'])
    for i in range(10):
        mc_master.set(LWW_EP_Engine.TEST_KEY, 0, 0, LWW_EP_Engine.TEST_VALUE)
        cas = int(mc_master.stats('vbucket-details')['vb_' + str(vbucket_id) + ':max_cas'])
        self.assertTrue(cas > old_cas,
                        msg='CAS did not increase. Old {0} new {1}'.format(old_cas, cas))
        old_cas = cas
    self.log.info('\n\nComplete test_monotonic_cas')
def connect_host_port(self, host, port, user, pswd):
    """Open a vbucket-aware connection to host:port and reset the transfer
    counters for the new session."""
    from membase.api.rest_client import RestConnection
    from memcached.helper.data_helper import VBucketAwareMemcached
    info = {
        "ip": host,
        "port": port,
        'username': user or 'Administrator',
        'password': pswd or 'password'
    }
    self.awareness = VBucketAwareMemcached(RestConnection(info),
                                           user or 'default', info)
    # fresh connection: clear backoff and transfer accounting
    self.backoff = 0
    self.xfer_sent = 0
    self.xfer_recv = 0
def verify_one_node_has_time_sync_and_one_does_not(self):
    """Enable the time-drift counter only on the node owning the active
    vbucket (not the replica) and verify getMeta reports conflict
    resolution as set."""
    # need to explicitly enable and disable sync when it is supported
    self.log.info('\n\nStarting verify_one_node_has_time_sync_and_one_does_not')
    client = VBucketAwareMemcached(RestConnection(self.master), 'default')
    vbucket_id = client._get_vBucket_id(LWW_EP_Engine.TEST_KEY)
    mc_master = client.memcached_for_vbucket( vbucket_id )
    # replica connection is obtained but intentionally left unconfigured
    mc_replica = client.memcached_for_replica_vbucket(vbucket_id)
    # set for master but not for the replica
    result = mc_master.set_time_drift_counter_state(vbucket_id, 0, 1)
    mc_master.set(LWW_EP_Engine.TEST_KEY, 0, 0, LWW_EP_Engine.TEST_VALUE)
    get_meta_resp = mc_master.getMeta(LWW_EP_Engine.TEST_KEY, request_extended_meta_data=True)
    # index 5 of the extended meta response: 1 == conflict resolution set
    self.assertTrue( get_meta_resp[5] ==1, msg='Metadata indicates conflict resolution is not set')
    self.log.info('\n\nEnding verify_one_node_has_time_sync_and_one_does_not')