def reset_vbucket(self, rest, key):
    """Refresh the cached vbucket->server mapping for *key* from the cluster's
    forward map (published during rebalance), opening a new direct memcached
    client for the vbucket's new master if we don't already hold one.

    :param rest: REST connection used to fetch the forward map and node list
    :param key: document key whose vbucket mapping should be reset
    :return: True if the vbucket was found in the forward map, False otherwise
    """
    # vbucket id is the key hash masked to the map size (map size is a power of 2)
    vBucketId = crc32.crc32_hash(key) & (len(self.vBucketMap) - 1)
    forward_map = rest.get_bucket(self.bucket).forward_map
    if not forward_map:
        # no forward map published; fall back to the current vbucket map
        forward_map = rest.get_vbuckets(self.bucket)
    nodes = rest.get_nodes()
    for vBucket in forward_map:
        if vBucketId == vBucket.id:
            self.vBucketMap[vBucket.id] = vBucket.master
            # it has changed, then to different server or a new server
            masterIp = vBucket.master.split(":")[0]
            masterPort = int(vBucket.master.split(":")[1])
            if self.vBucketMap[vBucketId] not in self.memcacheds:
                # no client for the new master yet: build a server descriptor
                # and open a direct memcached connection to it
                server = TestInputServer()
                server.rest_username = rest.username
                server.rest_password = rest.password
                for node in nodes:
                    # match the node by ip and memcached (data) port to find
                    # its REST port
                    if node.ip == masterIp and node.memcached == masterPort:
                        server.port = node.port
                server.ip = masterIp
                self.log.info("Recevied forward map, reset vbucket map, new direct_client")
                self.memcacheds[vBucket.master] = MemcachedClientHelper.direct_client(server, self.bucket)
                return True
            else:
                # if no one is using that memcached connection anymore just close the connection
                return True
    # vbucket not present in the forward map: nothing was reset
    return False
def set(self, key, exp, flags, val, vbucket=-1):
    """Store *val* under *key* on the memcached server.

    When *vbucket* is -1 the target vbucket is derived by hashing the key,
    otherwise the caller-supplied vbucket id is used as-is.
    """
    if vbucket != -1:
        self.vbucketId = vbucket
    else:
        self.vbucketId = crc32.crc32_hash(key) & (self.vbucket_count - 1)
    return self._mutate(MemcachedConstants.CMD_SET, key, exp, flags, 0, val)
def __load_chain(self, start_num=0):
    """Load items (docs plus one xattr each) into every bucket of each
    cluster in the replication chain, recording expected values in the
    bucket's KV store; in DGM mode delegate to the cluster's DGM loader.

    :param start_num: first numeric suffix for generated keys
    """
    for i, cluster in enumerate(self.get_cb_clusters()):
        # bidirectional replication loads every cluster; otherwise the last
        # cluster in the chain is skipped (it only receives data)
        if self._rdirection == REPLICATION_DIRECTION.BIDIRECTION:
            if i > len(self.get_cb_clusters()) - 1:
                break
        else:
            if i >= len(self.get_cb_clusters()) - 1:
                break
        if not self._dgm_run:
            for bucket in cluster.get_buckets():
                client = SDKClient(scheme="couchbase",
                                   hosts=[cluster.get_master_node().ip],
                                   bucket=bucket.name).cb
                # NOTE(review): this inner `i` shadows the enumerate index
                # above — confirm the shadowing is intentional
                for i in range(start_num, start_num + self._num_items):
                    # key embeds the cluster name with separators normalized
                    # to underscores
                    key = 'k_%s_%s' % (i, str(cluster).replace(' ', '_').
                                       replace('.', '_').replace(',', '_').replace(':', '_'))
                    value = {'xattr_%s' % i: 'value%s' % i}
                    client.upsert(key, value)
                    client.mutate_in(key, SD.upsert('xattr_%s' % i,
                                                    'value%s' % i,
                                                    xattr=True,
                                                    create_parents=True))
                    partition = bucket.kvs[1].acquire_partition(key)  # ["partition"]
                    if self.only_store_hash:
                        # store only a hash of the value to save memory
                        value = str(crc32.crc32_hash(value))
                    res = client.get(key)
                    partition.set(key, json.dumps(value), 0, res.flags)
                    bucket.kvs[1].release_partition(key)
        else:
            cluster.load_all_buckets_till_dgm(
                active_resident_threshold=self._active_resident_threshold,
                items=self._num_items)
def test_not_your_vbucket(self):
    """Verify that sync_replication against a memcached node that does NOT
    own the key's vbucket reports an error for every request.

    Fix: the final check used ``expected_error is not 100`` — identity
    comparison against an int literal, which is implementation-defined for
    ints — replaced with ``!=``.
    """
    self.common_setup(1)
    keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
    value = MemcachedClientHelper.create_value("*", 1024)
    for k in keys:
        # NOTE(review): vBucket is the raw hash, not masked by the vbucket
        # count — presumably deliberate, since the target node rejects it
        vBucket = crc32.crc32_hash(k)
        mc = self.awareness.memcached(k)
        mc.set(k, 0, 0, value)
        not_your_vbucket_mc = self.awareness.not_my_vbucket_memcached(k)
        try:
            count = 0
            expected_error = 0
            while count < 100:
                a, b, response = not_your_vbucket_mc.sync_replication(
                    [{"key": k, "vbucket": vBucket}], 1)
                count += 1
                self.log.info("response : {0}".format(response))
                if response and response[0]["event"] != "invalid key":
                    expected_error += 1
            # was: `expected_error is not 100` (identity test on an int)
            if expected_error != 100:
                self.fail(
                    msg=
                    "server did not raise an error when running sync_replication with invalid vbucket"
                )
        except MemcachedError as error:
            self.log.error(error)
def set(self, key, exp, flags, val, vbucket=-1):
    """Set a value in the memcached server.

    Fix: the docstring was placed after the vbucket-selection code, where it
    is just a no-op string expression; moved to the top of the function so it
    is an actual docstring.

    :param key: document key
    :param exp: expiry in seconds (0 = no expiry)
    :param flags: item flags
    :param val: value to store
    :param vbucket: explicit vbucket id, or -1 to derive it from the key hash
    """
    if vbucket == -1:
        self.vbucketId = crc32.crc32_hash(key) & (self.vbucket_count - 1)
    else:
        self.vbucketId = vbucket
    return self._mutate(memcacheConstants.CMD_SET, key, exp, flags, 0, val)
def memcached(self, key, fastforward=False):
    """Return the memcached connection responsible for *key*'s vbucket,
    optionally consulting the fast-forward map first.

    Fix: the original released the two locks manually on each exit path; if
    any statement in between raised (e.g. start_vbucket_fastforward_connection),
    both locks were leaked. The body is now wrapped in try/finally so the
    locks are always released.

    :raises Exception: if the vbucket has no map entry or no mc connection
    """
    self._vBucketMap_lock.acquire()
    self._vBucketMapFastForward_lock.acquire()
    try:
        vBucketId = crc32.crc32_hash(key) & (len(self._vBucketMap) - 1)
        if fastforward and vBucketId in self._vBucketMapFastForward:
            # only try the fastforward if we have an entry
            # otherwise we just wait for the main map to update
            self.start_vbucket_fastforward_connection(vBucketId)
            self._vBucketMap[vBucketId] = self._vBucketMapFastForward[vBucketId]
        if vBucketId not in self._vBucketMap:
            msg = "vbucket map does not have an entry for vb : {0}"
            raise Exception(msg.format(vBucketId))
        if self._vBucketMap[vBucketId] not in self._memcacheds:
            msg = "smart client does not have a mc connection for server : {0}"
            raise Exception(msg.format(self._vBucketMap[vBucketId]))
        return self._memcacheds[self._vBucketMap[vBucketId]]
    finally:
        # release in reverse acquisition order, unconditionally
        self._vBucketMapFastForward_lock.release()
        self._vBucketMap_lock.release()
def __load_chain(self, start_num=0):
    """Load items (docs plus one xattr each) into every bucket of each
    cluster in the replication chain, recording expected values in the
    bucket's KV store; in DGM mode delegate to the cluster's DGM loader.

    Python 2 variant (uses xrange).

    :param start_num: first numeric suffix for generated keys
    """
    for i, cluster in enumerate(self.get_cb_clusters()):
        # bidirectional replication loads every cluster; otherwise the last
        # cluster in the chain is skipped (it only receives data)
        if self._rdirection == REPLICATION_DIRECTION.BIDIRECTION:
            if i > len(self.get_cb_clusters()) - 1:
                break
        else:
            if i >= len(self.get_cb_clusters()) - 1:
                break
        if not self._dgm_run:
            for bucket in cluster.get_buckets():
                client = SDKClient(scheme="couchbase",
                                   hosts=[cluster.get_master_node().ip],
                                   bucket=bucket.name).cb
                # NOTE(review): this inner `i` shadows the enumerate index
                # above — confirm the shadowing is intentional
                for i in xrange(start_num, start_num + self._num_items):
                    # key embeds the cluster name with separators normalized
                    # to underscores
                    key = 'k_%s_%s' % (i, str(cluster).replace(' ', '_').
                                       replace('.', '_').replace(',', '_').replace(':', '_'))
                    value = {'xattr_%s' % i: 'value%s' % i}
                    client.upsert(key, value)
                    client.mutate_in(key, SD.upsert('xattr_%s' % i,
                                                    'value%s' % i,
                                                    xattr=True,
                                                    create_parents=True))
                    partition = bucket.kvs[1].acquire_partition(key)  # ["partition"]
                    if self.only_store_hash:
                        # store only a hash of the value to save memory
                        value = str(crc32.crc32_hash(value))
                    res = client.get(key)
                    partition.set(key, json.dumps(value), 0, res.flags)
                    bucket.kvs[1].release_partition(key)
        else:
            cluster.load_all_buckets_till_dgm(
                active_resident_threshold=self._active_resident_threshold,
                items=self._num_items)
def test_10k_items(self):
    """Store 100 1KB values and block on disk persistence for each key."""
    payload = MemcachedClientHelper.create_value("*", 1024)
    generated_keys = ["{0}-{1}".format(str(uuid.uuid4()), i)
                      for i in range(0, 100)]
    for key in generated_keys:
        client = self.smartclient
        vb = crc32.crc32_hash(key) & (client.vbucket_count - 1)
        client.set(key, 0, 0, payload)
        client.sync_persistence([{"key": key, "vbucket": vb}])
def memcached(self, key):
    """Return the memcached connection that owns *key*'s vbucket.

    :raises Exception: if the vbucket has no map entry or no mc connection
    """
    vb = crc32.crc32_hash(key) & (len(self._vBucketMap) - 1)
    if vb not in self._vBucketMap:
        raise Exception(
            "vbucket map does not have an entry for vb : {0}".format(vb))
    owner = self._vBucketMap[vb]
    if owner not in self._memcacheds:
        raise Exception(
            "smart client does not have a mc connection for server : {0}".format(owner))
    return self._memcacheds[owner]
def _test_delete_key_and_backup_and_restore_body(self):
    """Insert 500 keys with an expiry, delete the first one, back up every
    node, restore, then verify the deleted key stays absent and all other
    keys survive the backup/restore cycle.
    """
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    client = MemcachedClientHelper.direct_client(self.master, "default")
    expiry = 2400
    test_uuid = uuid.uuid4()
    keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
    self.log.info("pushing keys with expiry set to {0}".format(expiry))
    for key in keys:
        try:
            client.set(key, expiry, 0, "1")
        except mc_bin_client.MemcachedError as error:
            msg = "unable to push key : {0} to bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
    # delete exactly one key so the restore verification has a known-absent key
    client.delete(keys[0])
    # wait until everything is flushed to disk before taking the backup
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    # let's create a unique folder in the remote location
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder)
        # give each node a moment to settle after restore
        time.sleep(10)
    self.log.info('verifying that all those keys...')
    missing_keys = []
    verify_keys = []
    for key in keys:
        # NOTE(review): 1024 vbuckets assumed here (mask & 1023) — confirm
        # against the cluster's actual vbucket count
        vBucketId = crc32.crc32_hash(key) & 1023  # or & 0x3FF
        client.vbucketId = vBucketId
        if key == keys[0]:
            missing_keys.append(key)
        else:
            verify_keys.append(key)
    self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                    "Keys are not empty")
    self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
                    "Missing keys")
def getr(self, key, vbucket=-1):
    """Get the value for a given key within the memcached server from a
    replica vbucket."""
    self.vbucketId = (vbucket if vbucket != -1
                      else crc32.crc32_hash(key) & (self.vbucket_count - 1))
    reply = self._doCmd(memcacheConstants.CMD_GET_REPLICA, key, '')
    return self.__parseGet(reply, len(key))
def get(self, key, vbucket=-1):
    """Get the value for a given key within the memcached server."""
    self.vbucketId = (vbucket if vbucket != -1
                      else crc32.crc32_hash(key) & (self.vbucket_count - 1))
    reply = self._doCmd(MemcachedConstants.CMD_GET, key, '')
    return self.__parseGet(reply)
def header(self, op, key, val, opaque=0, extra='', cas=0, dtype=0,
           vbucketId=0, fmt=REQ_PKT_FMT, magic=REQ_MAGIC_BYTE):
    """Build a packed binary-protocol request header for *op*.

    If the config declares vbuckets, the supplied vbucketId is overridden
    by hashing the key. Returns (packed_header, vbucketId).
    """
    num_vbuckets = self.cfg.get("vbuckets", 0)
    if num_vbuckets > 0:
        vbucketId = crc32.crc32_hash(key) & (num_vbuckets - 1)
    body_len = len(key) + len(extra) + len(val)
    packed = struct.pack(fmt, magic, op, len(key), len(extra), dtype,
                         vbucketId, body_len, opaque, cas)
    return packed, vbucketId
def test_one_replica(self):
    """Set 100 keys, sync-replicate each with 1 replica, then read all back."""
    self.common_setup(1)
    payload = MemcachedClientHelper.create_value("*", 1024)
    generated_keys = ["{0}-{1}".format(str(uuid.uuid4()), i)
                      for i in range(0, 100)]
    for key in generated_keys:
        vb = crc32.crc32_hash(key)
        conn = self.awareness.memcached(key)
        conn.set(key, 0, 0, payload)
        conn.sync_replication(1, [{"key": key, "vbucket": vb}])
    for key in generated_keys:
        self.awareness.memcached(key).get(key)
def test_one_replica(self):
    """Set 100 keys, sync-replicate each with 1 replica, then read all back."""
    self.common_setup(1)
    payload = MemcachedClientHelper.create_value("*", 1024)
    generated_keys = ["{0}-{1}".format(str(uuid.uuid4()), i)
                      for i in range(0, 100)]
    for key in generated_keys:
        vb = crc32.crc32_hash(key)
        conn = self.awareness.memcached(key)
        conn.set(key, 0, 0, payload)
        conn.sync_replication([{"key": key, "vbucket": vb}], 1)
    for key in generated_keys:
        self.awareness.memcached(key).get(key)
def verify_data(server, keys, value_equal_to_key, verify_flags, test,
                debug=False, bucket="default", scope=None, collection=None):
    """Fetch every key from the bucket and optionally assert value/flag
    correctness; return True only if all keys were retrievable.

    Fix: the error log format string used ``{0}`` twice, so the failing key
    was never printed (the status was repeated instead); second placeholder
    is now ``{1}``.

    :param value_equal_to_key: assert each value decodes to its own key
    :param verify_flags: assert flags equal adler32(value)
    :param test: unittest.TestCase used for assertions
    :return: True if every key was fetched, False otherwise
    """
    log = logger.Logger.get_logger()
    log_error_count = 0
    # verify all the keys
    client = MemcachedClientHelper.direct_client(server, bucket)
    vbucket_count = len(RestConnection(server).get_vbuckets(bucket))
    index = 0
    all_verified = True
    keys_failed = []
    for key in keys:
        try:
            index += 1
            vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
            client.vbucketId = vbucketId
            flag, keyx, value = client.get(key=key, scope=scope,
                                           collection=collection)
            if value_equal_to_key:
                test.assertEqual(value.decode(), key, msg='values dont match')
            if verify_flags:
                actual_flag = socket.ntohl(flag)
                expected_flag = ctypes.c_uint32(zlib.adler32(value)).value
                test.assertEqual(actual_flag, expected_flag,
                                 msg='flags dont match')
            if debug:
                log.info("verified key #{0} : {1}".format(index, key))
        except mc_bin_client.MemcachedError as error:
            if debug:
                log_error_count += 1
                # cap noisy per-key logging at 100 entries
                if log_error_count < 100:
                    log.error(error)
                    # was: "... {0} ... {0}" — key never printed
                    log.error(
                        "memcachedError : {0} - unable to get a pre-inserted key : {1}"
                        .format(error.status, key))
            keys_failed.append(key)
            all_verified = False
    client.close()
    if len(keys_failed) > 0:
        log.error('unable to verify #{0} keys'.format(len(keys_failed)))
    return all_verified
def get_doc_metadata(self, num_vbuckets, key):
    """Return the 'vkey' stats for *key*, or None if memcached reports the
    key does not exist."""
    vb = crc32.crc32_hash(key) & (num_vbuckets - 1)
    conn = self.memcached(key)
    try:
        return conn.stats("vkey {0} {1}".format(key, vb))
    except MemcachedError:
        self.log.info("key {0} doesn't exist in memcached".format(key))
        return None
def test_10k_items_during_load(self):
    """Sync-persist 100 keys while a background thread keeps loading data."""
    generated_keys = ["{0}-{1}".format(str(uuid.uuid4()), i)
                      for i in range(0, 100)]
    payload = MemcachedClientHelper.create_value("*", 1024)
    prefix = str(uuid.uuid4())
    working_set_size = 10 * 1000
    self.load_thread = Thread(target=self._insert_data_till_stopped,
                              args=("default", prefix, working_set_size))
    self.load_thread.start()
    for key in generated_keys:
        client = self.smartclient
        vb = crc32.crc32_hash(key) & (client.vbucket_count - 1)
        client.set(key, 0, 0, payload)
        client.sync_persistence([{"key": key, "vbucket": vb}])
    # stop the background loader and wait for it to finish
    self.shutdown_load_data = True
    self.load_thread.join()
def load_some_data(serverInfo, fill_ram_percentage=10.0,
                   bucket_name='default', scope=None, collection=None,
                   test=None):
    """Insert enough ~10KB-slot keys to fill *fill_ram_percentage* percent of
    the bucket's free RAM; return the list of keys actually inserted.

    Fixes: (1) the error path referenced an undefined name ``test`` — it is
    now an optional keyword parameter (backward compatible, defaults to
    None); (2) the "inserting ... @ ..." log line used ``{0}`` twice, so the
    server ip was never printed.

    :param test: optional unittest.TestCase; when given, a set failure fails
        the test instead of silently stopping the load
    """
    log = logger.Logger.get_logger()
    if fill_ram_percentage <= 0.0:
        fill_ram_percentage = 5.0
    client = MemcachedClientHelper.direct_client(serverInfo, bucket_name)
    # populate key
    rest = RestConnection(serverInfo)
    RestHelper(rest).vbucket_map_ready(bucket_name, 60)
    vbucket_count = len(rest.get_vbuckets(bucket_name))
    testuuid = uuid.uuid4()
    info = rest.get_bucket(bucket_name)
    emptySpace = info.stats.ram - info.stats.memUsed
    log.info('emptySpace : {0} fill_ram_percentage : {1}'.format(
        emptySpace, fill_ram_percentage))
    fill_space = (emptySpace * fill_ram_percentage) / 100.0
    log.info("fill_space {0}".format(fill_space))
    # each packet can be 10 KB
    packetSize = int(10 * 1024)
    number_of_buckets = int(fill_space) // packetSize
    log.info('packetSize: {0}'.format(packetSize))
    log.info('memory usage before key insertion : {0}'.format(
        info.stats.memUsed))
    # was: '... {0} new keys to memcached @ {0}' — ip never printed
    log.info('inserting {0} new keys to memcached @ {1}'.format(
        number_of_buckets, serverInfo.ip))
    keys = ["key_%s_%d" % (testuuid, i) for i in range(number_of_buckets)]
    inserted_keys = []
    for key in keys:
        vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
        client.vbucketId = vbucketId
        try:
            client.set(key, 0, 0, key, scope=scope, collection=collection)
            inserted_keys.append(key)
        except mc_bin_client.MemcachedError as error:
            log.error(error)
            client.close()
            log.error("unable to push key : {0} to vbucket : {1}".format(
                key, client.vbucketId))
            if test:
                test.fail(
                    "unable to push key : {0} to vbucket : {1}".format(
                        key, client.vbucketId))
            else:
                # best-effort load: stop on the first failure
                break
    client.close()
    return inserted_keys
def keys_dont_exist(server, keys, bucket):
    """Return True only if every key in *keys* is absent from the bucket;
    a successful get means the key exists and the check fails."""
    log = logger.Logger.get_logger()
    # verify all the keys
    client = MemcachedClientHelper.direct_client(server, bucket)
    vbucket_count = len(RestConnection(server).get_vbuckets(bucket))
    # populate key
    for key in keys:
        try:
            client.vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
            client.get(key=key)
        except mc_bin_client.MemcachedError as error:
            # expected: the key should not be retrievable
            log.error(error)
            log.error("expected memcachedError : {0} - unable to get a pre-inserted key : {1}".format(error.status, key))
        else:
            client.close()
            log.error('key {0} should not exist in the bucket'.format(key))
            return False
    client.close()
    return True
def keys_dont_exist(server, keys, bucket):
    """Return True only if every key in *keys* is absent from the bucket;
    a successful get means the key exists and the check fails."""
    log = logger.Logger.get_logger()
    # verify all the keys
    client = MemcachedClientHelper.direct_client(server, bucket)
    vbucket_count = len(BucketHelper(server).get_vbuckets(bucket))
    # populate key
    for key in keys:
        try:
            client.vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
            client.get(key=key)
        except mc_bin_client.MemcachedError as error:
            # expected: the key should not be retrievable
            log.error(error)
            log.error("expected memcachedError : {0} - unable to get a pre-inserted key : {1}".format(error.status, key))
        else:
            client.close()
            log.error('key {0} should not exist in the bucket'.format(key))
            return False
    client.close()
    return True
def _unsupported_replicas(self, replica):
    """Expect sync_replication to fail for an unsupported replica count."""
    self.common_setup(1)
    payload = MemcachedClientHelper.create_value("*", 102400)
    generated_keys = ["{0}-{1}".format(str(uuid.uuid4()), i)
                      for i in range(0, 100)]
    for key in generated_keys:
        vb = crc32.crc32_hash(key)
        conn = self.awareness.memcached(key)
        conn.set(key, 0, 0, payload)
        conn.get(key)
        try:
            conn.sync_replication(replica, [{"key": key, "vbucket": vb}])
            self.fail(
                "server did not raise an error when running sync_replication with {0} replicas"
                .format(replica))
        except MemcachedError as error:
            self.log.info("error {0} {1} as expected".format(error.status, error.msg))
    for key in generated_keys:
        self.awareness.memcached(key).get(key)
def _unsupported_replicas(self, replica):
    """Expect sync_replication to fail for an unsupported replica count."""
    self.common_setup(1)
    payload = MemcachedClientHelper.create_value("*", 102400)
    generated_keys = ["{0}-{1}".format(str(uuid.uuid4()), i)
                      for i in range(0, 100)]
    for key in generated_keys:
        vb = crc32.crc32_hash(key)
        conn = self.awareness.memcached(key)
        conn.set(key, 0, 0, payload)
        conn.get(key)
        try:
            conn.sync_replication([{"key": key, "vbucket": vb}], replica)
            self.fail(
                "server did not raise an error when running sync_replication with {0} replicas"
                .format(replica))
        except MemcachedError as error:
            self.log.info("error {0} {1} as expected".format(
                error.status, error.msg))
    for key in generated_keys:
        self.awareness.memcached(key).get(key)
def load_some_data(serverInfo, fill_ram_percentage=10.0,
                   bucket_name='default', test=None):
    """Insert enough ~10KB-slot keys to fill *fill_ram_percentage* percent of
    the bucket's free RAM; return the list of keys actually inserted.

    Fixes: (1) the error path referenced an undefined name ``test`` — it is
    now an optional keyword parameter (backward compatible, defaults to
    None); (2) the "inserting ... @ ..." log line used ``{0}`` twice, so the
    server ip was never printed.

    :param test: optional unittest.TestCase; when given, a set failure fails
        the test instead of silently stopping the load
    """
    log = logger.Logger.get_logger()
    if fill_ram_percentage <= 0.0:
        fill_ram_percentage = 5.0
    client = MemcachedClientHelper.direct_client(serverInfo, bucket_name)
    #populate key
    rest = RestConnection(serverInfo)
    RestHelper(rest).vbucket_map_ready(bucket_name, 60)
    vbucket_count = len(rest.get_vbuckets(bucket_name))
    testuuid = uuid.uuid4()
    info = rest.get_bucket(bucket_name)
    emptySpace = info.stats.ram - info.stats.memUsed
    log.info('emptySpace : {0} fill_ram_percentage : {1}'.format(emptySpace, fill_ram_percentage))
    fill_space = (emptySpace * fill_ram_percentage) / 100.0
    log.info("fill_space {0}".format(fill_space))
    # each packet can be 10 KB
    packetSize = int(10 * 1024)
    number_of_buckets = int(fill_space) / packetSize
    log.info('packetSize: {0}'.format(packetSize))
    log.info('memory usage before key insertion : {0}'.format(info.stats.memUsed))
    # was: '... {0} new keys to memcached @ {0}' — ip never printed
    log.info('inserting {0} new keys to memcached @ {1}'.format(number_of_buckets, serverInfo.ip))
    keys = ["key_%s_%d" % (testuuid, i) for i in range(number_of_buckets)]
    inserted_keys = []
    for key in keys:
        vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
        client.vbucketId = vbucketId
        try:
            client.set(key, 0, 0, key)
            inserted_keys.append(key)
        except mc_bin_client.MemcachedError as error:
            log.error(error)
            client.close()
            log.error("unable to push key : {0} to vbucket : {1}".format(key, client.vbucketId))
            if test:
                test.fail("unable to push key : {0} to vbucket : {1}".format(key, client.vbucketId))
            else:
                # best-effort load: stop on the first failure
                break
    client.close()
    return inserted_keys
def test_not_your_vbucket(self):
    """Verify that sync_replication against a memcached node that does NOT
    own the key's vbucket reports an error for every request.

    Fix: the final check used ``expected_error is not 100`` — identity
    comparison against an int literal, which is implementation-defined for
    ints — replaced with ``!=``.
    """
    self.common_setup(1)
    keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
    value = MemcachedClientHelper.create_value("*", 1024)
    for k in keys:
        # NOTE(review): vBucket is the raw hash, not masked by the vbucket
        # count — presumably deliberate, since the target node rejects it
        vBucket = crc32.crc32_hash(k)
        mc = self.awareness.memcached(k)
        mc.set(k, 0, 0, value)
        not_your_vbucket_mc = self.awareness.not_my_vbucket_memcached(k)
        try:
            count = 0
            expected_error = 0
            while count < 100:
                a, b, response = not_your_vbucket_mc.sync_replication(
                    [{"key": k, "vbucket": vBucket}], 1)
                count += 1
                self.log.info("response : {0}".format(response))
                if response and response[0]["event"] != "invalid key":
                    expected_error += 1
            # was: `expected_error is not 100` (identity test on an int)
            if expected_error != 100:
                self.fail(msg="server did not raise an error when running sync_replication with invalid vbucket")
        except MemcachedError as error:
            self.log.error(error)
def verify_data(server, keys, value_equal_to_key, verify_flags, test,
                debug=False, bucket="default"):
    """Fetch every key from the bucket and optionally assert value/flag
    correctness; return True only if all keys were retrievable.

    Fixes: (1) the error log format string used ``{0}`` twice, so the failing
    key was never printed (the status was repeated instead); (2) the
    deprecated ``assertEquals`` alias replaced with ``assertEqual``.

    :param value_equal_to_key: assert each value equals its own key
    :param verify_flags: assert flags equal adler32(value)
    :param test: unittest.TestCase used for assertions
    :return: True if every key was fetched, False otherwise
    """
    log = logger.Logger.get_logger()
    log_error_count = 0
    # verify all the keys
    client = MemcachedClientHelper.direct_client(server, bucket)
    vbucket_count = len(RestConnection(server).get_vbuckets(bucket))
    # populate key
    index = 0
    all_verified = True
    keys_failed = []
    for key in keys:
        try:
            index += 1
            vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
            client.vbucketId = vbucketId
            flag, keyx, value = client.get(key=key)
            if value_equal_to_key:
                test.assertEqual(value, key, msg='values dont match')
            if verify_flags:
                actual_flag = socket.ntohl(flag)
                expected_flag = ctypes.c_uint32(zlib.adler32(value)).value
                test.assertEqual(actual_flag, expected_flag, msg='flags dont match')
            if debug:
                log.info("verified key #{0} : {1}".format(index, key))
        except mc_bin_client.MemcachedError as error:
            if debug:
                log_error_count += 1
                # cap noisy per-key logging at 100 entries
                if log_error_count < 100:
                    log.error(error)
                    # was: "... {0} ... {0}" — key never printed
                    log.error(
                        "memcachedError : {0} - unable to get a pre-inserted key : {1}".format(error.status, key))
            keys_failed.append(key)
            all_verified = False
    client.close()
    if len(keys_failed) > 0:
        log.error('unable to verify #{0} keys'.format(len(keys_failed)))
    return all_verified
def delete(self, key, cas=0, vbucket=-1):
    """Delete the value stored under *key* on the memcached server."""
    # NOTE(review): unlike set(), an explicit vbucket argument is never
    # assigned to self.vbucketId here — confirm this asymmetry is intended
    derive_from_key = (vbucket == -1)
    if derive_from_key:
        self.vbucketId = crc32.crc32_hash(key) & (self.vbucket_count - 1)
    return self._doCmd(MemcachedConstants.CMD_DELETE, key, '', '', cas)
def delete(self, key, cas=0, vbucket=-1):
    """Delete the value for a given key within the memcached server.

    Fix: the docstring was placed after the vbucket-selection code, where it
    is just a no-op string expression; moved to the top of the function so it
    is an actual docstring.
    """
    # NOTE(review): unlike set(), an explicit vbucket argument is never
    # assigned to self.vbucketId here — confirm this asymmetry is intended
    if vbucket == -1:
        self.vbucketId = crc32.crc32_hash(key) & (self.vbucket_count - 1)
    return self._doCmd(memcacheConstants.CMD_DELETE, key, '', '', cas)
def memcached(self, key, replica_index=None):
    """Return the memcached connection for *key*'s vbucket — the active
    copy by default, or the given replica when *replica_index* is set."""
    vb = crc32.crc32_hash(key) & (len(self.vBucketMap) - 1)
    if replica_index is not None:
        return self.memcached_for_replica_vbucket(vb, replica_index)
    return self.memcached_for_vbucket(vb)
def get_vbucket_id(key, num_vbuckets):
    """Map *key* to a vbucket id; 0 whenever no vbuckets are configured
    (num_vbuckets <= 0)."""
    if num_vbuckets <= 0:
        return 0
    return crc32_hash(key) & (num_vbuckets - 1)
def vbucketid(self, key):
    """Return the vbucket id for *key*, reading the map size under the
    vbucket-map lock.

    Fix: the lock was released with plain acquire/release statements, so an
    exception between them would leak the lock; the release now sits in a
    ``finally`` block.
    """
    self._vBucketMap_lock.acquire()
    try:
        return crc32.crc32_hash(key) & (len(self._vBucketMap) - 1)
    finally:
        self._vBucketMap_lock.release()
def send_set(self, key, exp, flags, val):
    """Set a value in the memcached server without handling the response
    (fire-and-forget)."""
    self.vbucketId = crc32.crc32_hash(key) & (self.vbucket_count - 1)
    opaque = self.r.randint(0, 2 ** 32)
    extra = struct.pack(SET_PKT_FMT, flags, exp)
    self._sendCmd(memcacheConstants.CMD_SET, key, val, opaque, extra, 0)
def not_my_vbucket_memcached(self, key):
    """Return a memcached connection to some server that does NOT own
    *key*'s vbucket (implicitly None when the owner is the only server)."""
    vb = crc32.crc32_hash(key) & (len(self.vBucketMap) - 1)
    owner = self.vBucketMap[vb]
    for candidate in self.memcacheds:
        if candidate != owner:
            return self.memcacheds[candidate]