class Collections_Stats(object):
    """Query collection/scope statistics from a node via ``cbstats``."""

    def __init__(self, node):
        # One shell connection per instance; every stat query goes through it.
        self.log = lib.logger.Logger.get_logger()
        self.shell = RemoteMachineShellConnection(node)

    def get_collection_stats(self, bucket):
        """Run ``cbstats <bucket> collections`` and return (output, error)."""
        out, err = self.shell.execute_cbstats(bucket, "collections")
        return out, err

    def get_collection_vb_stats(self, bucket, vbid=None):
        """Run ``cbstats <bucket> collections-details``.

        ``vbid``, when truthy, is appended verbatim to the stat key
        (no separator is inserted).
        """
        stat_key = "collections-details" if not vbid else "collections-details" + vbid
        out, err = self.shell.execute_cbstats(bucket, stat_key)
        return out, err

    def get_scope_stats(self, bucket):
        """Run ``cbstats <bucket> scopes`` and return (output, error)."""
        out, err = self.shell.execute_cbstats(bucket, "scopes")
        return out, err

    def get_scope_vb_stats(self, bucket, vbid=None):
        """Run ``cbstats <bucket> scopes-details`` (optionally per-vbucket)."""
        stat_key = "scopes-details" if not vbid else "scopes-details" + vbid
        out, err = self.shell.execute_cbstats(bucket, stat_key)
        return out, err
def test_items_append(self):
    """Append-workload test.

    Loads all buckets, then repeatedly appends random data to a subset of
    keys until each value reaches (or crosses) ``desired_item_size``.
    Afterwards it logs memory/fragmentation stats per server and verifies
    final value sizes (and, when ``kv_verify`` is set, exact contents).
    """
    # Test parameters (with defaults) pulled from the framework's input.
    self.desired_item_size = self.input.param("desired_item_size", 2048)
    self.append_size = self.input.param("append_size", 1024)
    self.fixed_append_size = self.input.param("fixed_append_size", True)
    self.append_ratio = self.input.param("append_ratio", 0.5)
    self._load_all_buckets(self.master, self.gen_create, "create", 0,
                           batch_size=1000, pause_secs=5, timeout_secs=100)
    for bucket in self.buckets:
        self.value_size = self.input.param("value_size", 512)
        verify_dict = {}
        # dkeys (deleted keys) is unpacked but not used below.
        vkeys, dkeys = bucket.kvs[1].key_set()
        key_count = len(vkeys)
        # Select roughly append_ratio * key_count keys to append to.
        app_ratio = self.append_ratio * key_count
        selected_keys = []
        i = 0
        for key in vkeys:
            i += 1
            if i >= app_ratio:
                break
            selected_keys.append(key)
        awareness = VBucketAwareMemcached(RestConnection(self.master),
                                         bucket.name)
        if self.kv_verify:
            # Snapshot the current values so appended content can be
            # verified exactly in Phase 2 below.
            for key in selected_keys:
                value = awareness.memcached(key).get(key)[2]
                verify_dict[key] = value
        self.log.info("Bucket: {0}".format(bucket.name))
        self.log.info("Appending to have items whose initial size was " +
                      "{0} to equal or cross a size of {1}".format(
                          self.value_size, self.desired_item_size))
        # NOTE(review): the logged count is len(selected_keys) + 1, one more
        # than the number of keys actually appended to — confirm intent.
        self.log.info("Item-appending of {0} items starting ..".format(
            len(selected_keys) + 1))
        index = 3
        while self.value_size < self.desired_item_size:
            str_len = self.append_size
            if not self.fixed_append_size:
                # Growing append: 2**index bytes per round (8, 16, 32, ...).
                str_len = int(math.pow(2, index))
            for key in selected_keys:
                random_string = self.random_str_generator(str_len)
                awareness.memcached(key).append(key, random_string)
                if self.kv_verify:
                    # Mirror the append locally for Phase 2 verification.
                    verify_dict[key] = verify_dict[key] + random_string
            self.log.info(
                "for {0} items size was increased to {1} Bytes".format(
                    len(selected_keys) + 1, self.value_size))
            self.value_size += str_len
            index += 1
        self.log.info("The appending of {0} items ended".format(
            len(selected_keys) + 1))
    # NOTE(review): 'awareness', 'selected_keys' and 'verify_dict' used in
    # this verification loop are whatever the LAST bucket of the loop above
    # left behind — for multi-bucket runs this looks unintended; confirm.
    for bucket in self.buckets:
        msg = "Bucket:{0}".format(bucket.name)
        self.log.info("VERIFICATION <" + msg +
                      ">: Phase 0 - Check the gap between " +
                      "mem_used by the bucket and total_allocated_bytes")
        stats = StatsCommon()
    # Phase 0: log allocator stats per server; informational only (no assert).
    mem_used_stats = stats.get_stats(self.servers, bucket, 'memory',
                                     'mem_used')
    total_allocated_bytes_stats = stats.get_stats(
        self.servers, bucket, 'memory', 'total_allocated_bytes')
    total_fragmentation_bytes_stats = stats.get_stats(
        self.servers, bucket, 'memory', 'total_fragmentation_bytes')
    for server in self.servers:
        self.log.info(
            "In {0} bucket {1}, total_fragmentation_bytes + the total_allocated_bytes = {2}"
            .format(server.ip, bucket.name,
                    (int(total_fragmentation_bytes_stats[server]) +
                     int(total_allocated_bytes_stats[server]))))
        self.log.info("In {0} bucket {1}, mem_used = {2}".format(
            server.ip, bucket.name, mem_used_stats[server]))
        self.log.info(
            "In {0} bucket {1}, the difference between actual memory used by memcached and mem_used is {2} times"
            .format(
                server.ip, bucket.name,
                float(
                    int(total_fragmentation_bytes_stats[server]) +
                    int(total_allocated_bytes_stats[server])) /
                float(mem_used_stats[server])))
    # Phase 1: every selected key must have reached the desired size.
    self.log.info(
        "VERIFICATION <" + msg + ">: Phase1 - Check if any of the " +
        "selected keys have value less than the desired value size")
    for key in selected_keys:
        value = awareness.memcached(key).get(key)[2]
        if len(value) < self.desired_item_size:
            self.fail(
                "Failed to append enough to make value size surpass the " +
                "size {0}, key {1} has size {2}".format(
                    self.desired_item_size, key, len(value)))
    if self.kv_verify:
        # Phase 2: stored values must equal the locally-mirrored appends.
        self.log.info("VERIFICATION <" + msg +
                      ">: Phase2 - Check if the content " +
                      "after the appends match what's expected")
        for k in verify_dict:
            if awareness.memcached(k).get(k)[2] != verify_dict[k]:
                self.fail(
                    "Content at key {0}: not what's expected.".format(
                        k))
    self.log.info("VERIFICATION <" + msg + ">: Successful")
    # Dump raw allocator stats for post-mortem inspection, then clean up.
    shell = RemoteMachineShellConnection(self.master)
    shell.execute_cbstats("", "raw", keyname="allocator", vbid="")
    shell.disconnect()
class CollectionsStats(object):
    """Query and parse collection/scope statistics via ``cbstats``.

    Wraps a ``RemoteMachineShellConnection`` to the given node and parses
    the ``collections`` / ``scopes`` stat groups into scope ids, collection
    ids and per-collection item counts.
    """

    def __init__(self, node):
        self.node = node
        self.shell = RemoteMachineShellConnection(self.node)

    def get_collection_stats(self, bucket):
        """Run ``cbstats <bucket> collections`` and return (output, error)."""
        output, error = self.shell.execute_cbstats(
            bucket, "collections", cbadmin_user="******")
        return output, error

    def get_collection_vb_stats(self, bucket, vbid=None):
        """Run ``cbstats <bucket> collections-details`` (optionally per-vbucket).

        ``vbid``, when truthy, is appended to the stat key with no separator.
        """
        cmd = "collections-details"
        if vbid:
            # str() so an integer vbid no longer raises TypeError;
            # string vbids behave exactly as before.
            cmd += str(vbid)
        output, error = self.shell.execute_cbstats(bucket, cmd)
        return output, error

    def get_scope_stats(self, bucket):
        """Run ``cbstats <bucket> scopes`` and return (output, error)."""
        output, error = self.shell.execute_cbstats(bucket, "scopes")
        return output, error

    def get_scope_vb_stats(self, bucket, vbid=None):
        """Run ``cbstats <bucket> scopes-details`` (optionally per-vbucket)."""
        cmd = "scopes-details"
        if vbid:
            cmd += str(vbid)
        output, error = self.shell.execute_cbstats(bucket, cmd)
        return output, error

    def get_scope_id(self, bucket, scope, cbstats=None):
        """Return the scope id for ``scope``, or None if not found.

        ``cbstats`` may be a pre-fetched ``collections`` stat output to
        avoid a second cbstats call.
        """
        if not cbstats:
            cbstats, _ = self.get_collection_stats(bucket)
        for stat in cbstats:
            stat = stat.replace(' ', '')
            if ":scope_name:" + scope in stat:
                return stat.split(":")[0]
        return None

    def get_collection_id(self, bucket, scope, collection, cbstats=None):
        """Return the '<scope_id>:<collection_id>' prefix for ``collection``
        inside ``scope``, or None if not found."""
        if not cbstats:
            cbstats, _ = self.get_collection_stats(bucket)
        scope_id = self.get_scope_id(bucket, scope, cbstats)
        for stat in cbstats:
            stat = stat.replace(' ', '')
            if ":name:" + collection in stat:
                # Only accept the collection when it lives under scope_id.
                if stat.split(":")[0] == scope_id:
                    return stat.split(":name:")[0]
        return None

    def _normalize_nodes(self, node):
        """Normalize the ``node`` argument to a list (default: wrapped node)."""
        if not node:
            return [self.node]
        if isinstance(node, list):
            return node
        return [node]

    def _item_counts(self, cbstats):
        """Parse ':items:' stat lines into {stat_id: item_count}."""
        id_counts = {}
        for stat in cbstats:
            if ":items:" in stat:
                stat = stat.replace(' ', '')
                stat_id, items = stat.split(":items:")
                id_counts[stat_id.strip()] = int(items)
        return id_counts

    def get_scope_item_count(self, bucket, scope, node=None):
        """Sum item counts of every collection under ``scope`` across nodes."""
        count = 0
        for n in self._normalize_nodes(node):
            # BUG FIX: the original called get_collection_stats() on the raw
            # RemoteMachineShellConnection, which has no such method (it is a
            # method of this class); fetch the stats via execute_cbstats as
            # get_collection_item_count already did.
            cbstats, _ = RemoteMachineShellConnection(n).execute_cbstats(
                bucket, "collections", cbadmin_user="******")
            scope_id = self.get_scope_id(bucket, scope, cbstats)
            id_counts = self._item_counts(cbstats)
            for stat_id in id_counts:
                # stat ids are '<scope_id>:<collection_id>'; match the scope.
                if stat_id.split(':')[0] == scope_id:
                    count += id_counts[stat_id]
        return count

    def get_collection_item_count(self, bucket, scope, collection, node=None):
        """Sum the item count of one collection across the given node(s)."""
        count = 0
        for n in self._normalize_nodes(node):
            cbstats, _ = RemoteMachineShellConnection(n).execute_cbstats(
                bucket, "collections", cbadmin_user="******")
            collection_id = self.get_collection_id(bucket, scope, collection,
                                                   cbstats)
            id_counts = self._item_counts(cbstats)
            for stat_id in id_counts:
                if stat_id == collection_id:
                    count += id_counts[stat_id]
        return count
def test_items_append(self):
    """Append-workload test.

    Loads all buckets, repeatedly appends random data to a selected subset
    of keys until each value reaches (or crosses) ``desired_item_size``,
    then logs per-server memory/fragmentation stats and verifies the final
    value sizes (and exact contents when ``kv_verify`` is set).
    """
    # Framework-supplied test parameters with defaults.
    self.desired_item_size = self.input.param("desired_item_size", 2048)
    self.append_size = self.input.param("append_size", 1024)
    self.fixed_append_size = self.input.param("fixed_append_size", True)
    self.append_ratio = self.input.param("append_ratio", 0.5)
    self._load_all_buckets(self.master, self.gen_create, "create", 0,
                           batch_size=1000, pause_secs=5, timeout_secs=100)
    for bucket in self.buckets:
        self.value_size = self.input.param("value_size", 512)
        verify_dict = {}
        # dkeys (deleted keys) is unpacked but never used below.
        vkeys, dkeys = bucket.kvs[1].key_set()
        key_count = len(vkeys)
        # Pick roughly append_ratio * key_count keys to append to.
        app_ratio = self.append_ratio * key_count
        selected_keys = []
        i = 0
        for key in vkeys:
            i += 1
            if i >= app_ratio:
                break
            selected_keys.append(key)
        awareness = VBucketAwareMemcached(RestConnection(self.master),
                                         bucket.name)
        if self.kv_verify:
            # Snapshot current values so appends can be verified exactly.
            for key in selected_keys:
                value = awareness.memcached(key).get(key)[2]
                verify_dict[key] = value
        self.log.info("Bucket: {0}".format(bucket.name))
        self.log.info("Appending to have items whose initial size was " +
                      "{0} to equal or cross a size of {1}".format(self.value_size, self.desired_item_size))
        # NOTE(review): logged count is len(selected_keys) + 1, one more than
        # the number of keys actually appended to — confirm intent.
        self.log.info("Item-appending of {0} items starting ..".format(len(selected_keys) + 1))
        index = 3
        while self.value_size < self.desired_item_size:
            str_len = self.append_size
            if not self.fixed_append_size:
                # Growing append sizes: 2**index bytes per round.
                str_len = int(math.pow(2, index))
            for key in selected_keys:
                random_string = self.random_str_generator(str_len)
                awareness.memcached(key).append(key, random_string)
                if self.kv_verify:
                    # Mirror the append locally for Phase 2 verification.
                    verify_dict[key] = verify_dict[key] + random_string
            self.log.info("for {0} items size was increased to {1} Bytes".format(len(selected_keys) + 1, self.value_size))
            self.value_size += str_len
            index += 1
        self.log.info("The appending of {0} items ended".format(len(selected_keys) + 1))
    # NOTE(review): 'awareness', 'selected_keys' and 'verify_dict' used in the
    # verification loop below are the ones left over from the LAST bucket of
    # the loop above — looks unintended for multi-bucket runs; confirm.
    for bucket in self.buckets:
        msg = "Bucket:{0}".format(bucket.name)
        self.log.info("VERIFICATION <" + msg + ">: Phase 0 - Check the gap between " +
                      "mem_used by the bucket and total_allocated_bytes")
        stats = StatsCommon()
        # Phase 0 is informational only: log allocator numbers per server.
        mem_used_stats = stats.get_stats(self.servers, bucket, 'memory', 'mem_used')
        total_allocated_bytes_stats = stats.get_stats(self.servers, bucket, 'memory', 'total_allocated_bytes')
        total_fragmentation_bytes_stats = stats.get_stats(self.servers, bucket, 'memory', 'total_fragmentation_bytes')
        for server in self.servers:
            self.log.info("In {0} bucket {1}, total_fragmentation_bytes + the total_allocated_bytes = {2}"
                          .format(server.ip, bucket.name, (int(total_fragmentation_bytes_stats[server]) + int(total_allocated_bytes_stats[server]))))
            self.log.info("In {0} bucket {1}, mem_used = {2}".format(server.ip, bucket.name, mem_used_stats[server]))
            self.log.info("In {0} bucket {1}, the difference between actual memory used by memcached and mem_used is {2} times"
                          .format(server.ip, bucket.name, float(int(total_fragmentation_bytes_stats[server]) + int(total_allocated_bytes_stats[server])) / float(mem_used_stats[server])))
        # Phase 1: every selected key must have reached the desired size.
        self.log.info("VERIFICATION <" + msg + ">: Phase1 - Check if any of the " +
                      "selected keys have value less than the desired value size")
        for key in selected_keys:
            value = awareness.memcached(key).get(key)[2]
            if len(value) < self.desired_item_size:
                self.fail("Failed to append enough to make value size surpass the " +
                          "size {0}, key {1} has size {2}".format(self.desired_item_size, key, len(value)))
        if self.kv_verify:
            # Phase 2: stored values must equal the locally-mirrored appends.
            self.log.info("VERIFICATION <" + msg + ">: Phase2 - Check if the content " +
                          "after the appends match what's expected")
            for k in verify_dict:
                if awareness.memcached(k).get(k)[2] != verify_dict[k]:
                    self.fail("Content at key {0}: not what's expected.".format(k))
        self.log.info("VERIFICATION <" + msg + ">: Successful")
    # Dump raw allocator stats for post-mortem inspection, then clean up.
    shell = RemoteMachineShellConnection(self.master)
    shell.execute_cbstats("", "raw", keyname="allocator", vbid="")
    shell.disconnect()