def collect_vbucket_stats(self, buckets, servers, collect_vbucket=True,
                          collect_vbucket_seqno=True,
                          collect_vbucket_details=True, perNode=True):
    """Extract per-bucket vbucket stats via the memcached stats interface.

    Parameters:
        buckets: bucket objects (only ``bucket.name`` is read)
        servers: server objects (only ``server.ip`` is read)
        collect_vbucket: include 'vbucket' type stats
        collect_vbucket_seqno: include 'vbucket-seqno' type stats
        collect_vbucket_details: include 'vbucket-details' type stats
        perNode: if True collect data per node, else take a union across nodes

    Returns:
        perNode=True  -> {bucket_name: {node_ip: {stat_key: value}}}
        perNode=False -> {bucket_name: {stat_key: value}}  (union across nodes)
    """
    bucketMap = {}
    for bucket in buckets:
        dataMap = {}
        for server in servers:
            map_data = {}
            rest = RestConnection(server)
            port = rest.get_memcached_port()
            client = MemcachedClient(host=server.ip, port=port)
            try:
                if collect_vbucket:
                    self.createMapVbucket(client.stats('vbucket'), map_data)
                if collect_vbucket_seqno:
                    self.createMapVbucket(client.stats('vbucket-seqno'),
                                          map_data)
                if collect_vbucket_details:
                    self.createMapVbucket(client.stats('vbucket-details'),
                                          map_data)
            finally:
                # The original leaked one connection per server per bucket.
                client.close()
            if perNode:
                dataMap[server.ip] = map_data
            else:
                dataMap.update(map_data)
        bucketMap[bucket.name] = dataMap
    return bucketMap
def test_xattr_compression(self):
    """Regression test for MB-32669.

    With compression mode 'active', write a nested JSON document plus an
    xattr, wait for persistence, evict the key, let the TTL expire, and
    verify the key is gone and item counters drop to zero.

    Invocation:
    subdoc.subdoc_simple_dataset.SubdocSimpleDataset.test_xattr_compression,compression=active
    """
    mc = MemcachedClient(self.master.ip, 11210)
    mc.sasl_auth_plain(self.master.rest_username, self.master.rest_password)
    mc.bucket_select('default')
    self.key = "test_xattr_compression"
    self.nesting_level = 5
    array = {'i_add': 0, 'i_sub': 1, 'a_i_a': [0, 1], 'ai_sub': [0, 1]}
    base_json = self.generate_json_for_nesting()
    nested_json = self.generate_nested(base_json, array, self.nesting_level)
    jsonDump = json.dumps(nested_json)

    # Precondition: the bucket must be in active compression mode.
    stats = mc.stats()
    self.assertEqual(stats['ep_compression_mode'], 'active')

    scheme = "http"
    host = "{0}:{1}".format(self.master.ip, self.master.port)
    self.sdk_client = SDKClient(scheme=scheme, hosts=[host], bucket="default")
    self.sdk_client.set(self.key, value=jsonDump, ttl=60)
    rv = self.sdk_client.cb.mutate_in(
        self.key,
        SD.upsert('my.attr', "value", xattr=True, create_parents=True),
        ttl=60)
    self.assertTrue(rv.success)

    # Wait for the mutation to persist, then evict the key.
    # NOTE(review): busy-polls observe() with no sleep or timeout - loops
    # forever if persistence never happens; consider bounding this.
    persisted = 0
    while persisted == 0:
        opaque, rep_time, persist_time, persisted, cas = mc.observe(self.key)
    mc.evict_key(self.key)

    # Sleep past the 60 s TTL so the document expires.
    time.sleep(65)
    try:
        self.client.get(self.key)
        self.fail("the key should get expired")
    except mc_bin_client.MemcachedError as error:
        # status 1 == KEY_ENOENT (not found)
        self.assertEqual(error.status, 1)

    stats = mc.stats()
    self.assertEqual(int(stats['curr_items']), 0)
    self.assertEqual(int(stats['curr_temp_items']), 0)
def collect_vbucket_num_stats(self, servers, buckets):
    """Collect active and replica vbucket counts per node for each bucket.

    (The original docstring wrongly described failover stats; this method
    reads 'vb_active_num' and 'vb_replica_num' from the all-stats group.)

    Parameters:
        servers: server objects (only ``server.ip`` is read)
        buckets: bucket objects (only ``bucket.name`` is read)

    Returns:
        (active_map, replica_map) where each is
        {bucket_name: {node_ip: vbucket_count}}
    """
    active_bucketMap = {}
    replica_bucketMap = {}
    for bucket in buckets:
        active_map_data = {}
        replica_map_data = {}
        for server in servers:
            rest = RestConnection(server)
            port = rest.get_memcached_port()
            client = MemcachedClient(host=server.ip, port=port)
            try:
                stats = client.stats('')
            finally:
                client.close()
            # Direct lookups instead of scanning every stat key.
            if 'vb_active_num' in stats:
                active_map_data[server.ip] = int(stats['vb_active_num'])
            if 'vb_replica_num' in stats:
                replica_map_data[server.ip] = int(stats['vb_replica_num'])
        active_bucketMap[bucket.name] = active_map_data
        replica_bucketMap[bucket.name] = replica_map_data
    return active_bucketMap, replica_bucketMap
def wait_for_warmup(host, port):
    """Block until the memcached node at host:port leaves warmup.

    Polls the 'ep_degraded_mode' stat once per second and returns when it
    reads '0'.

    NOTE(review): loops forever if the node never warms up - consider
    adding a timeout.
    """
    while True:
        client = McdClient(host, port)
        try:
            response = client.stats()
            if response['ep_degraded_mode'] == '0':
                break
        except Exception:
            # Stats can fail while the node is still starting up; retry.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit.)
            pass
        time.sleep(1)
def collect_failovers_stats(self, buckets, servers, perNode=True):
    """Extract the failover stats reported by the 'failovers' stat group.

    Stat keys arrive as 'vb:key' or 'vb:num:key'; entries carrying an
    explicit num are kept only while num is monotonically non-decreasing
    per vbucket, except 'num_entries' which is always recorded.

    Parameters:
        buckets: bucket objects (only ``bucket.name`` is read)
        servers: server objects (only ``server.ip`` is read)
        perNode: if True keep data per node, else union across nodes

    Returns:
        perNode=True  -> {bucket_name: {node_ip: {vb: {key: value}}}}
        perNode=False -> {bucket_name: {vb: {key: value}}}
    """
    bucketMap = {}
    for bucket in buckets:
        dataMap = {}
        for server in servers:
            rest = RestConnection(server)
            port = rest.get_memcached_port()
            client = MemcachedClient(host=server.ip, port=port)
            try:
                stats = client.stats('failovers')
            finally:
                client.close()
            map_data = {}
            num_map = {}
            for stat_key in stats:
                tokens = stat_key.split(":")
                vb = tokens[0]
                key = tokens[1]
                value = stats[stat_key].split()
                num = -1
                if len(tokens) == 3:
                    # 'vb:num:key' form - the middle token is an entry index.
                    num = int(tokens[1])
                    key = tokens[2]
                if vb in map_data and num >= num_map[vb]:
                    # (Was `num == num_map[vb] or num > num_map[vb]`.)
                    map_data[vb][key] = value[0]
                    num_map[vb] = num
                elif vb in map_data and key == "num_entries":
                    # num_entries is recorded regardless of the entry index.
                    map_data[vb][key] = value[0]
                elif vb not in map_data:
                    map_data[vb] = {key: value[0]}
                    num_map[vb] = num
            if perNode:
                dataMap[server.ip] = map_data
            else:
                dataMap.update(map_data)
        bucketMap[bucket.name] = dataMap
    return bucketMap
def wait_for_warmup(self, host, port):
    """Wait until the bucket on host:port has finished warmup.

    Ephemeral buckets have no warmup phase, so return immediately.
    Otherwise poll once per second until 'ep_degraded_mode' reports that
    warmup is done, accepting both the old-style ('0') and the 4.5+
    ('false') value formats.
    """
    if self.bucket_type == 'ephemeral':
        return
    while True:
        client = McdClient(host, port)
        try:
            client.bucket_select("default")
            response = client.stats()
            # Old style reports '0'; new style (as of 4.5) reports 'false'.
            mode = response.get('ep_degraded_mode')
            if mode is not None and mode in ('0', 'false'):
                break
        except Exception:
            # Node may not be ready to answer stats yet; retry.
            # (Dropped the unused `as ex` binding.)
            pass
        time.sleep(1)
def collect_compare_dcp_stats(self, buckets, servers, perNode=True,
                              stat_name='unacked_bytes', compare_value=0,
                              flow_control_buffer_size=20971520,
                              filter_list=None):
    """Verify every DCP stat matching ``stat_name`` equals ``compare_value``.

    Parameters:
        buckets: bucket objects (only ``bucket.name`` is read)
        servers: server objects (only ``server.ip`` is read)
        perNode: unused; kept for signature compatibility
        stat_name: substring of the stat key to check, e.g. 'unacked_bytes'
        compare_value: the value every matching stat must hold
        flow_control_buffer_size: tolerated upper bound for mapreduce_view
            DCP queues before the bucket is marked failing
        filter_list: substrings of stat keys to skip entirely

    Returns:
        {bucket_name: bool} - True if every unfiltered matching stat
        satisfied the comparison for that bucket.
    """
    # Avoid the mutable-default-argument pitfall (was `filter_list=[]`).
    if filter_list is None:
        filter_list = []
    bucketMap = {}
    for bucket in buckets:
        bucketMap[bucket.name] = True
    for bucket in buckets:
        for server in servers:
            rest = RestConnection(server)
            port = rest.get_memcached_port()
            client = MemcachedClient(host=server.ip, port=port)
            try:
                stats = client.stats('dcp')
            finally:
                client.close()
            for key in stats:
                if stat_name not in key:
                    continue
                # Skip filtered keys before converting (also stops shadowing
                # the `filter` builtin).
                if any(f in key for f in filter_list):
                    continue
                value = int(stats[key])
                if value != compare_value:
                    if "eq_dcpq:mapreduce_view" in key:
                        # View-engine DCP queues may legitimately lag; only
                        # fail once they exceed the flow-control buffer.
                        if value >= flow_control_buffer_size:
                            bucketMap[bucket.name] = False
                    else:
                        # Bug fix: the original wrote `bucketMap[bucket]`
                        # (the bucket object) although the map was keyed by
                        # bucket.name, so failures never reached the
                        # returned entries.
                        bucketMap[bucket.name] = False
    return bucketMap