# assumed imports, following the surrounding testrunner layout
import logger
from remote.remote_util import RemoteMachineShellConnection


class Collections_Stats(object):

    def __init__(self, node):
        self.log = logger.Logger.get_logger()
        self.shell = RemoteMachineShellConnection(node)

    def get_collection_stats(self, bucket):
        output, error = self.shell.execute_cbstats(bucket, "collections")
        return output, error

    def get_collection_vb_stats(self, bucket, vbid=None):
        cmd = "collections-details"
        if vbid:
            # append the vbucket id as a separate cbstats argument
            cmd += " " + str(vbid)
        output, error = self.shell.execute_cbstats(bucket, cmd)
        return output, error

    def get_scope_stats(self, bucket):
        output, error = self.shell.execute_cbstats(bucket, "scopes")
        return output, error

    def get_scope_vb_stats(self, bucket, vbid=None):
        cmd = "scopes-details"
        if vbid:
            cmd += " " + str(vbid)
        output, error = self.shell.execute_cbstats(bucket, cmd)
        return output, error
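# Hedged usage sketch, not part of the helper above: shows how a test might pull
# per-vbucket collection details through Collections_Stats. `node` and `bucket`
# are assumed to come from the test's setup (a server object and a Bucket
# object); the helper itself only wraps execute_cbstats.
def example_dump_collection_vb_stats(node, bucket, vbid=0):
    stats_helper = Collections_Stats(node)
    output, error = stats_helper.get_collection_vb_stats(bucket, vbid=vbid)
    if error:
        raise AssertionError("collections-details failed: {0}".format(error))
    for line in output:
        stats_helper.log.info(line)
    return output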
def mutate_and_checkpoint(self, n=3, skip_validation=False):
    count = 1
    # get vb0 active source node
    active_src_node = self.get_active_vb0_node(self.src_master)
    shell = RemoteMachineShellConnection(active_src_node)
    while count <= n:
        remote_vbuuid, remote_highseqno = self.get_failover_log(self.dest_master)
        local_vbuuid, local_highseqno = self.get_failover_log(self.src_master)
        self.log.info("Local failover log: [{0}, {1}]".format(local_vbuuid, local_highseqno))
        self.log.info("Remote failover log: [{0}, {1}]".format(remote_vbuuid, remote_highseqno))
        self.log.info("################ New mutation: {0} ##################"
                      .format(self.key_counter + 1))
        self.load_one_mutation_into_source_vb0(active_src_node)
        self.sleep(60)
        if local_highseqno == "0":
            # avoid checking the very first/empty checkpoint record
            count += 1
            continue
        stats_count = []
        output, error = shell.execute_cbstats(self.src_cluster.get_bucket_by_name('default'),
                                              "checkpoint", print_results=False)
        for stat in output:
            if re.search("num_checkpoint_items:", stat):
                stats_count.append(int(stat.split(": ")[1]))
        # guard against empty output so max() cannot raise ValueError
        if stats_count and max(stats_count) > 0:
            self.log.info("Checkpoint recorded as expected")
            if not skip_validation:
                self.log.info("Validating latest checkpoint")
                self.get_and_validate_latest_checkpoint()
        else:
            self.log.info("Checkpointing failed - may not be an error if vb_uuid changed")
            return False
        count += 1
    return True
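# Hedged usage sketch (hypothetical, not taken from the suite): inside the same
# XDCR test class, a test could drive mutate_and_checkpoint() and turn its
# boolean result into a test failure. Only the signature and return value of
# the method above are relied on.
def example_test_checkpointing(self):
    self.assertTrue(self.mutate_and_checkpoint(n=3, skip_validation=False),
                    "no new checkpoint was recorded after mutating vb0 on the source")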
def get_checkpoint_call_history(self, node):
    stats_count = []
    shell = RemoteMachineShellConnection(node)
    output, error = shell.execute_cbstats(
        self.dest_cluster.get_bucket_by_name('default'),
        "checkpoint", print_results=False)
    for stat in output:
        if re.search("num_checkpoint_items:", stat):
            stats_count.append(int(stat.split(": ")[1]))
    # guard against empty output so max() cannot raise ValueError
    if stats_count and max(stats_count) > 0:
        total_successful_chkpts = max(stats_count)
    else:
        total_successful_chkpts = 0
    self.log.info(total_successful_chkpts)
    chkpts, count = NodeHelper.check_goxdcr_log(node,
                                                "num_failedckpts",
                                                log_name="stats.log",
                                                print_matches=False,
                                                timeout=10)
    if count > 0:
        total_failed_chkpts = int(
            (chkpts[-1].split('num_failedckpts,')[1]).rstrip('},'))
    else:
        total_failed_chkpts = 0
    return total_successful_chkpts + total_failed_chkpts, \
        total_successful_chkpts, total_failed_chkpts
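# Hedged usage sketch: get_checkpoint_call_history() returns a
# (total, successful, failed) tuple, so a caller in the same class can compare
# a snapshot against a baseline taken before a workload. The baseline argument
# and the method name below are illustrative; only the tuple shape comes from
# the method above.
def example_assert_new_checkpoints(self, node, baseline_total):
    total, successful, failed = self.get_checkpoint_call_history(node)
    self.log.info("checkpoints - total: {0}, successful: {1}, failed: {2}"
                  .format(total, successful, failed))
    self.assertTrue(total > baseline_total,
                    "no new checkpoint attempts were recorded on {0}".format(node.ip))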
def __verify_dcp_priority(self, server, expected_priority):
    shell = RemoteMachineShellConnection(server)
    rest = RestConnection(server)
    match = False
    for bucket in rest.get_buckets():
        repl = rest.get_replication_for_buckets(bucket.name, bucket.name)
        # Take 10 samples of the DCP priority, roughly 6 seconds apart
        # (cbstats takes ~4 seconds, plus a 2 second sleep).
        for sample in range(10):
            output, error = shell.execute_cbstats(bucket, "dcp", print_results=False)
            for stat in output:
                if re.search("eq_dcpq:xdcr:dcp_" + repl['id'] + ".*==:priority:", stat):
                    actual_priority = stat.split("==:priority:")[1].lstrip()
                    if actual_priority in expected_priority[bucket.name]:
                        match = True
                    self.log.info("Sample {0}:".format(sample + 1))
                    self.print_status(bucket.name, server.ip, "dcp priority",
                                      actual_priority, expected_priority[bucket.name],
                                      match=match)
            time.sleep(2)
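# Hedged usage sketch: __verify_dcp_priority() expects a mapping of bucket name
# to the acceptable priority strings reported by the "dcp" cbstats group. The
# example assumes it lives in the same class (so the name-mangled private call
# resolves); the bucket name and the "high"/"medium" values are illustrative,
# not taken from the original test.
def example_check_dcp_priorities(self, server):
    expected_priority = {"default": ["high", "medium"]}
    self.__verify_dcp_priority(server, expected_priority)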
def check(self):
    # check bucket compaction status across all nodes
    nodes = self.rest.get_nodes()
    current_compaction_count = {}

    for node in nodes:
        current_compaction_count[node.ip] = 0
        s = TestInputServer()
        s.ip = node.ip
        s.ssh_username = self.server.ssh_username
        s.ssh_password = self.server.ssh_password
        shell = RemoteMachineShellConnection(s)
        res = shell.execute_cbstats("", "raw", keyname="kvtimings", vbid="")
        shell.disconnect()
        for i in res[0]:
            # check for lines that look like
            # rw_0:compact_131072,262144:    8
            if 'compact' in i:
                current_compaction_count[node.ip] += int(i.split(':')[2])

    # compare per-node counts; cmp() on whole dicts is Python 2-only and does
    # not reliably express "the compaction count has increased"
    if any(current_compaction_count[ip] > self.compaction_count.get(ip, 0)
           for ip in current_compaction_count):
        # compaction count has increased
        self.set_result(True)
        self.state = FINISHED
    else:
        if self.retries > 0:
            # retry
            self.retries = self.retries - 1
            self.task_manager.schedule(self, 10)
        else:
            # never detected a compaction task running
            self.set_result(False)
            self.state = FINISHED
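# Hedged sketch of just the parsing step used above: a "kvtimings" line such as
# "rw_0:compact_131072,262144:    8" carries the sample count in its third
# colon-separated field, which is what check() accumulates per node. The helper
# name is illustrative and not part of the task class.
def example_count_compactions(kvtimings_lines):
    count = 0
    for line in kvtimings_lines:
        if 'compact' in line:
            count += int(line.split(':')[2])
    return count

# e.g. example_count_compactions(["rw_0:compact_131072,262144:    8"]) == 8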