def main():
    # parse args and env settings; conf is argv[3], so require at least 4 args
    len(sys.argv) >= 4 or exit_with_help()
    make = sys.argv[1]
    ini = sys.argv[2]
    conf = sys.argv[3]
    verbose = get_verbosity()
    debug = get_debug()
    num_nodes = parse_ini_servers(ini)

    # setup cluster_run nodes
    ns_clean(make, verbose)
    crm = CRManager(num_nodes)
    assert crm.start_nodes()

    # run test
    rc = run_test(ini, conf, verbose, debug)

    # done
    crm.stop_nodes()
    sys.exit(rc)
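# Hedged illustration, not part of the original runner: the usual entry-point
# guard plus an example invocation. The script name and file paths in the
# comment are assumptions, not taken from the source.
if __name__ == '__main__':
    # e.g.  python runner.py <make-target> <cluster.ini> <test.conf>
    main()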
class DCPBase(BaseTestCase):

    def __init__(self, args):
        super(DCPBase, self).__init__(args)
        self.is_setup = False
        self.crm = None
        self.input = TestInputSingleton.input
        self.use_cluster_run = self.input.param('dev', False)
        self.test = self.input.param('test', None)
        self.stopped_nodes = []
        self.doc_num = 0

        if self.use_cluster_run:
            num_nodes = self.input.param('num_nodes', 4)
            self.crm = CRManager(num_nodes, 0)

    def setUp(self):
        if self.test:
            if self.test != self._testMethodName:
                self.skipTest("disabled")

        self.is_setup = True

        if self.use_cluster_run:
            assert self.crm.clean()
            assert self.crm.start_nodes()
            time.sleep(5)

        super(DCPBase, self).setUp()
        self.is_setup = False

    def tearDown(self):
        for index in self.stopped_nodes:
            self.start_node(index)

        if self.use_cluster_run and not self.is_setup:
            assert self.crm.stop_nodes()
            self.cluster.shutdown(force=True)
        else:
            super(DCPBase, self).tearDown()

    def load_docs(self, node, vbucket, num_docs, bucket='default',
                  password='', exp=0, flags=0, update=False):
        """ uses a direct mcd client to control vbucket seqnos;
            keeps track of the vbucket and keys stored """

        mcd_client = self.mcd_client(node, vbucket,
                                     auth_user=bucket,
                                     auth_password=password)

        for i in range(num_docs):
            key = "key%s" % self.doc_num
            rc = mcd_client.set(key, exp, flags, "val", vbucket)
            if not update:
                self.doc_num += 1

    def dcp_client(self, node, connection_type=PRODUCER, vbucket=None,
                   name=None, auth_user=None, auth_password=''):
        """ creates a dcp client from a Node spec and opens a
            connection of the specified type """

        client = self.client_helper(node, DCP, vbucket)
        if auth_user:
            client.sasl_auth_plain(auth_user, auth_password)

        assert connection_type in (PRODUCER, CONSUMER, NOTIFIER)

        name = name or DEFAULT_CONN_NAME
        if connection_type == PRODUCER:
            response = client.open_producer(name)
        if connection_type == CONSUMER:
            response = client.open_consumer(name)
        if connection_type == NOTIFIER:
            response = client.open_notifier(name)

        assert response['status'] == SUCCESS
        return client

    def mcd_client(self, node, vbucket=None, auth_user=None, auth_password=None):
        """ creates a mcd client from a Node spec """

        client = self.client_helper(node, MCD, vbucket)
        if auth_user:
            client.sasl_auth_plain(auth_user, auth_password)
        return client

    def client_helper(self, node, type_, vbucket):
        assert type_ in (MCD, DCP)

        client = None
        ip = None
        port = None
        rest = RestConnection(node)

        if vbucket is not None:
            host = self.vbucket_host(rest, vbucket)
            ip = host.split(':')[0]
            port = int(host.split(':')[1])
        else:
            client_node = rest.get_nodes_self()
            ip = client_node.ip
            port = client_node.memcached

        if type_ == MCD:
            client = MemcachedClient(ip, port)
        else:
            client = DcpClient(ip, port)

        return client

    def vbucket_host(self, rest, vbucket):
        info = rest.get_bucket_json()
        return info['vBucketServerMap']['serverList'][
            info['vBucketServerMap']['vBucketMap'][vbucket][0]]

    def vbucket_host_index(self, rest, vbucket):
        info = rest.get_bucket_json()
        host = self.vbucket_host(rest, vbucket)
        return info['vBucketServerMap']['serverList'].index(host)

    def flow_control_info(self, node, connection=None):
        connection = connection or DEFAULT_CONN_NAME
        mcd_client = self.mcd_client(node)
        stats = mcd_client.stats(DCP)

        acked = 'eq_dcpq:{0}:total_acked_bytes'.format(connection)
        unacked = 'eq_dcpq:{0}:unacked_bytes'.format(connection)
        sent = 'eq_dcpq:{0}:total_bytes_sent'.format(connection)

        return int(stats[acked]), int(stats[sent]), int(stats[unacked])

    def all_vb_info(self, node, table_entry=0, bucket='default', password=''):
        vbInfoMap = {}
        clientVbMap = {}

        rest = RestConnection(node)
        vbuckets = rest.get_vbuckets()

        mcd_client = self.mcd_client(node,
                                     auth_user=bucket,
                                     auth_password=password)
        failoverStats = mcd_client.stats(FAILOVER_STAT)
        seqnoStats = mcd_client.stats(VBSEQNO_STAT)

        for vb in vbuckets:
            vbucket = vb.id
            id_key = 'vb_{0}:{1}:id'.format(vbucket, table_entry)
            seq_key = 'vb_{0}:{1}:seq'.format(vbucket, table_entry)
            hi_key = 'vb_{0}:high_seqno'.format(vbucket)

            vb_uuid, seqno, high_seqno = \
                (long(failoverStats[id_key]),
                 long(failoverStats[seq_key]),
                 long(seqnoStats[hi_key]))
            vbInfoMap[vbucket] = (vb_uuid, seqno, high_seqno)

        return vbInfoMap

    def vb_info(self, node, vbucket, table_entry=0, bucket='default', password=''):
        vb_uuid, seqno = self.vb_failover_entry(node, vbucket, table_entry,
                                                bucket, password)
        high_seqno = self.vb_seqno(node, vbucket, bucket, password)
        return vb_uuid, seqno, high_seqno

    def vb_failover_entry(self, node, vbucket, table_entry=0,
                          bucket='default', password=''):
        mcd_client = self.mcd_client(node, vbucket,
                                     auth_user=bucket,
                                     auth_password=password)
        stats = mcd_client.stats(FAILOVER_STAT)
        assert len(stats) > vbucket, ENO_STAT

        id_key = 'vb_{0}:{1}:id'.format(vbucket, table_entry)
        seq_key = 'vb_{0}:{1}:seq'.format(vbucket, table_entry)
        return long(stats[id_key]), long(stats[seq_key])

    def vb_seqno(self, node, vbucket, bucket='default', password=''):
        mcd_client = self.mcd_client(node, vbucket,
                                     auth_user=bucket,
                                     auth_password=password)
        stats = mcd_client.stats(VBSEQNO_STAT)
        assert len(stats) > vbucket, ENO_STAT

        id_key = 'vb_{0}:high_seqno'.format(vbucket)
        return long(stats[id_key])

    def stop_node(self, index):
        status = False
        if self.use_cluster_run:
            status = self.crm.stop(index)
        elif len(self.servers) >= index:
            node = self.servers[index]
            shell = RemoteMachineShellConnection(node)
            shell.stop_couchbase()
            shell.disconnect()
            status = True
        return status

    def start_node(self, index):
        status = False
        if self.use_cluster_run:
            status = self.crm.start(index)
        elif len(self.servers) >= index:
            node = self.servers[index]
            shell = RemoteMachineShellConnection(node)
            shell.start_couchbase()
            shell.disconnect()
            status = True
        return status
class BreakpadBase(BaseTestCase):

    def __init__(self, args):
        super(BreakpadBase, self).__init__(args)
        self.is_setup = False
        self.crm = None
        self.input = TestInputSingleton.input
        self.use_cluster_run = self.input.param('dev', True)
        self.test = self.input.param('test', None)
        self.log_pollers = []
        self.num_nodes = 0
        self.doc_num = 0

        if self.use_cluster_run:
            self.num_nodes = self.input.param('num_nodes', NS_NUM_NODES)
            self.crm = CRManager(self.num_nodes, 0)

    def setUp(self):
        if self.test:
            if self.test != self._testMethodName:
                self.skipTest("disabled")

        self.is_setup = True

        if self.use_cluster_run:
            assert self.crm.clean()
            assert self.crm.start_nodes()

            # poll logs until the bucket is ready on each node
            for i in range(self.num_nodes):
                logp = NSLogPoller(i)
                logp.start()
                logp.setNSStartedEventFlag(True)
                assert logp.getEventQItem()
                self.log_pollers.append(logp)

        super(BreakpadBase, self).setUp()
        self.is_setup = False

    def tearDown(self):
        if self.use_cluster_run and not self.is_setup:
            assert self.crm.stop_nodes()
            self.cluster.shutdown(force=True)

            # join polling threads, which indicates the nodes have exited
            for i in range(self.num_nodes):
                self.log_pollers[i].join(30)
        else:
            super(BreakpadBase, self).tearDown()

    def load_docs(self, node, num_docs, bucket='default', password='',
                  exp=0, flags=0):
        host = node.ip + ":" + node.port
        client = SDKClient(bucket="default", hosts=[host], scheme="http")
        for i in range(num_docs):
            key = "key%s" % i
            rc = client.upsert(key, "value")

    def kill_memcached(self, index, sig=6, wait=10):  # NIX
        killed = False
        pid = self.mc_pid(index)
        if pid is None:
            return False  # no pid to kill

        # kill
        p4 = subprocess.Popen(["kill", "-" + str(sig), pid],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        rc, err = p4.communicate()
        err = err.rstrip()
        if err == '':
            killed = True

        return killed

    def mc_pid(self, index):
        pid = None
        node = "n_" + str(index)
        p = subprocess.Popen(["pgrep", "-a", "memcached"],
                             stdout=subprocess.PIPE)
        rv, err = p.communicate()
        if err is None:
            m = re.search('([0-9]+) .*' + node, rv)
            if m:
                pid = m.group(1)
        return pid

    def dmp_to_core(self, dmp_path):
        f_core = dmp_path + '.core'
        f = open(f_core, 'w')
        cmd = subprocess.Popen([MD_PATH + "/minidump-2-core", dmp_path],
                               stdout=f)
        cmd.wait()   # wait for the conversion to finish before the core is used
        f.close()
        return f_core

    def verify_core(self, f_core):
        pc = subprocess.Popen(["file", f_core], stdout=subprocess.PIPE)
        f_info, err = pc.communicate()
        f_info = f_info.rstrip()
        return "core file" in f_info
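# A hedged sketch of how the BreakpadBase helpers above might compose into a
# crash test. The subclass name, the target node index, and the .dmp path are
# illustrative assumptions; a real test would locate the minidump written by
# the crashed memcached process rather than hard-code a path.
class BreakpadExampleTest(BreakpadBase):

    def test_sigabrt_produces_core(self):
        # write a few docs so memcached on node n_0 has something in memory
        self.load_docs(self.servers[0], 100)

        # send SIGABRT (6) to the memcached process of cluster_run node n_0
        assert self.kill_memcached(0, sig=6)

        dmp_path = "/tmp/example.dmp"            # hypothetical minidump location
        core_path = self.dmp_to_core(dmp_path)   # convert via minidump-2-core
        assert self.verify_core(core_path)       # `file` should report "core file"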
class DCPBase(BaseTestCase):

    def __init__(self, args):
        super(DCPBase, self).__init__(args)
        self.is_setup = False
        self.crm = None
        self.input = TestInputSingleton.input
        self.use_cluster_run = self.input.param('dev', False)
        self.test = self.input.param('test', None)
        self.stopped_nodes = []
        self.doc_num = 0

        if self.use_cluster_run:
            num_nodes = self.input.param('num_nodes', 4)
            self.crm = CRManager(num_nodes, 0)

    def setUp(self):
        if self.test:
            if self.test != self._testMethodName:
                self.skipTest("disabled")

        self.is_setup = True

        if self.use_cluster_run:
            assert self.crm.clean()
            assert self.crm.start_nodes()
            time.sleep(5)

        super(DCPBase, self).setUp()
        self.is_setup = False

    def tearDown(self):
        for index in self.stopped_nodes:
            self.start_node(index)

        if self.use_cluster_run and not self.is_setup:
            assert self.crm.stop_nodes()
            self.cluster.shutdown(force=True)
        else:
            super(DCPBase, self).tearDown()
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster(self.servers,
                                                       master=server)

    def load_docs(self, node, vbucket, num_docs, bucket='default',
                  password='', exp=0, flags=0, update=False):
        """ uses a direct mcd client to control vbucket seqnos;
            keeps track of the vbucket and keys stored """

        mcd_client = self.mcd_client(node, vbucket, auth_user=True)

        for i in range(num_docs):
            t = 0
            while t < 5:
                key = "key%s" % self.doc_num
                try:
                    mcd_client.set(key, exp, flags, "val", vbucket)
                    break
                except MemcachedError:
                    # back off and retry with a fresh client
                    self.sleep(0.5 * t)
                    t += 1
                    mcd_client = self.mcd_client(node, vbucket, auth_user=True)

            if not update:
                self.doc_num += 1

    def dcp_client(self, node, connection_type=PRODUCER, vbucket=None,
                   name=None, auth_user="******", auth_password="******",
                   bucket_name="default"):
        """ creates a dcp client from a Node spec and opens a
            connection of the specified type """

        client = self.client_helper(node, DCP, vbucket)
        if auth_user:
            client.sasl_auth_plain(auth_user, auth_password)
            client.bucket_select(bucket_name)

        assert connection_type in (PRODUCER, CONSUMER, NOTIFIER)

        name = name or DEFAULT_CONN_NAME
        if connection_type == PRODUCER:
            response = client.open_producer(name)
        if connection_type == CONSUMER:
            response = client.open_consumer(name)
        if connection_type == NOTIFIER:
            response = client.open_notifier(name)

        assert response['status'] == SUCCESS
        return client

    def mcd_client(self, node, vbucket=None, auth_user=None, auth_password=None):
        """ creates a mcd client from a Node spec """

        client = self.client_helper(node, MCD, vbucket)
        if auth_user:
            # admin_user='******', admin_pass='******'
            client.sasl_auth_plain('cbadminbucket', 'password')
            client.bucket_select('default')
        return client

    def client_helper(self, node, type_, vbucket):
        assert type_ in (MCD, DCP)

        client = None
        ip = None
        port = None
        rest = RestConnection(node)

        if vbucket is not None:
            host = self.vbucket_host(rest, vbucket)
            ip = host.split(':')[0]
            port = int(host.split(':')[1])
        else:
            client_node = rest.get_nodes_self()
            ip = client_node.hostname.split(':')[0]
            port = client_node.memcached

        if type_ == MCD:
            client = MemcachedClient(ip, port)
        else:
            client = DcpClient(ip, port)

        return client

    def vbucket_host(self, rest, vbucket):
        info = rest.get_bucket_json()
        return info['vBucketServerMap']['serverList'][
            info['vBucketServerMap']['vBucketMap'][vbucket][0]]

    def vbucket_host_index(self, rest, vbucket):
        info = rest.get_bucket_json()
        host = self.vbucket_host(rest, vbucket)
        return info['vBucketServerMap']['serverList'].index(host)

    def flow_control_info(self, node, connection=None):
        connection = connection or DEFAULT_CONN_NAME
        mcd_client = self.mcd_client(node)
        stats = mcd_client.stats(DCP)

        acked = 'eq_dcpq:{0}:total_acked_bytes'.format(connection)
        unacked = 'eq_dcpq:{0}:unacked_bytes'.format(connection)
        sent = 'eq_dcpq:{0}:total_bytes_sent'.format(connection)

        return int(stats[acked]), int(stats[sent]), int(stats[unacked])

    def all_vb_info(self, node, table_entry=0, bucket='default', password=''):
        vbInfoMap = {}

        rest = RestConnection(node)
        vbuckets = rest.get_vbuckets()

        mcd_client = self.mcd_client(node,
                                     auth_user=bucket,
                                     auth_password=password)
        failoverStats = mcd_client.stats(FAILOVER_STAT)
        seqnoStats = mcd_client.stats(VBSEQNO_STAT)

        for vb in vbuckets:
            vbucket = vb.id
            id_key = 'vb_{0}:{1}:id'.format(vbucket, table_entry)
            seq_key = 'vb_{0}:{1}:seq'.format(vbucket, table_entry)
            hi_key = 'vb_{0}:high_seqno'.format(vbucket)

            vb_uuid, seqno, high_seqno = \
                (long(failoverStats[id_key]),
                 long(failoverStats[seq_key]),
                 long(seqnoStats[hi_key]))
            vbInfoMap[vbucket] = (vb_uuid, seqno, high_seqno)

        return vbInfoMap

    def vb_info(self, node, vbucket, table_entry=0, bucket='default', password=''):
        vb_uuid, seqno = self.vb_failover_entry(node, vbucket, table_entry,
                                                bucket, password)
        high_seqno = self.vb_seqno(node, vbucket, bucket, password)
        return vb_uuid, seqno, high_seqno

    def vb_failover_entry(self, node, vbucket, table_entry=0,
                          bucket='default', password=''):
        mcd_client = self.mcd_client(node, vbucket,
                                     auth_user=bucket,
                                     auth_password=password)
        stats = mcd_client.stats(FAILOVER_STAT)
        assert len(stats) > vbucket, ENO_STAT

        id_key = 'vb_{0}:{1}:id'.format(vbucket, table_entry)
        seq_key = 'vb_{0}:{1}:seq'.format(vbucket, table_entry)
        return long(stats[id_key]), long(stats[seq_key])

    def vb_seqno(self, node, vbucket, bucket='default', password=''):
        mcd_client = self.mcd_client(node, vbucket,
                                     auth_user=bucket,
                                     auth_password=password)
        stats = mcd_client.stats(VBSEQNO_STAT)
        assert len(stats) > vbucket, ENO_STAT

        id_key = 'vb_{0}:high_seqno'.format(vbucket)
        return long(stats[id_key])

    def stop_node(self, index):
        status = False
        if self.use_cluster_run:
            status = self.crm.stop(index)
        elif len(self.servers) >= index:
            node = self.servers[index]
            shell = RemoteMachineShellConnection(node)
            shell.stop_couchbase()
            shell.disconnect()
            status = True
        return status

    def start_node(self, index):
        status = False
        if self.use_cluster_run:
            status = self.crm.start(index)
        elif len(self.servers) >= index:
            node = self.servers[index]
            shell = RemoteMachineShellConnection(node)
            shell.start_couchbase()
            shell.disconnect()
            status = True
        return status
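# A hedged sketch of a test built on the DCPBase helpers above: it loads keys
# into one vbucket, checks that the vbucket high seqno advanced, then opens a
# DCP producer connection. The node and vbucket choices are illustrative
# assumptions, not taken from the source.
class DCPExampleTest(DCPBase):

    def test_seqno_advances_after_load(self):
        node = self.servers[0]          # assumed master node
        vbucket = 0                     # assumed vbucket

        _, _, before = self.vb_info(node, vbucket)
        self.load_docs(node, vbucket, 10)             # write 10 keys
        _, _, after = self.vb_info(node, vbucket)
        assert after >= before + 10                   # high seqno moved forward

        # a producer connection against the same node should open successfully
        dcp = self.dcp_client(node, PRODUCER, vbucket=vbucket)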