def setUp(self):
    """Create the DCP producer and direct memcached connections used by
    the eviction tests.

    Side effects: sets self.dcp_client (authed producer on 'default'),
    self.rest and self.client.
    """
    super(EvictionDCP, self).setUp()
    # 11210 is the memcached/KV service port; the literal needs no int() cast.
    self.dcp_client = DcpClient(self.master.ip, 11210)
    self.dcp_client.sasl_auth_plain(self.master.rest_username,
                                    self.master.rest_password)
    self.dcp_client.bucket_select('default')
    # delete_times=True asks the producer to emit delete-time metadata.
    self.dcp_client.open_producer(name='eviction', delete_times=True)
    self.rest = RestConnection(self.servers[0])
    self.client = MemcachedClientHelper.direct_client(self.master, 'default')
def initialise_cluster_connections(self):
    """Connect to the cluster and build the per-node DCP client map.

    Side effects: sets self.vb_map (vbucket -> server index map),
    self.dcp_log_data and self.dcp_client_dict (node index ->
    {'stream': DcpClient, 'node': 'host:kv_port'}).

    Returns the initial DcpClient from self.connect().
    """
    self.dcp_client = self.connect()
    config_json = json.loads(DcpClient.get_config(self.dcp_client)[2])
    self.vb_map = config_json['vBucketServerMap']['vBucketMap']

    self.dcp_client_dict = dict()

    if self.log_path:
        self.log_path = os.path.normpath(self.log_path)
    self.dcp_log_data = LogData(self.log_path, self.vbuckets, self.keep_logs)

    # TODO: Remove globals and restructure (possibly into a class) to allow
    #       for multiple instances of the client (multiple bucket connections)
    for index, server in enumerate(
            config_json['vBucketServerMap']['serverList']):
        host = self.check_valid_host(server.split(':')[0],
                                     'Server Config Cluster Map')
        # 'kv' may be absent for non-KV nodes; skip those.
        port = config_json['nodesExt'][index]['services'].get('kv')
        if port is not None:
            node = '{0}:{1}'.format(host, port)
            # NOTE(review): the original had an `if 'thisNode' in ...` check
            # whose two branches were identical; every node shares the same
            # stream, so the dead conditional is collapsed here.
            self.dcp_client_dict[index] = {
                'stream': self.dcp_client,
                'node': node
            }
    return self.dcp_client
def handle_rollback(self, dcpStream):
    """Handle a DCP rollback response by retrying the stream add.

    Picks the closest (uuid, seqno) failover-log entry at or below the
    requested rollback seqno — preferring the locally persisted JSON logs
    when failover_logging is enabled, falling back to the server's
    failover log otherwise — then re-adds the stream for that vbucket.
    """
    vb = dcpStream.vbucket
    requested_rollback_no = dcpStream.rollback_seqno

    failover_values = None
    # If argument to use JSON log files
    if self.failover_logging:
        log_fetch = self.dcp_log_data.get_failover_logs([vb])
        data = log_fetch.get(str(vb), None)
        if data is not None:
            # If the failover log is not empty, use it (newest seqno first).
            failover_values = sorted(data, key=lambda x: x[1], reverse=True)

    # Otherwise (or if the local log was empty) get it from the server.
    # This single fallback replaces two duplicated fetch blocks.
    if failover_values is None:
        failover_fetch = DcpClient.get_failover_log(
            self.select_dcp_client(vb), vb)
        failover_values = failover_fetch.get('value')

    # Default values so that if they don't get set inside the loop,
    # 0 is used for both seqno and uuid.
    failover_seq_num = 0
    failover_vbucket_uuid = 0
    for failover_log_entry in failover_values:
        if failover_log_entry[1] <= requested_rollback_no:
            failover_seq_num = failover_log_entry[1]  # closest seqno in log
            failover_vbucket_uuid = failover_log_entry[0]  # and its UUID
            break

    # add_streams expects per-vbucket lists; we retry a single vbucket.
    new_seq_no = [failover_seq_num]
    new_uuid = [failover_vbucket_uuid]

    self.log.info(
        'Retrying stream add on vb %s with seqs %s and uuids %s'
        % (vb, new_seq_no, new_uuid))
    # NOTE: This can cause continuous rollbacks making client side recursive
    # dependent on failover logs.
    self.log.info("check for recursive loop")
    return self.add_streams([vb], new_seq_no, self.end_seq_no, new_uuid,
                            self.vb_retry, self.filter_file)
def client_helper(self, node, type_, vbucket):
    """Return a client of the requested kind (MCD or DCP) for *node*.

    If *vbucket* is given, the client targets the host that owns that
    vbucket; otherwise it targets the node's own memcached port.
    """
    self.assertTrue(type_ in (MCD, DCP), msg="type not found")

    rest = RestConnection(node)
    if vbucket is None:
        # No vbucket requested: talk directly to this node's memcached.
        own_node = rest.get_nodes_self()
        ip = own_node.hostname.split(':')[0]
        port = own_node.memcached
    else:
        # Resolve the host:port that currently owns the vbucket.
        endpoint = self.vbucket_host(rest, vbucket).split(':')
        ip = endpoint[0]
        port = int(endpoint[1])

    if type_ == MCD:
        return MemcachedClient(ip, port)
    return DcpClient(ip, port)
def initiate_connection(self):
    """Open, authenticate and configure a DCP producer connection to the
    cluster master.

    Applies the optional knobs (noop interval, opcode dump, forced
    compression, expiry opcodes, stream-id) and returns the DcpClient.
    """
    self.host = self.cluster.master.ip
    self.port = 11210  # memcached/KV service port
    timeout = self.timeout

    dcp_client = DcpClient(self.host, self.port,
                           timeout=timeout,
                           do_auth=False)
    self.log.info("DCP client created")

    try:
        dcp_client.sasl_auth_plain(
            self.cluster.master.rest_username,
            self.cluster.master.rest_password)
    except MemcachedError as err:
        # Include the actual failure cause (the original dropped `err`).
        self.log.info("DCP connection failure: %s" % err)

    self.check_for_features(dcp_client)

    dcp_client.bucket_select(str(self.bucket.name))
    self.log.info("Successfully AUTHed to %s" % self.bucket)

    name = "simple_dcp_client " + str(uuid.uuid4())
    response = dcp_client.open_producer(name,
                                        xattr=self.xattrs,
                                        delete_times=self.delete_times,
                                        collections=self.collections)
    assert response['status'] == SUCCESS
    # open_producer() was called above, so this is a producer connection
    # (the original message incorrectly said "consumer").
    self.log.info("Opened DCP producer connection")

    response = dcp_client.general_control("enable_noop", "true")
    assert response['status'] == SUCCESS
    self.log.info("Enabled NOOP")

    if self.noop_interval:
        noop_interval = str(self.noop_interval)
        response2 = dcp_client.general_control("set_noop_interval",
                                               noop_interval)
        assert response2['status'] == SUCCESS
        self.log.info("NOOP interval set to %s" % noop_interval)

    if self.opcode_dump:
        dcp_client.opcode_dump_control(True)

    if self.compression > 1:
        response = dcp_client.general_control("force_value_compression",
                                              "true")
        assert response['status'] == SUCCESS
        self.log.info("Forcing compression on connection")

    if self.enable_expiry:
        response = dcp_client.general_control("enable_expiry_opcode",
                                              "true")
        assert response['status'] == SUCCESS
        self.log.info("Enabled Expiry Output")

    if self.enable_stream_id:
        response = dcp_client.general_control("enable_stream_id", "true")
        assert response['status'] == SUCCESS
        self.log.info("Enabled Stream-ID")

    return dcp_client
class EvictionDCP(EvictionBase, DCPBase):
    """Eviction tests driven through a DCP producer stream.

    Loads documents with a TTL, waits for expiry (and optionally deletes
    some keys), then streams each vbucket over DCP and verifies that
    expiration/deletion opcodes account for every key.
    """

    def setUp(self):
        super(EvictionDCP, self).setUp()
        # 11210 is the memcached/KV service port; the literal needs no cast.
        self.dcp_client = DcpClient(self.master.ip, 11210)
        self.dcp_client.sasl_auth_plain(self.master.rest_username,
                                        self.master.rest_password)
        self.dcp_client.bucket_select('default')
        self.dcp_client.open_producer(name='eviction', delete_times=True)
        self.rest = RestConnection(self.servers[0])
        self.client = MemcachedClientHelper.direct_client(
            self.master, 'default')

    def tearDown(self):
        super(EvictionDCP, self).tearDown()

    def test_stream_eviction(self):
        # eviction.evictionkv.EvictionDCP.test_stream_eviction,dgm_run=True,eviction_policy=fullEviction
        vbuckets = self.rest.get_vbuckets()

        # Load docs; the trailing 10 is the expiry (seconds) for the load.
        doc_gen = BlobGenerator('dcpdata', 'dcpdata-', self.value_size,
                                end=self.num_items)
        self._load_all_buckets(self.master, doc_gen, "create", 10)
        # sleep for 10 seconds so the TTL elapses
        time.sleep(10)

        # get the item count
        item_count = self.rest.get_bucket(self.buckets[0]).stats.itemCount
        self.assertEquals(item_count, self.num_items)

        expired_keys = []
        # check if all the keys expired
        keys = ["dcpdata" + str(i) for i in range(1000)]

        time.sleep(10)
        # A get() on an expired key triggers expiry processing; a successful
        # get means the key did not expire as expected, which we log.
        for key in keys:
            try:
                self.client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.log.info(msg.format(10, key, 10))
            except mc_bin_client.MemcachedError as error:
                # status 1 == key not found (expired as expected)
                self.assertEquals(error.status, 1)

        for vb in vbuckets[0:self.vbuckets]:
            vbucket = vb.id
            vb_uuid, _, high_seqno = self.vb_info(self.servers[0],
                                                 vbucket,
                                                 bucket=self.buckets[0])
            stream = self.dcp_client.stream_req(vbucket, 0, 0,
                                                high_seqno, vb_uuid)
            self.dcp_client.general_control("enable_expiry_opcode", "true")
            responses = stream.run()
            for response in responses:
                if response['opcode'] == constants.CMD_EXPIRATION:
                    expired_keys.append(response['key'])

        item_count = self.rest.get_bucket(self.buckets[0]).stats.itemCount
        self.assertEquals(item_count, 0)

        check_values = set(keys).intersection(
            expired_keys)  # check if any key is not expired
        self.assertEquals(len(check_values), self.num_items)

    def test_stream_deletioneviction(self):
        # eviction.evictionkv.EvictionDCP.test_stream_deletioneviction,dgm_run=True,eviction_policy=fullEviction
        # delete some keys and expire other keys
        vbuckets = self.rest.get_vbuckets()
        KEY_ROOT = 'dcpdata'

        # Load docs with a 40s expiry.
        doc_gen = BlobGenerator(KEY_ROOT, 'dcpdata-', self.value_size,
                                end=self.num_items)
        self._load_all_buckets(self.master, doc_gen, "create", 40)
        time.sleep(10)

        # get the item count
        item_count = self.rest.get_bucket(self.buckets[0]).stats.itemCount
        self.assertEquals(item_count, self.num_items)

        keys = ["dcpdata" + str(i) for i in range(1000)]

        # delete few keys (random suffixes; duplicates simply hit the
        # already-deleted path below)
        keys_to_be_deleted = []
        for _ in range(100):
            # NOTE: the original bound this to the name `int`, shadowing
            # the builtin; renamed to avoid that.
            suffix = random.randint(0, 1000)
            key = KEY_ROOT + str(suffix)
            try:
                self.client.delete(key)
                keys_to_be_deleted.append(key)
                keys.remove(key)
            except mc_bin_client.MemcachedError as error:
                # if key is already deleted then ignore the error
                self.assertEquals(error.status, 1)

        expired_keys = []
        deleted_keys = []
        time.sleep(40)

        # check if other the keys expired
        for key in keys:
            try:
                self.client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.log.info(msg.format(40, key, 40))
            except mc_bin_client.MemcachedError as error:
                self.assertEquals(error.status, 1)

        for vb in vbuckets[0:self.vbuckets]:
            vbucket = vb.id
            vb_uuid, _, high_seqno = self.vb_info(self.servers[0],
                                                 vbucket,
                                                 bucket=self.buckets[0])
            stream = self.dcp_client.stream_req(vbucket, 0, 0,
                                                high_seqno, vb_uuid)
            self.dcp_client.general_control("enable_expiry_opcode", "true")
            responses = stream.run()
            for response in responses:
                if response['opcode'] == constants.CMD_EXPIRATION:
                    expired_keys.append(response['key'])
                elif response['opcode'] == constants.CMD_DELETION:
                    deleted_keys.append(response['key'])

        item_count = self.rest.get_bucket(self.buckets[0]).stats.itemCount
        self.assertEquals(item_count, 0)
        self.assertEquals(len(keys_to_be_deleted), len(deleted_keys))
        self.assertEquals(len(keys), len(expired_keys))