def test_MB_32114(self):
    # Regression test for MB-32114: del_with_meta on docs carrying xattrs
    # must not leave stale items behind after compaction.
    try:
        from sdk_client import SDKClient
    except ImportError:
        from sdk_client3 import SDKClient
    import couchbase.subdocument as SD

    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    if self.maxttl:
        self._expiry_pager(self.master)
    sdk_client = SDKClient(scheme='couchbase', hosts=[self.master.ip],
                           bucket='default')
    KEY_NAME = 'key1'

    for i in range(1000):
        key = KEY_NAME + str(i)
        mcd = client.memcached(key)
        mcd.set(key, 0, 0, json.dumps({'value': 'value2'}))
        # Add a system xattr so the doc carries extended attributes
        sdk_client.mutate_in(
            key,
            SD.upsert("subdoc_key", "subdoc_val",
                      xattr=True, create_parents=True))
        # Wait for the mutation to persist
        persisted = 0
        while persisted == 0:
            opaque, rep_time, persist_time, persisted, cas = \
                client.observe(key)

    start_time = time.time()
    self._load_doc_data_all_buckets(batch_size=1000)
    end_time = time.time()

    # Delete every key with a conflicting (higher) CAS via del_with_meta
    for i in range(1000):
        key = KEY_NAME + str(i)
        try:
            mcd = client.memcached(key)
            _, flags, exp, seqno, cas = mcd.getMeta(key)
            mcd.del_with_meta(key, 0, 0, 2, cas + 1)
        except MemcachedError as error:
            self.fail("Exception with del_with_meta - {0}".format(error))

    self.cluster.compact_bucket(self.master, "default")
    if self.maxttl:
        # Let any remaining docs expire before validating item counts
        time_to_sleep = (self.maxttl - (end_time - start_time)) + 20
        self.sleep(int(time_to_sleep))
    else:
        self.sleep(60)

    active_bucket_items = rest.get_active_key_count("default")
    replica_bucket_items = rest.get_replica_key_count("default")
    self.log.info('active_bucket_items: %s' % active_bucket_items)
    self.log.info('replica_bucket_items: %s' % replica_bucket_items)
    if active_bucket_items * self.num_replicas != replica_bucket_items:
        self.fail("Mismatch in data !!!")
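# The persistence wait above polls observe() with no upper bound, and the
# same pattern recurs in the tests below. A minimal sketch of a reusable
# helper with a timeout follows; the name _wait_for_persistence and the
# max_wait parameter are illustrative additions, not part of the original
# suite, and it assumes client.observe(key) returns the same 5-tuple
# unpacked throughout this file.
def _wait_for_persistence(self, client, key, max_wait=60):
    # Poll observe until the key is persisted, or fail after max_wait seconds
    end_by = time.time() + max_wait
    persisted = 0
    while persisted == 0:
        if time.time() > end_by:
            self.fail("Key %s not persisted within %ss" % (key, max_wait))
        opaque, rep_time, persist_time, persisted, cas = client.observe(key)
        if persisted == 0:
            time.sleep(0.5)
    return cas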
def test_MB_36087(self):
    # Regression test for MB-36087: del_with_meta on an evicted doc that
    # carries xattrs.
    try:
        from sdk_client import SDKClient
    except ImportError:
        from sdk_client3 import SDKClient
    import couchbase.subdocument as SD

    g_key = "test_doc"
    bucket_name = "default"
    sdk_client = SDKClient(scheme='couchbase', hosts=[self.master.ip],
                           bucket=bucket_name)
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, bucket_name)
    for i in range(self.num_items):
        key = g_key + str(i)
        mcd = client.memcached(key)
        mcd.set(key, 0, 0, json.dumps({'value': 'value2'}))
        # Add a system xattr to the doc
        sdk_client.mutate_in(
            key,
            SD.upsert("subdoc_key", "subdoc_val",
                      xattr=True, create_parents=True))

        # Wait for the key to persist
        persisted = 0
        while persisted == 0:
            opaque, rep_time, persist_time, persisted, cas = \
                client.observe(key)

        time.sleep(10)

        # Evict the key so del_with_meta runs against a non-resident doc
        try:
            mcd.evict_key(key)
        except MemcachedError as error:
            self.fail("Exception with evict key - %s" % error)

        # Delete with a conflicting (higher) CAS via del_with_meta
        try:
            mcd = client.memcached(key)
            _, flags, exp, seqno, cas = mcd.getMeta(key)
            mcd.del_with_meta(key, 0, 0, 2, cas + 1)
        except MemcachedError as error:
            self.fail("Exception with del_with_meta - {0}".format(error))
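# MB-36087 hinges on the xattr surviving eviction, so a read-back check is
# a natural follow-up. A hedged sketch is shown below; it assumes direct
# access to a Python SDK 2.x Bucket object (the SDKClient wrapper above may
# or may not expose lookup_in), and the helper name _verify_xattr is
# illustrative, not part of the original suite.
def _verify_xattr(self, bucket, key):
    import couchbase.subdocument as SD
    # Fetch the system xattr written by mutate_in in test_MB_36087
    result = bucket.lookup_in(key, SD.get("subdoc_key", xattr=True))
    self.assertEqual("subdoc_val", result[0])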
def do_basic_ops(self):
    KEY_NAME = 'key1'
    KEY_NAME2 = 'key2'
    CAS = 1234
    self.log.info('Starting basic ops')

    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)

    # MB-17231 - incr with full eviction
    rc = mcd.incr(KEY_NAME, 1)
    self.log.info('rc for incr: %s' % (rc,))

    # MB-17289 - del_with_meta
    rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
    self.log.info('rc for set: %s' % (rc,))
    cas = rc[1]

    # Wait for the mutation to persist
    persisted = 0
    while persisted == 0:
        opaque, rep_time, persist_time, persisted, cas = \
            client.observe(KEY_NAME)

    time.sleep(10)

    # Evict the key so del_with_meta runs against a non-resident doc
    try:
        mcd.evict_key(KEY_NAME)
    except MemcachedError as error:
        self.fail("Exception with evict key - {0}".format(error))

    CAS = 0xabcd
    try:
        # del_with_meta(key, exp, flags, seqno, cas)
        mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
    except MemcachedError as error:
        self.fail("Exception with del_with_meta - {0}".format(error))
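# A hedged follow-up check for the del_with_meta call above: getMeta on the
# tombstone should report the CAS that the meta-delete supplied. The 5-tuple
# unpacking mirrors the getMeta calls elsewhere in this file; the helper
# name _check_deleted_meta and the assumption that the first element is the
# deleted flag are illustrative, not documented behaviour of this wrapper.
def _check_deleted_meta(self, client, key, expected_cas):
    deleted, flags, exp, seqno, cas = client.memcached(key).getMeta(key)
    # The tombstone should carry the CAS supplied to del_with_meta
    self.assertEqual(expected_cas, cas)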
def _run_observe(self):
    tasks = []
    query_set = "true"
    mutated = False
    count = 0
    for bucket in self.buckets:
        self.cluster.create_view(self.master, self.default_design_doc,
                                 self.default_view, bucket,
                                 self.wait_timeout * 2)
        client = VBucketAwareMemcached(RestConnection(self.master), bucket)
        self.max_time = timedelta(microseconds=0)
        if self.mutate_by == "multi_set":
            key_val = self._create_multi_set_batch()
            client.setMulti(0, 0, key_val)
        keys = ["observe%s" % (i) for i in range(self.num_items)]
        for key in keys:
            mutated = False
            while not mutated and count < 60:
                try:
                    if self.mutate_by == "set":
                        client.set(key, 0, 0, "setvalue")
                    elif self.mutate_by == "append":
                        client.memcached(key).append(key, "append")
                    elif self.mutate_by == "prepend":
                        client.memcached(key).prepend(key, "prepend")
                    elif self.mutate_by == "incr":
                        client.memcached(key).incr(key, 1)
                    elif self.mutate_by == "decr":
                        client.memcached(key).decr(key)
                    mutated = True
                    t_start = datetime.now()
                except MemcachedError as error:
                    if error.status == 134:
                        self.log.error("Memcached error 134, wait for 5 "
                                       "seconds and then try again")
                        count += 1
                        time.sleep(5)
                    else:
                        # Any other error would otherwise retry forever
                        raise
            # Poll observe until this key's mutation is persisted
            persisted = 0
            while persisted == 0:
                opaque, rep_time, persist_time, persisted, cas = \
                    client.observe(key)
            t_end = datetime.now()
            if self.max_time <= (t_end - t_start):
                self.max_time = t_end - t_start
                self.log.info("Max time taken for observe is: %s"
                              % self.max_time)
                self.log.info("Cas value: %s" % (cas,))
        query = {"stale": "false", "full_set": "true",
                 "connection_timeout": 600000}
        self.cluster.query_view(self.master, "dev_Doc1",
                                self.default_view.name, query,
                                self.num_items, bucket,
                                timeout=self.wait_timeout)
        self.log.info("Observe validation: view %s in design doc dev_Doc1 "
                      "and in bucket %s" % (self.default_view, bucket))
        # Check whether observe has to run with delete, or with delete in
        # parallel with observe
        if len(self.observe_with) > 0:
            if self.observe_with == "delete":
                self.log.info("Deleting 0 - %s items"
                              % (self.num_items // 2))
                self._load_doc_data_all_buckets('delete', 0,
                                                self.num_items // 2)
                query_set = "true"
            elif self.observe_with == "delete_parallel":
                self.log.info("Deleting in parallel 0 - %s items"
                              % (self.num_items // 2))
                tasks = self._async_load_doc_data_all_buckets(
                    'delete', 0, self.num_items // 2)
                query_set = "false"
            for key in keys:
                opaque, rep_time, persist_time, persisted, cas = \
                    client.memcached(key).observe(key)
                self.log.info("Key: %s, Persisted: %s" % (key, persisted))
            if self.observe_with == "delete_parallel":
                for task in tasks:
                    task.result()
            query = {"stale": "false", "full_set": query_set,
                     "connection_timeout": 600000}
            self.cluster.query_view(self.master, "dev_Doc1",
                                    self.default_view.name, query,
                                    self.num_items // 2, bucket,
                                    timeout=self.wait_timeout)
            self.log.info("Observe validation: view %s in design doc "
                          "dev_Doc1 and in bucket %s"
                          % (self.default_view, self.default_bucket_name))

"""test_observe_basic_data_load_delete will test observer basic scenario