Example 1
    def test_doc_key_size(self):
        """
        Insert document key with min and max key size on each available
        collection and validate
        :return:
        """
        min_doc_gen = doc_generator("test_min_key_size", 0, self.num_items,
                                    key_size=1,
                                    doc_size=self.doc_size,
                                    mix_key_size=False,
                                    randomize_doc_size=False)
        max_doc_gen = doc_generator("test_max_key_size", 0, self.num_items,
                                    key_size=245,
                                    doc_size=self.doc_size,
                                    mix_key_size=False,
                                    randomize_doc_size=False)
        # Dict to keep track of all inserted CAS values
        # Format: known_cas[CAS] = list(vb_lists)
        known_cas = dict()

        # Client to insert docs under different collections
        client = SDKClient([self.cluster.master], self.bucket,
                           compression_settings=self.sdk_compression)

        for doc_gen in [min_doc_gen, max_doc_gen]:
            while doc_gen.has_next():
                key, value = doc_gen.next()
                for _, scope in self.bucket.scopes.items():
                    for _, collection in scope.collections.items():
                        client.select_collection(scope.name, collection.name)
                        result = client.crud("create", key, value, self.maxttl,
                                             durability=self.durability_level,
                                             timeout=self.sdk_timeout,
                                             time_unit="seconds")
                        if result["status"] is False:
                            self.log_failure("Doc create failed for key '%s' "
                                             "collection::scope %s::%s - %s"
                                             % (key,
                                                scope.name,
                                                collection.name,
                                                result))
                        else:
                            self.__validate_cas_for_key(key, result, known_cas)
                            collection.num_items += 1
        # Close SDK connection
        client.close()

        # Validate doc count as per bucket collections
        self.bucket_util.validate_docs_per_collections_all_buckets()
        self.validate_test_failure()
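
The helper __validate_cas_for_key used above is not shown in this example. A minimal sketch of what such a CAS-uniqueness check might look like, assuming result["cas"] carries the CAS returned by the create and that some key-to-vbucket helper is available (get_vbucket_num_for_key below is a hypothetical stand-in):

    def __validate_cas_for_key(self, key, result, known_cas):
        cas = result["cas"]
        # Hypothetical helper; use whatever key->vbucket mapping the framework provides
        vb_num = self.bucket_util.get_vbucket_num_for_key(key)
        if cas in known_cas and vb_num in known_cas[cas]:
            self.log_failure("Duplicate CAS %s seen for vb %s (key '%s')"
                             % (cas, vb_num, key))
        known_cas.setdefault(cas, []).append(vb_num)
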
Example 2
    def __init__(self,
                 cluster,
                 bucket_util,
                 num_idx=10,
                 server_port=8095,
                 querycount=100,
                 batch_size=50):
        self.port = server_port
        self.failed_count = 0
        self.success_count = 0
        self.rejected_count = 0
        self.error_count = 0
        self.cancel_count = 0
        self.timeout_count = 0
        self.total_query_count = 0
        self.concurrent_batch_size = batch_size
        self.total_count = querycount
        self.num_indexes = num_idx
        self.bucket_util = bucket_util
        self.cluster = cluster

        self.sdkClient = SDKClient(cluster.query_nodes, None)
        self.cluster_conn = self.sdkClient.cluster
        self.stop_run = False
        self.queries = list()
        self.indexes = dict()
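        # Round-robin over the (bucket, scope, collection) combinations,
        # creating one index per combination until num_indexes is reached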
        i = 0
        while i < self.num_indexes:
            for b in self.cluster.buckets:
                for s in self.bucket_util.get_active_scopes(b,
                                                            only_names=True):
                    for c in sorted(
                            self.bucket_util.get_active_collections(
                                b, s, only_names=True)):
                        self.idx_q = indexes[i % len(indexes)].format(
                            "idx", i, b.name, s, c)
                        self.indexes.update(
                            {"idx" + str(i): (self.idx_q, b.name, s, c)})
                        self.queries.append(queries[i % len(indexes)].format(
                            b.name, s, c))
                        i += 1
                        if i >= self.num_indexes:
                            break
                    if i >= self.num_indexes:
                        break
                if i >= self.num_indexes:
                    break
Example 3
    def _direct_client(self, server, bucket, timeout=30):
        # CREATE SDK CLIENT
        if self.sdk_client_type == "java":
            try:
                from sdk_client3 import SDKClient
                scheme = "couchbase"
                host = self.cluster.master.ip
                if self.cluster.master.ip == "127.0.0.1":
                    scheme = "http"
                    host = "{0}:{1}".format(self.cluster.master.ip,
                                            self.cluster.master.port)
                return SDKClient(scheme=scheme,
                                 hosts=[host],
                                 bucket=bucket,
                                 password=self.cluster.master.rest_password)
            except Exception as ex:
                self.log.error(
                    "cannot load sdk client due to error {0}".format(str(ex)))
Example 4
    def key_not_exists_test(self):
        client = SDKClient(self.rest, self.bucket)
        load_gen = doc_generator(self.key, 0, 1, doc_size=256)
        key, val = load_gen.next()

        for _ in range(1500):
            result = client.crud("create",
                                 key,
                                 val,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Create failed: %s" % result)
            create_cas = result["cas"]

            # Delete and verify get fails
            result = client.crud("delete",
                                 key,
                                 durability=self.durability_level,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Delete failed: %s" % result)
            elif result["cas"] <= create_cas:
                self.log_failure("Delete returned invalid cas: %s" % result)

            result = client.crud("read", key, timeout=self.sdk_timeout)
            if result["status"] is True:
                self.log_failure("Read succeeded after delete: %s" % result)
            elif DurableExceptions.KeyNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during read "
                                 "for non-exists key: %s" % result)

            # CAS errors should not make the test sleep for 10 seconds;
            # also verify that the correct error is being thrown
            result = client.crud("replace",
                                 key,
                                 val,
                                 exp=60,
                                 timeout=self.sdk_timeout,
                                 cas=create_cas)
            if result["status"] is True:
                self.log_failure("Replace succeeded after delete: %s" % result)
            elif DurableExceptions.KeyNotFoundException \
                    not in str(result["error"]):
                self.log_failure("Invalid exception during replace "
                                 "for non-existent key: %s" % result)

            self.validate_test_failure()
Example 5
    def setUp(self):
        super(basic_ops, self).setUp()
        self.test_log = logging.getLogger("test")
        self.key = 'test_docs'.rjust(self.key_size, '0')

        nodes_init = self.cluster.servers[1:self.nodes_init] if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] + nodes_init)
        self.bucket_util.add_rbac_user()
        
        if self.default_bucket:
            self.bucket_util.create_default_bucket(
                replica=self.num_replicas,
                compression_mode=self.compression_mode,
                ram_quota=100)
           
        time.sleep(10)
        self.def_bucket = self.bucket_util.get_all_buckets()
        self.client = VBucketAwareMemcached(
            RestConnection(self.cluster.master), self.def_bucket[0])
        self.__durability_level()
        self.create_Transaction()
        self._stop = threading.Event()
        self.log.info("==========Finished Basic_ops base setup========")
Example 6
    def corrupt_cas_is_healed_on_reboot(self):
        self.log.info('Start corrupt_cas_is_healed_on_reboot')

        KEY_NAME = 'key1'

        rest = RestConnection(self.master)
        client = SDKClient(rest, 'default')

        # set a key
        client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,
                                       json.dumps({'value': 'value1'}))

        # figure out which node it is on
        mc_active = client.memcached(KEY_NAME)

        # set the CAS to -2 and then mutate to increment to -1
        # and then it should stop there
        self._corrupt_max_cas(mc_active, KEY_NAME)

        # print 'max cas k2', mc_active.getMeta('k2')[4]

        # CAS should be 0 now, do some gets and sets to verify
        # that nothing bad happens
        # self._restart_memcache('default')
        remote = RemoteMachineShellConnection(self.master)
        remote.stop_server()
        time.sleep(30)
        remote.start_server()
        time.sleep(30)

        rest = RestConnection(self.master)
        client = SDKClient(rest, 'default')
        mc_active = client.memcached(KEY_NAME)

        maxCas = mc_active.getMeta(KEY_NAME)[4]
        self.assertTrue(maxCas == 0,
                        'max cas after reboot is {0} != 0'.format(maxCas))
Example 7
    def setUp(self):
        super(basic_ops, self).setUp()
        self.test_log = logging.getLogger("test")
        nodes_init = self.cluster.servers[
            1:self.nodes_init] if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.bucket_util.add_rbac_user()

        if self.default_bucket:
            self.bucket_util.create_default_bucket(
                replica=self.num_replicas,
                compression_mode=self.compression_mode,
                ram_quota=100,
                bucket_type=self.bucket_type)

        time.sleep(10)
        self.def_bucket = self.bucket_util.get_all_buckets()
        self.client = VBucketAwareMemcached(
            RestConnection(self.cluster.master), self.def_bucket[0])
        self.__durability_level()

        self.operation = self.input.param("operation", "afterAtrPending")
        # create load
        self.value = {'value': 'value1'}
        self.content = self.client.translate_to_json_object(self.value)

        self.docs = []
        self.keys = []
        for i in range(self.num_items):
            key = "test_docs-" + str(i)
            doc = Tuples.of(key, self.content)
            self.keys.append(key)
            self.docs.append(doc)

        self.transaction_config = Transaction().createTransactionConfig(
            self.transaction_timeout, self.durability)
        self.log.info("==========Finished Basic_ops base setup========")
Example 8
    def basic_concurrency(self):
        self.crash = self.input.param("crash", False)
        
        self.doc_gen(self.num_items)

        # run transaction
        thread = threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "create", self.docs, self.transaction_commit, self.update_count, True, False))
        thread.start()
        self.sleep(1)
        
        if self.crash:
            self.client.cluster.shutdown() 
            self.transaction.close()
            print "going to create a new transaction"
            self.client1 = VBucketAwareMemcached(RestConnection(self.cluster.master), self.def_bucket[0])
            self.create_Transaction(self.client1)
            self.sleep(self.transaction_timeout+60)
            exception = Transaction().RunTransaction(self.transaction, [self.client1.collection], self.docs, [], [], self.transaction_commit, self.sync, self.update_count)
            if exception:
                time.sleep(60)
                
            self.verify_doc(self.num_items, self.client1)
            self.client1.close() 

        else:
            key = "test_docs-0"
            # insert will fail
            result = self.client.insert(key, "value")
            self.assertEqual(result["status"], False)
            
            # Update should pass
            result = self.client.upsert(key,"value")
            self.assertEqual(result["status"], True) 
            
            # delete should pass
            result = self.client.delete(key)
            self.assertEqual(result["status"], True) 
        
        thread.join()
Example 9
    def perform_create_deletes():
        index = 0
        client = SDKClient([self.cluster.master], self.cluster.buckets[0])
        self.log.info("Starting ops to create tomb_stones")
        while not self.stop_thread:
            key = "temp_key--%s" % index
            result = client.crud("create", key, "")
            if result["status"] is False:
                self.log_failure("Key %s create failed: %s"
                                 % (key, result))
                break
            result = client.crud("delete", key)
            if result["status"] is False:
                self.log_failure("Key %s delete failed: %s"
                                 % (key, result))
                break
            index += 1
        client.close()
        self.log.info("Total keys deleted: %s" % index)
Example 10
    def MB36948(self):
        node_to_stop = self.servers[0]
        self.log.info("Adding index/query node")
        self.task.rebalance([self.cluster.master], [self.servers[2]], [],
                            services=["n1ql,index"])
        self.log.info("Creating SDK client connection")
        client = SDKClient([self.cluster.master],
                           self.bucket_util.buckets[0],
                           compression_settings=self.sdk_compression)

        self.log.info("Stopping memcached on: %s" % node_to_stop)
        ssh_conn = RemoteMachineShellConnection(node_to_stop)
        err_sim = CouchbaseError(self.log, ssh_conn)
        err_sim.create(CouchbaseError.STOP_MEMCACHED)

        result = client.crud("create", "abort1", "abort1_val")
        if not result["status"]:
            self.log_failure("Async SET failed")

        result = client.crud("update",
                             "abort1",
                             "abort1_val",
                             durability=self.durability_level,
                             timeout=3,
                             time_unit="seconds")
        if result["status"]:
            self.log_failure("Sync write succeeded")
        if SDKException.DurabilityAmbiguousException not in result["error"]:
            self.log_failure("Invalid exception for sync_write: %s" % result)

        self.log.info("Resuming memcached on: %s" % node_to_stop)
        err_sim.revert(CouchbaseError.STOP_MEMCACHED)

        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(1)

        self.log.info("Closing ssh & SDK connections")
        ssh_conn.disconnect()
        client.close()

        self.validate_test_failure()
Example 11
    def do_basic_ops(self):
        KEY_NAME = 'key1'
        KEY_NAME2 = 'key2'
        self.log.info('Starting basic ops')

        default_bucket = self.bucket_util.get_all_buckets()[0]
        sdk_client = SDKClient([self.cluster.master],
                               default_bucket,
                               compression_settings=self.sdk_compression)
        # mcd = client.memcached(KEY_NAME)

        # MB-17231 - incr with full eviction
        rc = sdk_client.incr(KEY_NAME, delta=1)
        self.log.info('rc for incr: {0}'.format(rc))

        # MB-17289 del with meta
        rc = sdk_client.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        self.log.info('set is: {0}'.format(rc))
        # cas = rc[1]

        # wait for it to persist
        persisted = 0
        while persisted == 0:
            opaque, rep_time, persist_time, persisted, cas = \
                sdk_client.observe(KEY_NAME)

        try:
            rc = sdk_client.evict_key(KEY_NAME)
        except MemcachedError as exp:
            self.fail("Exception with evict meta - {0}".format(exp))

        CAS = 0xabcd
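        # NOTE: 'mcd' below is a direct memcached connection (see the
        # commented-out client.memcached(KEY_NAME) call above); it must be
        # initialised before del_with_meta can be issued as written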
        try:
            # key, exp, flags, seqno, cas
            rc = mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
        except MemcachedError as exp:
            self.fail("Exception with del_with meta - {0}".format(exp))
Example 12
class basic_ops(BaseTestCase):
    def setUp(self):
        super(basic_ops, self).setUp()
        self.test_log = logging.getLogger("test")
        self.key = 'test_docs'.rjust(self.key_size, '0')

        nodes_init = self.cluster.servers[1:self.nodes_init] if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] + nodes_init)
        self.bucket_util.add_rbac_user()
        
        if self.default_bucket:
            self.bucket_util.create_default_bucket(
                replica=self.num_replicas,
                compression_mode=self.compression_mode,
                ram_quota=100)
           
        time.sleep(10)
        self.def_bucket = self.bucket_util.get_all_buckets()
        self.client = VBucketAwareMemcached(
            RestConnection(self.cluster.master), self.def_bucket[0])
        self.__durability_level()
        self.create_Transaction()
        self._stop = threading.Event()
        self.log.info("==========Finished Basic_ops base setup========")

    def tearDown(self):
        self.client.close()
        super(basic_ops, self).tearDown()
        
    def __durability_level(self):
        if self.durability_level == "MAJORITY":
            self.durability = 1
        elif self.durability_level == "MAJORITY_AND_PERSIST_ON_MASTER":
            self.durability = 2
        elif self.durability_level == "PERSIST_TO_MAJORITY":
            self.durability = 3
        elif self.durability_level == "ONLY_NONE":
            self.durability = 4
        else:
            self.durability = 0

    def get_doc_generator(self, start, end):
        age = range(5)
        first = ['james', 'sharon']
        body = [''.rjust(self.doc_size - 10, 'a')]
        template = '{{ "age": {0}, "first_name": "{1}", "body": "{2}"}}'
        generator = DocumentGenerator(self.key, template, age, first, body, start=start,
                                      end=end)
        return generator
    
    def set_exception(self, exception):
        self.exception = exception
        raise BaseException("Got an exception {}".format(self.exception))
        
    def __chunks(self, l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]
            
    def create_Transaction(self, client=None):
        if not client:
            client = self.client
        transaction_config = Transaction().createTransactionConfig(self.transaction_timeout, self.durability)
        try:
            self.transaction = Transaction().createTansaction(client.cluster, transaction_config)
        except Exception as e:
            self.set_exception(e)
        
    def __thread_to_transaction(self, transaction, op_type, doc, txn_commit, update_count=1, sync=True, set_exception=True, client=None):
        if not client:
           client = self.client 
        if op_type == "create":
            exception = Transaction().RunTransaction(transaction, [client.collection], doc, [], [], txn_commit, sync, update_count)
        elif op_type == "update":
            self.test_log.info("updating all the keys through threads")
            exception = Transaction().RunTransaction(transaction, [client.collection], [], doc, [], txn_commit, sync, update_count)
        elif op_type == "delete":
            exception = Transaction().RunTransaction(transaction, [client.collection], [], [], doc, txn_commit, sync, update_count)
        if set_exception:
            if exception:
                self.set_exception("Failed")
 
        
    def doc_gen(self, num_items, start=0, value={'value': 'value1'}):
        self.docs = []
        self.keys = []
        self.content = self.client.translate_to_json_object(value)
        for i in range(start, num_items):
            key = "test_docs-" + str(i)
            doc = Tuples.of(key, self.content)
            self.keys.append(key)
            self.docs.append(doc)
            
    def verify_doc(self, num_items, client):
        for i in range(num_items):
            key = "test_docs-" + str(i)
            result = client.read(key)
            actual_val = self.client.translate_to_json_object(result['value'])
            self.assertEquals(self.content, actual_val)
        
        
    def test_MultiThreadTxnLoad(self):
        # Atomicity.basic_retry.basic_ops.test_MultiThreadTxnLoad,num_items=1000
        ''' Load data through txn, update half the items through different
        threads and delete half the items through different threads.
        If update_retry, update and delete the same key in two different
        transactions and make sure the update fails '''
        
        self.num_txn = self.input.param("num_txn", 9)
        self.update_retry = self.input.param("update_retry", False)
        
        self.doc_gen(self.num_items)
        threads = []
         
        # create the docs   
        exception = Transaction().RunTransaction(self.transaction, [self.client.collection], self.docs, [], [], self.transaction_commit, True, self.update_count)
        if exception:
            self.set_exception("Failed")
            
        if self.update_retry:
            threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "delete", self.keys, self.transaction_commit, self.update_count)))
            threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "update", self.keys, self.transaction_commit, self.update_count)))    
        
        else:
            update_docs = self.__chunks(self.keys[:self.num_items/2], self.num_txn)    
            delete_docs = self.__chunks(self.keys[self.num_items/2:], self.num_txn)
                
            for keys in update_docs:
                threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "update", keys, self.transaction_commit, self.update_count)))
            
            for keys in delete_docs:
                threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "delete", keys, self.transaction_commit, self.update_count)))
        
        for thread in threads:
            thread.start()
            
        for thread in threads:
            thread.join() 
        
        self.sleep(60)
        if self.update_retry: 
            for key in self.keys:
                result = self.client.read(key)
                self.assertEquals(result['status'], False)
                
        else:   
            self.value = {'mutated':1, 'value':'value1'}
            self.content = self.client.translate_to_json_object(self.value)
                        
            self.verify_doc(self.num_items/2, self.client)
                
            for key in self.keys[self.num_items/2:]:
                result = self.client.read(key)
                self.assertEquals(result['status'], False)
                       
 
    def test_basic_retry(self):
        ''' Load a set of data into the cluster, update it through two
        different threads and make sure the transaction maintains
        the order of updates '''
        self.write_conflict = self.input.param("write_conflict", 2)
        
        self.test_log.info("going to create and execute the task")
        self.gen_create = self.get_doc_generator(0, self.num_items)
        task = self.task.async_load_gen_docs_atomicity(
            self.cluster, self.def_bucket, self.gen_create, "create",
            exp=0, batch_size=10, process_concurrency=8,
            replicate_to=self.replicate_to, persist_to=self.persist_to,
            timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
            update_count=self.update_count,
            transaction_timeout=self.transaction_timeout,
            commit=True, durability=self.durability_level, sync=self.sync)
    
        self.task.jython_task_manager.get_task_result(task)
        
        self.test_log.info("get all the keys in the cluster")
        
        self.doc_gen(self.num_items)
                
        threads = []
        for update_count in [2, 4, 6]:
            threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "update", self.keys, self.transaction_commit, update_count)))
        # Add verification task
        if self.transaction_commit:
            self.update_count = 6
        else:
            self.update_count = 0
            
        for thread in threads:
            thread.start()
            self.sleep(2)
         
        for thread in threads:
            thread.join()
        
        self.sleep(10)    
            
        task = self.task.async_load_gen_docs_atomicity(
            self.cluster, self.def_bucket, self.gen_create, "verify",
            exp=0, batch_size=10, process_concurrency=8,
            replicate_to=self.replicate_to, persist_to=self.persist_to,
            timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
            update_count=self.update_count,
            transaction_timeout=self.transaction_timeout,
            commit=True, durability=self.durability_level)
    
        self.task.jython_task_manager.get_task_result(task)
        
    def test_basic_retry_async(self):
        self.test_log.info("going to create and execute the task")
        self.gen_create = self.get_doc_generator(0, self.num_items)
        task = self.task.async_load_gen_docs_atomicity(
            self.cluster, self.def_bucket, self.gen_create, "create",
            exp=0, batch_size=10, process_concurrency=1,
            replicate_to=self.replicate_to, persist_to=self.persist_to,
            timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
            update_count=self.update_count,
            transaction_timeout=self.transaction_timeout,
            commit=True, durability=self.durability_level,
            sync=True, num_threads=1)
    
        self.task.jython_task_manager.get_task_result(task)
        
        
        self.test_log.info("get all the keys in the cluster")
        keys = ["test_docs-0"]*20
        
        exception = Transaction().RunTransaction(self.transaction, [self.client.collection], [], keys, [], self.transaction_commit, False, 0)
        if exception:
            self.set_exception(Exception(exception)) 

        
    def basic_concurrency(self):
        self.crash = self.input.param("crash", False)
        
        self.doc_gen(self.num_items)

        # run transaction
        thread = threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "create", self.docs, self.transaction_commit, self.update_count, True, False))
        thread.start()
        self.sleep(1)
        
        if self.crash:
            self.client.cluster.shutdown() 
            self.transaction.close()
            print "going to create a new transaction"
            self.client1 = VBucketAwareMemcached(RestConnection(self.cluster.master), self.def_bucket[0])
            self.create_Transaction(self.client1)
            self.sleep(self.transaction_timeout+60)
            exception = Transaction().RunTransaction(self.transaction, [self.client1.collection], self.docs, [], [], self.transaction_commit, self.sync, self.update_count)
            if exception:
                time.sleep(60)
                
            self.verify_doc(self.num_items, self.client1)
            self.client1.close() 

        else:
            key = "test_docs-0"
            # insert will fail
            result = self.client.insert(key, "value")
            self.assertEqual(result["status"], False)
            
            # Update should pass
            result = self.client.upsert(key,"value")
            self.assertEqual(result["status"], True) 
            
            # delete should pass
            result = self.client.delete(key)
            self.assertEqual(result["status"], True) 
        
        thread.join()
            
    def test_stop_loading(self):
        ''' Load through transactions and close the transaction abruptly,
        then create a new transaction, sleep for 60 seconds so that
        transaction cleanup can happen, and perform create on the same
        set of docs '''
        self.num_txn = self.input.param("num_txn", 9)
        self.doc_gen(self.num_items)
        threads = []
        
        docs = list(self.__chunks(self.docs, len(self.docs)/self.num_txn))
        
        for doc in docs: 
            threads.append(threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "create", doc, self.transaction_commit, self.update_count, True, False)))
          
        for thread in threads:
            thread.start()
        
        self.client.cluster.shutdown()       
        self.transaction.close()
          
        self.client1 = VBucketAwareMemcached(RestConnection(self.cluster.master), self.def_bucket[0])
        self.create_Transaction(self.client1)
        self.sleep(self.transaction_timeout+60) # sleep for 60 seconds so that transaction cleanup can happen
        
        self.test_log.info("going to start the load")  
        for doc in docs:
            exception = Transaction().RunTransaction(self.transaction, [self.client1.collection], doc, [], [], self.transaction_commit, self.sync, self.update_count)
            if exception:
                time.sleep(60)

        self.verify_doc(self.num_items, self.client1)  
        self.client1.close()   
            
    def __insert_sub_doc_and_validate(self, doc_id, op_type, key, value):
        _, failed_items = self.client.crud(
            op_type,
            doc_id,
            [key, value],
            durability=self.durability_level,
            timeout=self.sdk_timeout,
            time_unit="seconds",
            create_path=True,
            xattr=True)
        self.assertFalse(failed_items, "Subdoc Xattr insert failed")
    
    def __read_doc_and_validate(self, doc_id, expected_val, subdoc_key=None):
        if subdoc_key:
            success, failed_items = self.client.crud("subdoc_read",
                                                     doc_id,
                                                     subdoc_key,
                                                     xattr=True)
            self.assertFalse(failed_items, "Xattr read failed")
            self.assertEqual(expected_val,
                             str(success[doc_id]["value"][0]),
                             "Sub_doc value mismatch: %s != %s"
                             % (success[doc_id]["value"][0],
                                expected_val))  
                  
    def test_TxnWithXattr(self):
        self.system_xattr = self.input.param("system_xattr", False)
        
        if self.system_xattr:
            xattr_key = "my._attr"
        else:
            xattr_key="my.attr"
        
        val = "v" * self.doc_size
        self.doc_gen(self.num_items)
            
        threads = threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "create", self.docs, self.transaction_commit, self.update_count))
        
        threads.start()
        self.sleep(1)
        
        self.__insert_sub_doc_and_validate("test_docs-0", "subdoc_insert",
                                           xattr_key, val)
        
        threads.join()
        
        if self.transaction_commit:
            self.__read_doc_and_validate("test_docs-0", val, xattr_key)
            
        self.sleep(60)
        self.verify_doc(self.num_items, self.client) 
        
    def test_TxnWithMultipleXattr(self):
        xattrs_to_insert = [["my.attr", "value"],
                            ["new_my.attr", "new_value"]]

        self.doc_gen(self.num_items)
        threads = threading.Thread(target=self.__thread_to_transaction, args=(self.transaction, "create", self.docs, self.transaction_commit, self.update_count))
        
        threads.start()
        self.sleep(1)
        
        for key, val in xattrs_to_insert:
            self.__insert_sub_doc_and_validate("test_docs-0", "subdoc_insert",
                                           key, val)
        
        threads.join()
        
        if self.transaction_commit:
            for key, val in xattrs_to_insert:
                self.__read_doc_and_validate("test_docs-0", val, key)
        
        self.sleep(60)
        self.verify_doc(self.num_items, self.client) 
Example 13
    def test_sync_write_in_progress(self):
        doc_ops = self.input.param("doc_ops", "create;create").split(';')
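        # doc_ops format: "<op_done_as_sync_write>;<op_attempted_in_parallel>",
        # e.g. "create;create" or "update;delete"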
        shell_conn = dict()
        cbstat_obj = dict()
        error_sim = dict()
        vb_info = dict()
        active_vbs = dict()
        replica_vbs = dict()

        # Override d_level, error_simulation type based on d_level
        self.__get_d_level_and_error_to_simulate()

        # Acquire SDK client from the pool for performing doc_ops locally
        client = SDKClient([self.cluster.master], self.bucket)

        target_nodes = DurabilityHelper.getTargetNodes(self.cluster,
                                                       self.nodes_init,
                                                       self.num_nodes_affected)
        for node in target_nodes:
            shell_conn[node.ip] = RemoteMachineShellConnection(node)
            cbstat_obj[node.ip] = Cbstats(node)
            vb_info["init"] = dict()
            vb_info["init"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(
                self.bucket.name)
            error_sim[node.ip] = CouchbaseError(self.log, shell_conn[node.ip])
            # Fetch the affected nodes' vbucket lists (active and replica)
            active_vbs[node.ip] = cbstat_obj[node.ip].vbucket_list(
                self.bucket.name, vbucket_type="active")
            replica_vbs[node.ip] = cbstat_obj[node.ip].vbucket_list(
                self.bucket.name, vbucket_type="replica")

        if self.durability_level \
                == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:
            target_vbs = active_vbs
            target_vbuckets = list()
            for target_node in target_nodes:
                target_vbuckets += target_vbs[target_node.ip]
        else:
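            # Keep only vbuckets whose replicas live on all the affected
            # nodes, so the simulated error blocks the sync_write on them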
            target_vbuckets = replica_vbs[target_nodes[0].ip]
            if len(target_nodes) > 1:
                index = 1
                while index < len(target_nodes):
                    target_vbuckets = list(
                        set(target_vbuckets).intersection(
                            set(replica_vbs[target_nodes[index].ip])))
                    index += 1

        doc_load_spec = dict()
        doc_load_spec["doc_crud"] = dict()
        doc_load_spec["doc_crud"][MetaCrudParams.DocCrud.COMMON_DOC_KEY] \
            = "test_collections"
        doc_load_spec[MetaCrudParams.TARGET_VBUCKETS] = target_vbuckets
        doc_load_spec[MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD] = 5
        doc_load_spec[MetaCrudParams.SCOPES_CONSIDERED_FOR_CRUD] = "all"
        doc_load_spec[MetaCrudParams.DURABILITY_LEVEL] = self.durability_level
        doc_load_spec[MetaCrudParams.SDK_TIMEOUT] = 60

        if doc_ops[0] == DocLoading.Bucket.DocOps.CREATE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 1
        elif doc_ops[0] == DocLoading.Bucket.DocOps.UPDATE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 1
        elif doc_ops[0] == DocLoading.Bucket.DocOps.REPLACE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.REPLACE_PERCENTAGE_PER_COLLECTION] = 1
        elif doc_ops[0] == DocLoading.Bucket.DocOps.DELETE:
            doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 1

        # Induce error condition for testing
        for node in target_nodes:
            error_sim[node.ip].create(self.simulate_error,
                                      bucket_name=self.bucket.name)
            self.sleep(3, "Wait for error simulation to take effect")

        doc_loading_task = \
            self.bucket_util.run_scenario_from_spec(
                self.task,
                self.cluster,
                self.cluster.buckets,
                doc_load_spec,
                async_load=True)

        self.sleep(5, "Wait for doc ops to reach server")

        for bucket, s_dict in doc_loading_task.loader_spec.items():
            for s_name, c_dict in s_dict["scopes"].items():
                for c_name, c_meta in c_dict["collections"].items():
                    client.select_collection(s_name, c_name)
                    for op_type in c_meta:
                        key, value = c_meta[op_type]["doc_gen"].next()
                        if self.with_non_sync_writes:
                            fail = client.crud(doc_ops[1],
                                               key,
                                               value,
                                               exp=0,
                                               timeout=2,
                                               time_unit="seconds")
                        else:
                            fail = client.crud(
                                doc_ops[1],
                                key,
                                value,
                                exp=0,
                                durability=self.durability_level,
                                timeout=2,
                                time_unit="seconds")

                        expected_exception = \
                            SDKException.AmbiguousTimeoutException
                        retry_reason = \
                            SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                        if doc_ops[0] == DocLoading.Bucket.DocOps.CREATE \
                                and doc_ops[1] in \
                                [DocLoading.Bucket.DocOps.DELETE,
                                 DocLoading.Bucket.DocOps.REPLACE]:
                            expected_exception = \
                                SDKException.DocumentNotFoundException
                            retry_reason = None

                        # Validate the returned error from the SDK
                        if expected_exception not in str(fail["error"]):
                            self.log_failure("Invalid exception for %s: %s" %
                                             (key, fail["error"]))
                        if retry_reason \
                                and retry_reason not in str(fail["error"]):
                            self.log_failure(
                                "Invalid retry reason for %s: %s" %
                                (key, fail["error"]))

                        # Try reading the value in SyncWrite state
                        fail = client.crud("read", key)
                        if doc_ops[0] == "create":
                            # Expected KeyNotFound in case of CREATE op
                            if fail["status"] is True:
                                self.log_failure(
                                    "%s returned value during SyncWrite %s" %
                                    (key, fail))
                        else:
                            # Expects prev val in case of other operations
                            if fail["status"] is False:
                                self.log_failure(
                                    "Key %s read failed for prev value: %s" %
                                    (key, fail))

        # Revert the introduced error condition
        for node in target_nodes:
            error_sim[node.ip].revert(self.simulate_error,
                                      bucket_name=self.bucket.name)

        # Wait for doc_loading to complete
        self.task_manager.get_task_result(doc_loading_task)
        self.bucket_util.validate_doc_loading_results(doc_loading_task)
        if doc_loading_task.result is False:
            self.log_failure("Doc CRUDs failed")

        # Release the acquired SDK client
        client.close()
        self.validate_test_failure()
Example 14
    def test_update_single_doc_n_times(self):
        """
        Update the same document n times, where n is derived from the
        given fragmentation value; after the updates, check for space
        amplification and validate the data
        """
        count = 0
        self.assertIs((self.fragmentation <= 0 or self.fragmentation >= 100),
                      False,
                      msg="Fragmentation value can't be <=0 or >=100")
        update_count = int(
            math.ceil(
                float(self.fragmentation * self.num_items) /
                (100 - self.fragmentation)))
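        # e.g. fragmentation=50, num_items=1000 -> ceil(50000/50) = 1000 updates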
        self.log.info("{} is the count with which doc will be updated \
        ".format(update_count))
        self.doc_ops = "update"

        self.client = SDKClient([self.cluster.master],
                                self.bucket_util.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)
        self.gen_update = doc_generator(
            self.key,
            0,
            1,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets,
            key_size=self.key_size,
            mutate=count,
            randomize_doc_size=self.randomize_doc_size,
            randomize_value=self.randomize_value,
            mix_key_size=self.mix_key_size,
            deep_copy=self.deep_copy)
        key, val = self.gen_update.next()

        for node in self.cluster.nodes_in_cluster:
            shell = RemoteMachineShellConnection(node)
            shell.kill_memcached()
            shell.disconnect()
            self.assertTrue(
                self.bucket_util._wait_warmup_completed(
                    [self.cluster_util.cluster.master],
                    self.bucket_util.buckets[0],
                    wait_time=self.wait_timeout * 10))

        while count < (update_count + 1):
            self.log.debug("Update Iteration count == {}".format(count))
            val.put("mutated", count + 1)
            self.client.upsert(key, val)
            count += 1
        self.bucket_util._wait_for_stats_all_buckets()

        disk_usage = self.get_disk_usage(self.bucket_util.get_all_buckets()[0],
                                         self.servers)
        _res = disk_usage[0]
        self.log.info("After all updates disk usage is {}MB\
        ".format(_res))
        usage_factor = (
            (float(self.num_items + update_count) / self.num_items) + 0.5)
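        # e.g. num_items=1000, update_count=1000 -> (2000/1000) + 0.5 = 2.5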
        self.log.debug("Disk usage factor is {}".format(usage_factor))
        self.assertIs(
            _res > usage_factor * self.disk_usage[self.disk_usage.keys()[0]],
            False, "Disk Usage {}MB After all Updates'\n' \
            exceeds Actual'\n' \
            disk usage {}MB by {}'\n' \
            times".format(_res, self.disk_usage[self.disk_usage.keys()[0]],
                          usage_factor))
        data_validation = self.task.async_validate_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            self.gen_update,
            "update",
            0,
            batch_size=self.batch_size,
            process_concurrency=self.process_concurrency,
            pause_secs=5,
            timeout_secs=self.sdk_timeout)
        self.task.jython_task_manager.get_task_result(data_validation)
        self.enable_disable_swap_space(self.servers, disable=False)
        self.log.info("====test_update_single_doc_n_times====")
Example 15
class BasicCrudTests(MagmaBaseTest):
    def setUp(self):
        super(BasicCrudTests, self).setUp()
        self.enable_disable_swap_space(self.servers)
        start = 0
        end = self.num_items
        start_read = 0
        end_read = self.num_items
        if self.rev_write:
            start = -int(self.num_items - 1)
            end = 1
        if self.rev_read:
            start_read = -int(self.num_items - 1)
            end_read = 1
        self.gen_create = doc_generator(
            self.key,
            start,
            end,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets,
            key_size=self.key_size,
            randomize_doc_size=self.randomize_doc_size,
            randomize_value=self.randomize_value,
            mix_key_size=self.mix_key_size,
            deep_copy=self.deep_copy)
        self.result_task = self._load_all_buckets(self.cluster,
                                                  self.gen_create,
                                                  "create",
                                                  0,
                                                  batch_size=self.batch_size,
                                                  dgm_batch=self.dgm_batch)
        if self.active_resident_threshold != 100:
            for task in self.result_task.keys():
                self.num_items = task.doc_index
        self.log.info("Verifying num_items counts after doc_ops")
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(self.num_items)
        self.disk_usage = dict()
        if self.standard_buckets == 1 or self.standard_buckets == self.magma_buckets:
            for bucket in self.bucket_util.get_all_buckets():
                disk_usage = self.get_disk_usage(bucket, self.servers)
                self.disk_usage[bucket.name] = disk_usage[0]
                self.log.info(
                    "For bucket {} disk usage after initial creation is {}MB\
                    ".format(bucket.name, self.disk_usage[bucket.name]))
        self.gen_read = doc_generator(
            self.key,
            start_read,
            end_read,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets,
            key_size=self.key_size,
            randomize_doc_size=self.randomize_doc_size,
            randomize_value=self.randomize_value,
            mix_key_size=self.mix_key_size,
            deep_copy=self.deep_copy)
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()

    def tearDown(self):
        super(BasicCrudTests, self).tearDown()

    def test_expiry(self):
        result = True
        self.gen_create = doc_generator(self.key,
                                        0,
                                        10,
                                        doc_size=20,
                                        doc_type=self.doc_type,
                                        key_size=self.key_size)

        tasks_info = self.bucket_util._async_load_all_buckets(
            self.cluster,
            self.gen_create,
            "create",
            10,
            batch_size=10,
            process_concurrency=1,
            persist_to=self.persist_to,
            replicate_to=self.replicate_to,
            durability=self.durability_level,
            pause_secs=5,
            timeout_secs=self.sdk_timeout,
            retries=self.sdk_retries,
        )
        self.task.jython_task_manager.get_task_result(tasks_info.keys()[0])
        self.sleep(20)
        self.client = SDKClient([self.cluster.master],
                                self.bucket_util.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)
        for i in range(10):
            key = (self.key + "-" +
                   str(i).zfill(self.key_size - len(self.key)))
            try:
                getReplicaResult = self.client.collection.getAnyReplica(
                    key, GetAnyReplicaOptions.getAnyReplicaOptions())
                if getReplicaResult:
                    result = False
                    try:
                        self.log.info(
                            "Able to retreive: %s" % {
                                "key": key,
                                "value": getReplicaResult.contentAsObject(),
                                "cas": getReplicaResult.cas()
                            })
                    except Exception as e:
                        self.log.error(str(e))
            except DocumentUnretrievableException as e:
                pass
            if len(self.client.getFromAllReplica(key)) > 0:
                result = False
        self.client.close()
        self.assertTrue(result, "SDK is able to retrieve expired documents")

    def test_basic_create_read(self):
        """
        Write and read docs in parallel. While reading we use the old
        doc generator (self.gen_create) with which docs were already
        created in magma_base; for writing we create a new doc
        generator. Before the reads start, memcached is killed to make
        sure all reads happen from magma/storage
        """
        self.log.info("Loading and Reading docs parallel")
        count = 0
        init_items = self.num_items
        while count < self.test_itr:
            self.log.info("Create Iteration count == {}".format(count))
            for node in self.cluster.nodes_in_cluster:
                shell = RemoteMachineShellConnection(node)
                shell.kill_memcached()
                shell.disconnect()
            self.doc_ops = "create:read"
            start = self.num_items
            end = self.num_items + init_items
            start_read = self.num_items
            end_read = self.num_items + init_items
            if self.rev_write:
                start = -int(self.num_items + init_items - 1)
                end = -int(self.num_items - 1)
            if self.rev_read:
                start_read = -int(self.num_items + init_items - 1)
                end_read = -int(self.num_items - 1)
            self.gen_create = doc_generator(
                self.key,
                start,
                end,
                doc_size=self.doc_size,
                doc_type=self.doc_type,
                target_vbucket=self.target_vbucket,
                vbuckets=self.cluster_util.vbuckets,
                key_size=self.key_size,
                randomize_doc_size=self.randomize_doc_size,
                randomize_value=self.randomize_value,
                mix_key_size=self.mix_key_size,
                deep_copy=self.deep_copy)
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.log.info("Verifying doc counts after create doc_ops")
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)
            self.gen_read = doc_generator(
                self.key,
                start_read,
                end_read,
                doc_size=self.doc_size,
                doc_type=self.doc_type,
                target_vbucket=self.target_vbucket,
                vbuckets=self.cluster_util.vbuckets,
                key_size=self.key_size,
                randomize_doc_size=self.randomize_doc_size,
                randomize_value=self.randomize_value,
                mix_key_size=self.mix_key_size,
                deep_copy=self.deep_copy)
            if self.doc_size <= 32:
                for bucket in self.bucket_util.get_all_buckets():
                    disk_usage = self.get_disk_usage(bucket, self.servers)
                    self.assertIs(
                        disk_usage[2] > disk_usage[3], True,
                        "For Bucket {} , Disk Usage for seqIndex'\n' \
                        After new Creates count {}'\n' \
                        exceeds keyIndex disk'\n' \
                        usage".format(bucket.name, count + 1))
            if self.standard_buckets > 1 and self.standard_buckets == self.magma_buckets:
                disk_usage = dict()
                for bucket in self.bucket_util.get_all_buckets():
                    usage = self.get_disk_usage(bucket, self.servers)
                    disk_usage[bucket.name] = usage[0]
                    self.assertTrue(
                        all([
                            disk_usage[disk_usage.keys()[0]] == disk_usage[key]
                            for key in disk_usage.keys()
                        ]), '''Disk Usage for magma buckets
                        is not equal for same number of docs ''')
            count += 1
        self.log.info("====test_basic_create_read ends====")

    def test_update_multi(self):
        """
        Update all the docs n times, and after each iteration
        check for space amplification and data validation
        """
        count = 0
        mutated = 1
        update_doc_count = int(
            math.ceil(
                float(self.fragmentation * self.num_items) /
                (100 - self.fragmentation)))
        self.log.info("Count of docs to be updated is {}\
        ".format(update_doc_count))
        num_update = list()
        while update_doc_count > self.num_items:
            num_update.append(self.num_items)
            update_doc_count -= self.num_items
        if update_doc_count > 0:
            num_update.append(update_doc_count)
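        # e.g. num_items=1000, update_doc_count=2500 -> num_update = [1000, 1000, 500]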
        while count < self.test_itr:
            self.log.info("Update Iteration count == {}".format(count))
            for node in self.cluster.nodes_in_cluster:
                shell = RemoteMachineShellConnection(node)
                shell.kill_memcached()
                shell.disconnect()
                self.assertTrue(
                    self.bucket_util._wait_warmup_completed(
                        [self.cluster_util.cluster.master],
                        self.bucket_util.buckets[0],
                        wait_time=self.wait_timeout * 10))
            self.log.debug("List of docs to be updated {}\
            ".format(num_update))
            for itr in num_update:
                self.doc_ops = "update"
                start = 0
                end = itr
                if self.rev_update:
                    start = -int(itr - 1)
                    end = 1
                self.gen_update = doc_generator(
                    self.key,
                    start,
                    end,
                    doc_size=self.doc_size,
                    doc_type=self.doc_type,
                    target_vbucket=self.target_vbucket,
                    vbuckets=self.cluster_util.vbuckets,
                    key_size=self.key_size,
                    mutate=mutated,
                    randomize_doc_size=self.randomize_doc_size,
                    randomize_value=self.randomize_value,
                    mix_key_size=self.mix_key_size,
                    deep_copy=self.deep_copy)
                mutated += 1
                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      _sync=True)
                self.log.info("Waiting for ep-queues to get drained")
                self.bucket_util._wait_for_stats_all_buckets()
            disk_usage = self.get_disk_usage(
                self.bucket_util.get_all_buckets()[0], self.servers)
            _res = disk_usage[0]
            self.log.info("After update count {} disk usage is {}\
            ".format(count + 1, _res))
            usage_factor = (
                (float(self.num_items + sum(num_update)) / self.num_items) +
                0.5)
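            # Allowed growth relative to the baseline captured in
            # self.disk_usage: (live + updated docs) per live doc, plus a 0.5
            # cushion; e.g. (illustrative) 1000000 live docs and 1000000
            # updates give a ceiling of 2.5x.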
            self.log.debug("Disk usage factor is {}".format(usage_factor))
            self.assertIs(
                _res >
                usage_factor * self.disk_usage[self.disk_usage.keys()[0]],
                False, "Disk Usage {}MB After Update'\n' \
                Count {} exceeds Actual'\n' \
                disk usage {}MB by {}'\n' \
                times".format(_res, count,
                              self.disk_usage[self.disk_usage.keys()[0]],
                              usage_factor))
            count += 1
        data_validation = self.task.async_validate_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            self.gen_update,
            "update",
            0,
            batch_size=self.batch_size,
            process_concurrency=self.process_concurrency,
            pause_secs=5,
            timeout_secs=self.sdk_timeout)
        self.task.jython_task_manager.get_task_result(data_validation)
        self.enable_disable_swap_space(self.servers, disable=False)
        self.log.info("====test_update_multi ends====")

    def test_multi_update_delete(self):
        """
        Step 1: Kill memcached and update all the docs update_itr times.
        After each iteration check for space amplification; in the last
        iteration of test_itr also validate the docs.
        Step 2: Delete half the docs and check space amplification.
        Step 3: Recreate the deleted docs and check space amplification.
        Repeat steps 1-3 test_itr times.
        Step 4: Validate the newly created docs.
        """
        count = 0
        mutated = 1
        for i in range(self.test_itr):
            while count < self.update_itr:
                self.log.debug(
                    "Iteration {}: Step 1 of test_multi_update_delete \
                ".format(self.test_itr + 1))
                for node in self.cluster.nodes_in_cluster:
                    shell = RemoteMachineShellConnection(node)
                    shell.kill_memcached()
                    shell.disconnect()
                    self.assertTrue(
                        self.bucket_util._wait_warmup_completed(
                            [self.cluster_util.cluster.master],
                            self.bucket_util.buckets[0],
                            wait_time=self.wait_timeout * 10))
                self.doc_ops = "update"
                start = 0
                end = self.num_items
                if self.rev_update:
                    start = -int(self.num_items - 1)
                    end = 1
                self.gen_update = doc_generator(
                    self.key,
                    start,
                    end,
                    doc_size=self.doc_size,
                    doc_type=self.doc_type,
                    target_vbucket=self.target_vbucket,
                    vbuckets=self.cluster_util.vbuckets,
                    key_size=self.key_size,
                    mutate=mutated,
                    randomize_doc_size=self.randomize_doc_size,
                    randomize_value=self.randomize_value,
                    mix_key_size=self.mix_key_size,
                    deep_copy=self.deep_copy)
                mutated += 1
                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      _sync=True)
                self.log.info("Waiting for ep-queues to get drained")
                self.bucket_util._wait_for_stats_all_buckets()
                disk_usage = self.get_disk_usage(
                    self.bucket_util.get_all_buckets()[0], self.servers)
                _res = disk_usage[0]
                self.log.info("After update count {} disk usage is {}MB\
                ".format(count + 1, _res))
                self.assertIs(
                    _res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]],
                    False, "Disk Usage {}MB After \
                    Update Count {} exceeds Actual \
                    disk usage {}MB by 2.5\
                    times".format(_res, count,
                                  self.disk_usage[self.disk_usage.keys()[0]]))
                count += 1
            # Data validation is done only in the last iteration of
            # test_itr to avoid validating the same docs repeatedly,
            # hence the if-check below
            if i + 1 == self.test_itr:
                data_validation = self.task.async_validate_docs(
                    self.cluster,
                    self.bucket_util.buckets[0],
                    self.gen_update,
                    "update",
                    0,
                    batch_size=self.batch_size,
                    process_concurrency=self.process_concurrency,
                    pause_secs=5,
                    timeout_secs=self.sdk_timeout)
                self.task.jython_task_manager.get_task_result(data_validation)

            self.update_itr += self.update_itr
            self.log.debug("Iteration {}: Step 2 of test_multi_update_delete \
            ".format(self.test_itr + 1))
            start_del = 0
            end_del = self.num_items // 2
            if self.rev_del:
                start_del = -int(self.num_items // 2 - 1)
                end_del = 1
            self.gen_delete = doc_generator(
                self.key,
                start_del,
                end_del,
                doc_size=self.doc_size,
                doc_type=self.doc_type,
                target_vbucket=self.target_vbucket,
                vbuckets=self.cluster_util.vbuckets,
                key_size=self.key_size,
                randomize_doc_size=self.randomize_doc_size,
                randomize_value=self.randomize_value,
                mix_key_size=self.mix_key_size,
                deep_copy=self.deep_copy)
            self.log.info("Deleting num_items//2 docs")
            self.doc_ops = "delete"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)
            disk_usage = self.get_disk_usage(
                self.bucket_util.get_all_buckets()[0], self.servers)
            _res = disk_usage[0]
            self.log.info("After delete count {} disk usage is {}MB\
            ".format(i + 1, _res))
            self.assertIs(
                _res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]], False,
                "Disk Usage {}MB After \
                Delete count {} exceeds Actual \
                disk usage {}MB by 2.5 \
                times".format(_res, i + 1,
                              self.disk_usage[self.disk_usage.keys()[0]]))

            self.log.debug("Iteration{}: Step 3 of test_multi_update_delete \
            ".format(self.test_itr + 1))
            self.gen_create = copy.deepcopy(self.gen_delete)
            self.log.info("Recreating num_items//2 docs")
            self.doc_ops = "create"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)
            disk_usage = self.get_disk_usage(
                self.bucket_util.get_all_buckets()[0], self.servers)
            _res = disk_usage[0]
            self.log.info("disk usage after new create \
            is {}".format(_res))
            self.assertIs(
                _res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]], False,
                "Disk Usage {}MB After \
                new Creates count {} exceeds \
                Actual disk usage {}MB by \
                2.5 times".format(_res, i + 1,
                                  self.disk_usage[self.disk_usage.keys()[0]]))
        self.log.debug("Iteration{}: Step 4 of test_multi_update_delete \
        ".format(self.test_itr + 1))
        data_validation = self.task.async_validate_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            self.gen_create,
            "create",
            0,
            batch_size=self.batch_size,
            process_concurrency=self.process_concurrency,
            pause_secs=5,
            timeout_secs=self.sdk_timeout)
        self.task.jython_task_manager.get_task_result(data_validation)
        self.log.info("====test_multiUpdate_delete ends====")

    def test_update_rev_update(self):
        count = 0
        mutated = 1
        for i in range(self.test_itr):
            while count < self.update_itr:
                for node in self.cluster.nodes_in_cluster:
                    shell = RemoteMachineShellConnection(node)
                    shell.kill_memcached()
                    shell.disconnect()
                    self.assertTrue(
                        self.bucket_util._wait_warmup_completed(
                            [self.cluster_util.cluster.master],
                            self.bucket_util.buckets[0],
                            wait_time=self.wait_timeout * 10))
                tasks_info = dict()
                data_validation = []
                g_update = doc_generator(
                    self.key,
                    0,
                    self.num_items // 2,
                    doc_size=self.doc_size,
                    doc_type=self.doc_type,
                    target_vbucket=self.target_vbucket,
                    vbuckets=self.cluster_util.vbuckets,
                    key_size=self.key_size,
                    mutate=mutated,
                    randomize_doc_size=self.randomize_doc_size,
                    randomize_value=self.randomize_value,
                    mix_key_size=self.mix_key_size,
                    deep_copy=self.deep_copy)
                mutated += 1
                tem_tasks_info = self.bucket_util._async_load_all_buckets(
                    self.cluster,
                    g_update,
                    "update",
                    0,
                    batch_size=self.batch_size,
                    process_concurrency=self.process_concurrency,
                    persist_to=self.persist_to,
                    replicate_to=self.replicate_to,
                    durability=self.durability_level,
                    pause_secs=5,
                    timeout_secs=self.sdk_timeout,
                    retries=self.sdk_retries,
                    retry_exceptions=self.retry_exceptions,
                    ignore_exceptions=self.ignore_exceptions)
                tasks_info.update(tem_tasks_info.items())
                start = -(self.num_items // 2 - 1)
                end = 1
                r_update = doc_generator(
                    self.key,
                    start,
                    end,
                    doc_size=self.doc_size,
                    doc_type=self.doc_type,
                    target_vbucket=self.target_vbucket,
                    vbuckets=self.cluster_util.vbuckets,
                    key_size=self.key_size,
                    mutate=mutated,
                    randomize_doc_size=self.randomize_doc_size,
                    randomize_value=self.randomize_value,
                    mix_key_size=self.mix_key_size,
                    deep_copy=self.deep_copy)
                mutated += 1
                if self.next_half:
                    mutated -= 2
                    start = -(self.num_items - 1)
                    end = -(self.num_items // 2 - 1)
                    r_update = doc_generator(
                        self.key,
                        start,
                        end,
                        doc_size=self.doc_size,
                        doc_type=self.doc_type,
                        target_vbucket=self.target_vbucket,
                        vbuckets=self.cluster_util.vbuckets,
                        key_size=self.key_size,
                        mutate=mutated,
                        randomize_doc_size=self.randomize_doc_size,
                        randomize_value=self.randomize_value,
                        mix_key_size=self.mix_key_size,
                        deep_copy=self.deep_copy)
                    mutated += 1
                    tem_tasks_info = self.bucket_util._async_load_all_buckets(
                        self.cluster,
                        r_update,
                        "update",
                        0,
                        batch_size=self.batch_size,
                        process_concurrency=self.process_concurrency,
                        persist_to=self.persist_to,
                        replicate_to=self.replicate_to,
                        durability=self.durability_level,
                        pause_secs=5,
                        timeout_secs=self.sdk_timeout,
                        retries=self.sdk_retries,
                        retry_exceptions=self.retry_exceptions,
                        ignore_exceptions=self.ignore_exceptions)
                    tasks_info.update(tem_tasks_info.items())
                for task in tasks_info:
                    self.task_manager.get_task_result(task)
                self.bucket_util.verify_doc_op_task_exceptions(
                    tasks_info, self.cluster)
                self.bucket_util.log_doc_ops_task_failures(tasks_info)
                if not self.next_half:
                    tem_tasks_info = self.bucket_util._async_load_all_buckets(
                        self.cluster,
                        r_update,
                        "update",
                        0,
                        batch_size=self.batch_size,
                        process_concurrency=self.process_concurrency,
                        persist_to=self.persist_to,
                        replicate_to=self.replicate_to,
                        durability=self.durability_level,
                        pause_secs=5,
                        timeout_secs=self.sdk_timeout,
                        retries=self.sdk_retries,
                        retry_exceptions=self.retry_exceptions,
                        ignore_exceptions=self.ignore_exceptions)
                    for task in tem_tasks_info:
                        self.task_manager.get_task_result(task)
                    self.bucket_util.verify_doc_op_task_exceptions(
                        tem_tasks_info, self.cluster)
                    self.bucket_util.log_doc_ops_task_failures(tem_tasks_info)
                self.log.info("Waiting for ep-queues to get drained")
                self.bucket_util._wait_for_stats_all_buckets()
                if self.next_half:
                    data_validation.extend([
                        self.task.async_validate_docs(
                            self.cluster,
                            self.bucket_util.buckets[0],
                            g_update,
                            "update",
                            0,
                            batch_size=self.batch_size,
                            process_concurrency=self.process_concurrency,
                            pause_secs=5,
                            timeout_secs=self.sdk_timeout),
                        self.task.async_validate_docs(
                            self.cluster,
                            self.bucket_util.buckets[0],
                            r_update,
                            "update",
                            0,
                            batch_size=self.batch_size,
                            process_concurrency=self.process_concurrency,
                            pause_secs=5,
                            timeout_secs=self.sdk_timeout)
                    ])
                else:
                    data_validation.append(
                        self.task.async_validate_docs(
                            self.cluster,
                            self.bucket_util.buckets[0],
                            r_update,
                            "update",
                            0,
                            batch_size=self.batch_size,
                            process_concurrency=self.process_concurrency,
                            pause_secs=5,
                            timeout_secs=self.sdk_timeout))
                for task in data_validation:
                    self.task.jython_task_manager.get_task_result(task)
                disk_usage = self.get_disk_usage(
                    self.bucket_util.get_all_buckets()[0], self.servers)
                _res = disk_usage[0] - disk_usage[1]
                self.log.info("disk usage after update count {}\
                is {}".format(count + 1, _res))
                self.assertIs(
                    _res > 4 * self.disk_usage[self.disk_usage.keys()[0]],
                    False, "Disk Usage {} After \
                    Update Count {} exceeds \
                    Actual disk usage {} by four \
                    times".format(_res, count,
                                  self.disk_usage[self.disk_usage.keys()[0]]))
                count += 1
            self.update_itr += self.update_itr
            start_del = 0
            end_del = self.num_items // 2
            if self.rev_del:
                start_del = -int(self.num_items // 2 - 1)
                end_del = 1
            self.gen_delete = doc_generator(
                self.key,
                start_del,
                end_del,
                doc_size=self.doc_size,
                doc_type=self.doc_type,
                target_vbucket=self.target_vbucket,
                vbuckets=self.cluster_util.vbuckets,
                key_size=self.key_size,
                randomize_doc_size=self.randomize_doc_size,
                randomize_value=self.randomize_value,
                mix_key_size=self.mix_key_size,
                deep_copy=self.deep_copy)
            self.log.info("Deleting num_items//2 docs")
            self.doc_ops = "delete"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)
            disk_usage = self.get_disk_usage(
                self.bucket_util.get_all_buckets()[0], self.servers)
            _res = disk_usage[0] - disk_usage[1]
            self.log.info("disk usage after delete is {}".format(_res))
            self.assertIs(
                _res > 4 * self.disk_usage[self.disk_usage.keys()[0]], False,
                "Disk Usage {} After \
                Delete count {} exceeds Actual \
                disk usage {} by four \
                times".format(_res, i + 1,
                              self.disk_usage[self.disk_usage.keys()[0]]))
            self.gen_create = copy.deepcopy(self.gen_delete)
            self.log.info("Recreating num_items//2 docs")
            self.doc_ops = "create"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)
            d_validation = self.task.async_validate_docs(
                self.cluster,
                self.bucket_util.buckets[0],
                self.gen_create,
                "create",
                0,
                batch_size=self.batch_size,
                process_concurrency=self.process_concurrency,
                pause_secs=5,
                timeout_secs=self.sdk_timeout)
            self.task.jython_task_manager.get_task_result(d_validation)
            disk_usage = self.get_disk_usage(
                self.bucket_util.get_all_buckets()[0], self.servers)
            _res = disk_usage[0] - disk_usage[1]
            self.log.info("disk usage after new create \
            is {}".format(_res))
            self.assertIs(
                _res > 4 * self.disk_usage[self.disk_usage.keys()[0]], False,
                "Disk Usage {} After \
                new Creates count {} exceeds \
                Actual disk usage {} by four \
                times".format(_res, i + 1,
                              self.disk_usage[self.disk_usage.keys()[0]]))
        self.log.info("====test_update_rev_update ends====")

    def test_update_single_doc_n_times(self):
        """
        Update same document n times,  where n is number which
        gets derived from given fragmentation value and after
        updates check for space amplification and data
        validation
        """
        count = 0
        self.assertIs((self.fragmentation <= 0 or self.fragmentation >= 100),
                      False,
                      msg="Fragmentation value can't be <=0 or >=100")
        update_count = int(
            math.ceil(
                float(self.fragmentation * self.num_items) /
                (100 - self.fragmentation)))
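        # Rationale, as I read the formula: after update_count in-place
        # updates there are roughly update_count stale versions sitting next
        # to num_items live docs, so stale / (stale + live) ~= fragmentation
        # / 100. E.g. (illustrative) fragmentation=50, num_items=1000 gives
        # update_count=1000 and ~50% stale data.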
        self.log.info("{} is the count with which doc will be updated \
        ".format(update_count))
        self.doc_ops = "update"

        self.client = SDKClient([self.cluster.master],
                                self.bucket_util.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)
        self.gen_update = doc_generator(
            self.key,
            0,
            1,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets,
            key_size=self.key_size,
            mutate=count,
            randomize_doc_size=self.randomize_doc_size,
            randomize_value=self.randomize_value,
            mix_key_size=self.mix_key_size,
            deep_copy=self.deep_copy)
        key, val = self.gen_update.next()

        for node in self.cluster.nodes_in_cluster:
            shell = RemoteMachineShellConnection(node)
            shell.kill_memcached()
            shell.disconnect()
            self.assertTrue(
                self.bucket_util._wait_warmup_completed(
                    [self.cluster_util.cluster.master],
                    self.bucket_util.buckets[0],
                    wait_time=self.wait_timeout * 10))

        while count < (update_count + 1):
            self.log.debug("Update Iteration count == {}".format(count))
            val.put("mutated", count + 1)
            self.client.upsert(key, val)
            count += 1
        self.bucket_util._wait_for_stats_all_buckets()

        disk_usage = self.get_disk_usage(self.bucket_util.get_all_buckets()[0],
                                         self.servers)
        _res = disk_usage[0]
        self.log.info("After all updates disk usage is {}MB\
        ".format(_res))
        usage_factor = (
            (float(self.num_items + update_count) / self.num_items) + 0.5)
        self.log.debug("Disk usage factor is {}".format(usage_factor))
        self.assertIs(
            _res > usage_factor * self.disk_usage[self.disk_usage.keys()[0]],
            False, "Disk Usage {}MB After all Updates'\n' \
            exceeds Actual'\n' \
            disk usage {}MB by {}'\n' \
            times".format(_res, self.disk_usage[self.disk_usage.keys()[0]],
                          usage_factor))
        data_validation = self.task.async_validate_docs(
            self.cluster,
            self.bucket_util.buckets[0],
            self.gen_update,
            "update",
            0,
            batch_size=self.batch_size,
            process_concurrency=self.process_concurrency,
            pause_secs=5,
            timeout_secs=self.sdk_timeout)
        self.task.jython_task_manager.get_task_result(data_validation)
        self.enable_disable_swap_space(self.servers, disable=False)
        self.log.info("====test_update_single_doc_n_times====")

    def test_read_docs_using_multithreads(self):
        """
        Read the same docs concurrently using multiple threads.
        """
        self.log.info("Reading docs parallelly using multi threading")
        tasks_info = dict()
        update_doc_count = int(
            math.ceil(
                float(self.fragmentation * self.num_items) /
                (100 - self.fragmentation)))
        self.log.info("Count of docs to be updated is {}\
        ".format(update_doc_count))
        num_update = list()
        while update_doc_count > self.num_items:
            num_update.append(self.num_items)
            update_doc_count -= self.num_items
        if update_doc_count > 0:
            num_update.append(update_doc_count)
        for itr in num_update:
            self.doc_ops = "update"
            start = 0
            end = itr
            self.gen_update = doc_generator(
                self.key,
                start,
                end,
                doc_size=self.doc_size,
                doc_type=self.doc_type,
                target_vbucket=self.target_vbucket,
                vbuckets=self.cluster_util.vbuckets,
                key_size=self.key_size,
                mutate=0,
                randomize_doc_size=self.randomize_doc_size,
                randomize_value=self.randomize_value,
                mix_key_size=self.mix_key_size,
                deep_copy=self.deep_copy)
            update_task_info = self.loadgen_docs(self.retry_exceptions,
                                                 self.ignore_exceptions,
                                                 _sync=False)
            tasks_info.update(update_task_info.items())

        count = 0
        self.doc_ops = "read"

        # If self.next_half is true, one thread reads in ascending order
        # and the other reads in descending order

        if self.next_half:
            start = -int(self.num_items - 1)
            end = 1
            g_read = doc_generator(self.key,
                                   start,
                                   end,
                                   doc_size=self.doc_size,
                                   doc_type=self.doc_type,
                                   target_vbucket=self.target_vbucket,
                                   vbuckets=self.cluster_util.vbuckets,
                                   key_size=self.key_size,
                                   randomize_doc_size=self.randomize_doc_size,
                                   randomize_value=self.randomize_value,
                                   mix_key_size=self.mix_key_size,
                                   deep_copy=self.deep_copy)
        for node in self.cluster.nodes_in_cluster:
            shell = RemoteMachineShellConnection(node)
            shell.kill_memcached()
            shell.disconnect()

        while count < self.read_thread_count:
            read_task_info = self.loadgen_docs(self.retry_exceptions,
                                               self.ignore_exceptions,
                                               _sync=False)
            tasks_info.update(read_task_info.items())
            count += 1
            if self.next_half and count < self.read_thread_count:
                read_task_info = self.bucket_util._async_validate_docs(
                    self.cluster,
                    g_read,
                    "read",
                    0,
                    batch_size=self.batch_size,
                    process_concurrency=self.process_concurrency,
                    pause_secs=5,
                    timeout_secs=self.sdk_timeout,
                    retry_exceptions=self.retry_exceptions,
                    ignore_exceptions=self.ignore_exceptions)
                tasks_info.update(read_task_info.items())
                count += 1

        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.log.info("Waiting for ep-queues to get drained")
        self.bucket_util._wait_for_stats_all_buckets()
        self.log.info("test_read_docs_using_multithreads ends")

    def test_basic_create_delete(self):
        """
        CREATE(n) -> DELETE(n) -> DISK_USAGE_CHECK
        Repeat the above test_itr times
        """
        self.log.info("Creating and deleting docs n times")
        count = 0
        start = 0
        end = self.num_items
        self.gen_delete = doc_generator(
            self.key,
            start,
            end,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets,
            key_size=self.key_size,
            randomize_doc_size=self.randomize_doc_size,
            randomize_value=self.randomize_value,
            mix_key_size=self.mix_key_size,
            deep_copy=self.deep_copy)

        while count < self.test_itr:
            self.doc_ops = "delete"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.log.info("Verifying doc counts after delete doc_ops")
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)
            disk_usage = self.get_disk_usage(
                self.bucket_util.get_all_buckets()[0], self.servers)
            _res = disk_usage[0]
            self.log.info("disk usage after delete count {} \
            is {}MB".format(count + 1, _res))
            self.assertIs(
                _res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]], False,
                "Disk Usage {}MB After \
                delete count {} exceeds \
                Actual disk usage {}MB by \
                2.5 times".format(_res, count + 1,
                                  self.disk_usage[self.disk_usage.keys()[0]]))
            self.doc_ops = "create"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)
            count += 1
        self.log.info("====test_basic_create_delete ends====")
Example n. 16
0
    def test_bucket_durability_upgrade(self):
        update_task = None
        self.sdk_timeout = 60
        create_batch_size = 10000
        if self.atomicity:
            create_batch_size = 10

        # To make sure sync_write can be supported by the initial cluster version
        sync_write_support = True
        if float(self.initial_version[0:3]) < 6.5:
            sync_write_support = False
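        # initial_version is expected to look like "6.0.3"; its first three
        # characters parse to a float (e.g. 6.0), and anything below 6.5 is
        # treated as not supporting sync_write, presumably because durable
        # writes first shipped in 6.5.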

        if sync_write_support:
            self.verification_dict["rollback_item_count"] = 0
            self.verification_dict["sync_write_aborted_count"] = 0

        if self.upgrade_with_data_load:
            self.log.info("Starting async doc updates")
            update_task = self.task.async_continuous_doc_ops(
                self.cluster, self.bucket, self.gen_load,
                op_type=DocLoading.Bucket.DocOps.UPDATE,
                process_concurrency=1,
                persist_to=1,
                replicate_to=1,
                timeout_secs=30)

        self.log.info("Upgrading cluster nodes to target version")
        node_to_upgrade = self.fetch_node_to_upgrade()
        while node_to_upgrade is not None:
            self.log.info("Selected node for upgrade: %s"
                          % node_to_upgrade.ip)
            self.upgrade_function[self.upgrade_type](node_to_upgrade,
                                                     self.upgrade_version)
            try:
                self.cluster.update_master_using_diag_eval(
                    self.cluster.servers[0])
            except Exception:
                self.cluster.update_master_using_diag_eval(
                    self.cluster.servers[self.nodes_init-1])

            create_gen = doc_generator(self.key, self.num_items,
                                       self.num_items+create_batch_size)
            # Validate sync_write results after upgrade
            if self.atomicity:
                sync_write_task = self.task.async_load_gen_docs_atomicity(
                    self.cluster, self.bucket_util.buckets,
                    create_gen, DocLoading.Bucket.DocOps.CREATE,
                    process_concurrency=1,
                    transaction_timeout=self.transaction_timeout,
                    record_fail=True)
            else:
                sync_write_task = self.task.async_load_gen_docs(
                    self.cluster, self.bucket, create_gen,
                    DocLoading.Bucket.DocOps.CREATE,
                    timeout_secs=self.sdk_timeout,
                    process_concurrency=4,
                    sdk_client_pool=self.sdk_client_pool,
                    skip_read_on_error=True,
                    suppress_error_table=True)
            self.task_manager.get_task_result(sync_write_task)
            self.num_items += create_batch_size

            retry_index = 0
            while retry_index < 5:
                self.sleep(3, "Wait for num_items to match")
                current_items = self.bucket_util.get_bucket_current_item_count(
                    self.cluster, self.bucket)
                if current_items == self.num_items:
                    break
                self.log.debug("Num_items mismatch. Expected: %s, Actual: %s"
                               % (self.num_items, current_items))
            # Doc count validation
            self.cluster_util.print_cluster_stats()

            self.verification_dict["ops_create"] += create_batch_size
            self.summary.add_step("Upgrade %s" % node_to_upgrade.ip)

            # Halt further upgrade if test has failed during current upgrade
            if self.test_failure:
                break

            node_to_upgrade = self.fetch_node_to_upgrade()

        if self.upgrade_with_data_load:
            # Wait for update_task to complete
            update_task.end_task()
            self.task_manager.get_task_result(update_task)
        else:
            self.verification_dict["ops_update"] = 0

        # Cb_stats vb-details validation
        failed = self.durability_helper.verify_vbucket_details_stats(
            self.bucket_util.buckets[0],
            self.cluster_util.get_kv_nodes(),
            vbuckets=self.cluster_util.vbuckets,
            expected_val=self.verification_dict)
        if failed:
            self.log_failure("Cbstat vbucket-details validation failed")
        self.summary.add_step("Cbstats vb-details verification")

        self.validate_test_failure()

        possible_d_levels = dict()
        possible_d_levels[Bucket.Type.MEMBASE] = \
            self.bucket_util.get_supported_durability_levels()
        possible_d_levels[Bucket.Type.EPHEMERAL] = [
            Bucket.DurabilityLevel.NONE,
            Bucket.DurabilityLevel.MAJORITY]
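        # Ephemeral buckets have no disk persistence, so only NONE and
        # MAJORITY are applicable there; couchbase (membase) buckets get the
        # full durability-level list from bucket_util.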
        len_possible_d_levels = len(possible_d_levels[self.bucket_type]) - 1

        if not sync_write_support:
            self.verification_dict["rollback_item_count"] = 0
            self.verification_dict["sync_write_aborted_count"] = 0

        # Perform bucket_durability update
        key, value = doc_generator("b_durability_doc", 0, 1).next()
        client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
        for index, d_level in enumerate(possible_d_levels[self.bucket_type]):
            self.log.info("Updating bucket_durability=%s" % d_level)
            self.bucket_util.update_bucket_property(
                self.bucket_util.buckets[0],
                bucket_durability=BucketDurability[d_level])
            self.bucket_util.print_bucket_stats()

            buckets = self.bucket_util.get_all_buckets()
            if buckets[0].durability_level != BucketDurability[d_level]:
                self.log_failure("New bucket_durability not taken")

            self.summary.add_step("Update bucket_durability=%s" % d_level)

            self.sleep(10, "MB-39678: Bucket_d_level change to take effect")

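            # Doc op per durability level: the first level creates the key,
            # the last one deletes it, and every level in between updates it,
            # so one key's full lifecycle is exercised under each
            # bucket_durability setting.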
            if index == 0:
                op_type = DocLoading.Bucket.DocOps.CREATE
                self.verification_dict["ops_create"] += 1
            elif index == len_possible_d_levels:
                op_type = DocLoading.Bucket.DocOps.DELETE
                self.verification_dict["ops_delete"] += 1
            else:
                op_type = DocLoading.Bucket.DocOps.UPDATE
                if "ops_update" in self.verification_dict:
                    self.verification_dict["ops_update"] += 1

            result = client.crud(op_type, key, value,
                                 timeout=self.sdk_timeout)
            if result["status"] is False:
                self.log_failure("Doc_op %s failed on key %s: %s"
                                 % (op_type, key, result["error"]))
            self.summary.add_step("Doc_op %s" % op_type)
        client.close()

        # Cb_stats vb-details validation
        failed = self.durability_helper.verify_vbucket_details_stats(
            self.bucket_util.buckets[0],
            self.cluster_util.get_kv_nodes(),
            vbuckets=self.cluster_util.vbuckets,
            expected_val=self.verification_dict)
        if failed:
            self.log_failure("Cbstat vbucket-details validation failed")
        self.summary.add_step("Cbstats vb-details verification")
        self.validate_test_failure()
Example n. 17
0
class MagmaCrashTests(MagmaBaseTest):
    def setUp(self):
        self.input = TestInputSingleton.input
        self.input.test_params.update({"random_key": True})
        super(MagmaCrashTests, self).setUp()
        self.sdk_timeout = self.input.param("sdk_timeout", 10)
        self.time_unit = "seconds"
        self.graceful = self.input.param("graceful", False)
        self.assertTrue(self.rest.update_autofailover_settings(False, 600),
                        "AutoFailover disabling failed")
        self.crash_th = None
        self.sdk_retry_strategy = self.input.param(
            "sdk_retry_strategy", SDKConstants.RetryStrategy.FAIL_FAST)

    def tearDown(self):
        self.stop_crash = True
        if self.crash_th and self.crash_th.is_alive():
            self.crash_th.join()
        super(MagmaCrashTests, self).tearDown()

    def kill_magma_check_wal_file_size(self):
        nIter = 200
        while nIter > 0:
            shell = RemoteMachineShellConnection(self.cluster.master)
            shell.kill_memcached()
            shell.disconnect()
            #             self.bucket_util._wait_warmup_completed()
            self.sleep(10, "Sleep of 10s so that memcached can restart")
            nIter -= 1

    def test_crash_during_ops(self):
        self.graceful = self.input.param("graceful", False)
        wait_warmup = self.input.param("wait_warmup", True)
        self.log.info("====test_crash_during_ops starts====")

        self.compute_docs_ranges()

        tasks_info = dict()
        for collection in self.collections:
            self.generate_docs(doc_ops=self.doc_ops, target_vbucket=None)
            tem_tasks_info = self.loadgen_docs(
                self.retry_exceptions,
                self.ignore_exceptions,
                scope=CbServer.default_scope,
                collection=collection,
                suppress_error_table=True,
                skip_read_on_error=True,
                _sync=False,
                doc_ops=self.doc_ops,
                track_failures=False,
                sdk_retry_strategy=self.sdk_retry_strategy)
            tasks_info.update(tem_tasks_info.items())

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs=dict(graceful=self.graceful,
                                                     wait=wait_warmup))
        self.crash_th.start()
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")
        self.validate_seq_itr()

    def test_crash_during_recovery(self):
        self.compute_docs_ranges()
        tasks_info = dict()

        for collection in self.collections:
            self.generate_docs(doc_ops=self.doc_ops, target_vbucket=None)
            tem_tasks_info = self.loadgen_docs(
                self.retry_exceptions,
                self.ignore_exceptions,
                scope=CbServer.default_scope,
                collection=collection,
                suppress_error_table=True,
                skip_read_on_error=True,
                _sync=False,
                doc_ops=self.doc_ops,
                track_failures=False,
                sdk_retry_strategy=self.sdk_retry_strategy)
            tasks_info.update(tem_tasks_info.items())

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"kill_itr": 5})
        self.crash_th.start()
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")
        self.validate_seq_itr()

    def test_crash_before_upserts(self):
        self.log.info("test_update_multi starts")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        upsert_doc_list = self.get_fragmentation_upsert_docs_list()

        count = 0
        self.mutate = 0
        while count < self.test_itr:
            self.log.info("Iteration == {}".format(count + 1))

            self.sigkill_memcached(graceful=self.graceful)

            for itr in upsert_doc_list:
                self.doc_ops = "update"
                self.update_start = 0
                self.update_end = itr

                if self.rev_update:
                    self.update_start = -int(itr - 1)
                    self.update_end = 1

                self.generate_docs(doc_ops="update")

                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      suppress_error_table=True,
                                      _sync=True,
                                      doc_ops="update")

                self.bucket_util._wait_for_stats_all_buckets(
                    self.cluster, self.cluster.buckets)

            count += 1

        self.validate_data("update", self.gen_update)
        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("====test_update_multi ends====")

    def test_crash_before_multi_update_deletes(self):
        self.log.info("===test_crash_before_multi_update_deletes starts===")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        count = 0
        self.mutate = 0
        for i in range(self.test_itr):
            self.log.info("Step 1, Iteration= {}".format(i + 1))
            while count < self.update_itr:
                self.sigkill_memcached(graceful=self.graceful)

                self.doc_ops = "update"
                self.update_start = 0
                self.update_end = self.num_items

                self.generate_docs(doc_ops="update")
                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      suppress_error_table=True,
                                      _sync=True,
                                      doc_ops="update")

                self.bucket_util._wait_for_stats_all_buckets(
                    self.cluster, self.cluster.buckets)

                count += 1
            self.update_itr += self.update_itr

            # data validation is done only for the last iteration
            if i + 1 == self.test_itr:
                self.validate_data("update", self.gen_update)

            self.log.debug("Step 2, Iteration {}".format(i + 1))
            self.sigkill_memcached()

            self.doc_ops = "delete"
            self.delete_start = 0
            self.delete_end = self.num_items // 2

            self.generate_docs(doc_ops="delete")
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  suppress_error_table=True,
                                  _sync=True,
                                  doc_ops="delete")

            self.bucket_util._wait_for_stats_all_buckets(
                self.cluster, self.cluster.buckets)
            self.bucket_util.verify_stats_all_buckets(self.cluster,
                                                      self.num_items)

            self.log.debug("Step 3, Iteration= {}".format(i + 1))
            self.sigkill_memcached()

            self.gen_create = copy.deepcopy(self.gen_delete)
            self.doc_ops = "create"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  suppress_error_table=True,
                                  _sync=True,
                                  doc_ops="create")

            self.bucket_util._wait_for_stats_all_buckets(
                self.cluster, self.cluster.buckets)
            self.bucket_util.verify_stats_all_buckets(self.cluster,
                                                      self.num_items)

        self.validate_data("create", self.gen_create)
        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("===test_crash_before_multi_update_deletes ends===")

    def test_crash_during_get_ops(self):

        self.log.info("test_crash_during_get_ops starts")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        tasks_info = dict()
        upsert_doc_list = self.get_fragmentation_upsert_docs_list()

        for itr in upsert_doc_list:
            self.doc_ops = "update"
            self.update_start = 0
            self.update_end = itr
            self.mutate = -1
            self.generate_docs(doc_ops="update")
            update_task_info = self.loadgen_docs(self.retry_exceptions,
                                                 self.ignore_exceptions,
                                                 suppress_error_table=True,
                                                 _sync=False)

            #update_task_info = self.loadgen_docs(
            #    self.retry_exceptions,
            #    self.ignore_exceptions,
            #    _sync=False)
            tasks_info.update(update_task_info.items())

        self.doc_ops = "read"
        self.generate_docs(doc_ops="read")
        start = -int(self.num_items - 1)
        end = 1
        reverse_read_gen = self.genrate_docs_basic(start, end)

        count = 0
        while count < self.read_thread_count:
            read_task_info = self.loadgen_docs(self.retry_exceptions,
                                               self.ignore_exceptions,
                                               suppress_error_table=True,
                                               _sync=False)

            tasks_info.update(read_task_info.items())
            count += 1
            if count < self.read_thread_count:
                read_task_info = self.bucket_util._async_validate_docs(
                    self.cluster,
                    reverse_read_gen,
                    "read",
                    0,
                    batch_size=self.batch_size,
                    process_concurrency=self.process_concurrency,
                    timeout_secs=self.sdk_timeout,
                    retry_exceptions=self.retry_exceptions,
                    ignore_exceptions=self.ignore_exceptions,
                    suppress_error_table=False)
                tasks_info.update(read_task_info.items())
                count += 1

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"graceful": self.graceful})
        self.crash_th.start()
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("test_crash_during_get_ops ends")

    def test_crash_during_upserts_using_multithreads(self):
        self.log.info("test_crash_during_upserts_using_multithreads starts")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        tasks_info = dict()
        self.doc_ops = "update"
        self.update_start = 0
        self.update_end = self.num_items

        count = 0
        while count < self.read_thread_count:
            self.generate_docs(doc_ops="update")
            update_task_info = self.loadgen_docs(
                self.retry_exceptions,
                self.ignore_exceptions,
                suppress_error_table=True,
                skip_read_on_error=True,
                _sync=False,
                track_failures=False,
                sdk_retry_strategy=self.sdk_retry_strategy)
            tasks_info.update(update_task_info.items())
            count += 1
            self.sleep(5)

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"graceful": self.graceful})
        self.crash_th.start()
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")

        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("test_crash_during_upserts_using_multithreads ends")

    def test_crash_during_multi_updates_of_single_doc(self):

        self.log.info(
            "==test_crash_during_multi_updates_of_single_doc starts==")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        self.client = SDKClient([self.cluster.master],
                                self.cluster.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)

        self.doc_ops = "update"
        self.gen_update = self.genrate_docs_basic(start=0, end=1)
        key, val = self.gen_update.next()

        def upsert_doc(start_num, end_num, key_obj, val_obj):
            for i in range(start_num, end_num):
                val_obj.put("mutated", i)
                self.client.upsert(key_obj, val_obj)

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"graceful": self.graceful})
        self.crash_th.start()

        threads = []
        start = 0
        end = 0
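        # Below, 10 threads each run upsert_doc over a disjoint 10-value
        # "mutated" range (0-9, 10-19, ... 90-99), so the same key receives
        # ~100 concurrent upserts through one shared SDK client while the
        # crash thread keeps killing memcached.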
        for _ in range(10):
            start = end
            end += 10
            th = threading.Thread(target=upsert_doc,
                                  args=[start, end, key, val])
            th.start()
            threads.append(th)

        for th in threads:
            th.join()

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")

        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        success, _ = self.client.get_multi([key], self.wait_timeout)
        self.assertIs(key in success,
                      True,
                      msg="key {} doesn't exist\
                      ".format(key))

        expected_val = Json.loads(val.toString())
        actual_val = Json.loads(success[key]['value'].toString())
        self.assertIs(expected_val == actual_val,
                      True,
                      msg="expected_val-{} != Actual_val-{}\
                      ".format(expected_val, actual_val))

        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("==test_crash_during_multi_updates_of_single_doc ends==")

    def test_crash_during_val_movement_across_trees(self):

        self.log.info("==test_crash_during_val_movement_across_trees starts==")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        upsert_size = 0
        if self.doc_size < 32:
            upsert_size = 2048
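        # Assumption behind upsert_size: values below 32 bytes are small
        # enough for magma to keep them inline in the index tree, so
        # re-upserting them at 2048 bytes should push the values into the
        # value tree (and the follow-up pass at the original doc_size moves
        # them back), which is the cross-tree movement this test targets.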

        self.update_start = 0
        self.update_end = self.num_items
        if self.rev_update:
            self.update_start = -int(self.num_items - 1)
            self.update_end = 1
        self.doc_ops = "update"

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"graceful": self.graceful})
        self.crash_th.start()

        count = 0
        while count < self.test_itr:
            self.log.info("Iteration == {}".format(count))

            self.mutate += 1
            self.gen_update = doc_generator(
                self.key,
                self.update_start,
                self.update_end,
                doc_size=upsert_size,
                doc_type=self.doc_type,
                target_vbucket=self.target_vbucket,
                vbuckets=self.cluster.vbuckets,
                key_size=self.key_size,
                mutate=self.mutate,
                randomize_doc_size=self.randomize_doc_size,
                randomize_value=self.randomize_value,
                mix_key_size=self.mix_key_size,
                deep_copy=self.deep_copy)

            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  suppress_error_table=True,
                                  skip_read_on_error=True,
                                  _sync=True,
                                  track_failures=False,
                                  sdk_retry_strategy=self.sdk_retry_strategy)
            self.bucket_util._wait_for_stats_all_buckets(
                self.cluster, self.cluster.buckets)

            self.generate_docs(doc_ops="update")
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  suppress_error_table=True,
                                  skip_read_on_error=True,
                                  _sync=True,
                                  track_failures=False,
                                  sdk_retry_strategy=self.sdk_retry_strategy)
            self.bucket_util._wait_for_stats_all_buckets(
                self.cluster, self.cluster.buckets)

            count += 1
        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")

        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("==test_crash_during_val_movement_across_trees ends==")
Example n. 18
0
    def test_stop_process(self):
        """
        1. Start loading docs into the default bucket
        2. Stop the requested process, which will impact the
           memcached operations
        3. Wait for load bucket task to complete
        4. Validate the docs for durability
        """
        error_to_simulate = self.input.param("simulate_error", None)
        def_bucket = self.bucket_util.buckets[0]
        target_node = self.getTargetNode()
        remote = RemoteMachineShellConnection(target_node)
        error_sim = CouchbaseError(self.log, remote)
        target_vbuckets = self.getVbucketNumbers(remote, def_bucket.name,
                                                 self.target_node)
        if len(target_vbuckets) == 0:
            self.log.error("No target vbucket list generated to load data")
            remote.disconnect()
            return

        # Create doc_generator targeting only the active/replica vbuckets
        # present in the target_node
        gen_load = doc_generator(self.key,
                                 self.num_items,
                                 self.num_items + self.new_docs_to_add,
                                 key_size=self.key_size,
                                 doc_size=self.doc_size,
                                 doc_type=self.doc_type,
                                 target_vbucket=target_vbuckets,
                                 vbuckets=self.cluster_util.vbuckets)

        if self.atomicity:
            task = self.task.async_load_gen_docs_atomicity(
                self.cluster,
                self.bucket_util.buckets,
                gen_load,
                "create",
                exp=0,
                batch_size=10,
                process_concurrency=self.process_concurrency,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                update_count=self.update_count,
                transaction_timeout=self.transaction_timeout,
                commit=True,
                sync=self.sync)
        else:
            task = self.task.async_load_gen_docs(
                self.cluster,
                def_bucket,
                gen_load,
                "create",
                exp=0,
                batch_size=1,
                process_concurrency=8,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                skip_read_on_error=True)

        # Induce the error condition
        error_sim.create(error_to_simulate)

        self.sleep(20, "Wait before reverting the error condition")
        # Revert the simulated error condition and close the ssh session
        error_sim.revert(error_to_simulate)
        remote.disconnect()

        # Wait for doc loading task to complete
        self.task.jython_task_manager.get_task_result(task)
        if not self.atomicity:
            if len(task.fail.keys()) != 0:
                if self.target_node == "active" or self.num_replicas in [2, 3]:
                    self.log_failure("Unwanted failures for keys: %s" %
                                     task.fail.keys())

            validate_passed = \
                self.durability_helper.validate_durability_exception(
                    task.fail,
                    SDKException.DurabilityAmbiguousException)
            if not validate_passed:
                self.log_failure("Unwanted exception seen during validation")

            # Create SDK connection for CRUD retries
            sdk_client = SDKClient([self.cluster.master], def_bucket)
            for doc_key, crud_result in task.fail.items():
                result = sdk_client.crud("create",
                                         doc_key,
                                         crud_result["value"],
                                         replicate_to=self.replicate_to,
                                         persist_to=self.persist_to,
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout)
                if result["status"] is False:
                    self.log_failure("Retry of doc_key %s failed: %s" %
                                     (doc_key, result["error"]))
            # Close the SDK connection
            sdk_client.close()

        # Update self.num_items
        self.num_items += self.new_docs_to_add

        if not self.atomicity:
            # Validate doc count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(self.num_items)

        self.validate_test_failure()
Example n. 19
0
class MagmaFlushBucketTests(MagmaBaseTest):
    def setUp(self):
        super(MagmaFlushBucketTests, self).setUp()
        self.sdk_timeout = self.input.param("sdk_timeout", 100)
        self.assertTrue(self.rest.update_autofailover_settings(False, 600),
                        "AutoFailover disabling failed")
        self.sigkill = self.input.param("sigkill", False)
        self.multiplier = self.input.param("multiplier", 2)
        self.flush_th = None
        self.shell_conn = list()
        for node in self.cluster.nodes_in_cluster:
            shell = RemoteMachineShellConnection(node)
            self.shell_conn.append(shell)

    def tearDown(self):
        self.stop_flush = True
        if self.flush_th and self.flush_th.is_alive():
            self.flush_th.join()
        super(MagmaFlushBucketTests, self).tearDown()

    def bucket_flush(self, sigkill=False, wait=False):
        self.stop_flush = False
        flush_iteration = 1
        while not self.stop_flush:
            for bucket in self.buckets:
                result = self.bucket_util.flush_bucket(self.cluster, bucket)
                if sigkill and result:
                    for shell in self.shell_conn:
                        shell.kill_memcached()
            if wait:
                for node in self.cluster.nodes_in_cluster:
                    if "kv" in node.services:
                        result = self.bucket_util._wait_warmup_completed(
                                    [node],
                                    self.cluster.buckets[0],
                                    wait_time=self.wait_timeout * 5)
                        if not result:
                            msg = "warm-up couldn't complete in %s seconds" %\
                                (self.wait_timeout * 5)
                            self.log.critical(msg)
                            self.task.jython_task_manager.abort_all_tasks()
                            self.stop_flush = True
            sleep = random.randint(30, 60)
            self.sleep(sleep, "Iteration:{} done,  waiting for {} sec after bucket flush".
                       format(flush_iteration, sleep))
            flush_iteration += 1
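
    # Usage sketch (mirrors the tests below): bucket_flush() is meant to run
    # on a background thread while doc-load tasks execute, and is stopped by
    # flipping self.stop_flush before joining, e.g.:
    #
    #   self.flush_th = threading.Thread(target=self.bucket_flush,
    #                                    kwargs=dict(sigkill=self.sigkill))
    #   self.flush_th.start()
    #   ...  # run document load tasks
    #   self.stop_flush = True
    #   self.flush_th.join()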

    def loadgen_docs_per_bucket(self, bucket,
                                retry_exceptions=None,
                                ignore_exceptions=None,
                                skip_read_on_error=False,
                                suppress_error_table=False,
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection,
                                _sync=True,
                                track_failures=True,
                                doc_ops=None):
        doc_ops = doc_ops or self.doc_ops
        # Avoid mutable default arguments and never mutate the caller's lists
        retry_exceptions = list(retry_exceptions or [])
        ignore_exceptions = list(ignore_exceptions or [])

        tasks_info = dict()
        tem_tasks_info = dict()
        read_tasks_info = dict()
        read_task = False

        if self.check_temporary_failure_exception:
            retry_exceptions.append(SDKException.TemporaryFailureException)

        if "update" in doc_ops and self.gen_update is not None:
            task = self.bucket_util.async_load_bucket(
                self.cluster, bucket, self.gen_update,  "update", 0,
                batch_size=self.batch_size,
                process_concurrency=self.process_concurrency,
                persist_to=self.persist_to, replicate_to=self.replicate_to,
                durability=self.durability_level,
                sdk_timeout=self.sdk_timeout, retries=self.sdk_retries,
                skip_read_on_error=skip_read_on_error,
                suppress_error_table=suppress_error_table,
                scope=scope,
                collection=collection,
                monitor_stats=self.monitor_stats,
                track_failures=track_failures)
            tem_tasks_info[task] = self.bucket_util.get_doc_op_info_dict(
                bucket, "update", 0,
                scope=scope,
                collection=collection,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout=self.sdk_timeout, time_unit="seconds",
                ignore_exceptions=ignore_exceptions,
                retry_exceptions=retry_exceptions)
            tasks_info.update(tem_tasks_info.items())
        if "create" in doc_ops and self.gen_create is not None:
            task = self.bucket_util.async_load_bucket(
                self.cluster, bucket, self.gen_create, "create", 0,
                batch_size=self.batch_size,
                process_concurrency=self.process_concurrency,
                persist_to=self.persist_to, replicate_to=self.replicate_to,
                durability=self.durability_level,
                sdk_timeout=self.sdk_timeout, retries=self.sdk_retries,
                skip_read_on_error=skip_read_on_error,
                suppress_error_table=suppress_error_table,
                scope=scope,
                collection=collection,
                monitor_stats=self.monitor_stats,
                track_failures=track_failures)
            tem_tasks_info[task] = self.bucket_util.get_doc_op_info_dict(
                bucket, "create", 0,
                scope=scope,
                collection=collection,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout=self.sdk_timeout, time_unit="seconds",
                ignore_exceptions=ignore_exceptions,
                retry_exceptions=retry_exceptions)
            tasks_info.update(tem_tasks_info.items())
            self.num_items += (self.gen_create.end - self.gen_create.start)
        if "expiry" in doc_ops and self.gen_expiry is not None and self.maxttl:
            task = self.bucket_util.async_load_bucket(
                self.cluster, bucket, self.gen_expiry, "update", self.maxttl,
                batch_size=self.batch_size,
                process_concurrency=self.process_concurrency,
                persist_to=self.persist_to, replicate_to=self.replicate_to,
                durability=self.durability_level,
                sdk_timeout=self.sdk_timeout, retries=self.sdk_retries,
                skip_read_on_error=skip_read_on_error,
                suppress_error_table=suppress_error_table,
                scope=scope,
                collection=collection,
                monitor_stats=self.monitor_stats,
                track_failures=track_failures)
            tem_tasks_info[task] = self.bucket_util.get_doc_op_info_dict(
                bucket, "update", 0,
                scope=scope,
                collection=collection,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout=self.sdk_timeout, time_unit="seconds",
                ignore_exceptions=ignore_exceptions,
                retry_exceptions=retry_exceptions)
            tasks_info.update(tem_tasks_info.items())
            self.num_items -= (self.gen_expiry.end - self.gen_expiry.start)
        if "read" in doc_ops and self.gen_read is not None:
            read_tasks_info = self.bucket_util.async_validate_docs(
               self.cluster, bucket, self.gen_read, "read", 0,
               batch_size=self.batch_size,
               process_concurrency=self.process_concurrency,
               timeout_secs=self.sdk_timeout,
               retry_exceptions=retry_exceptions,
               ignore_exceptions=ignore_exceptions,
               scope=scope,
               collection=collection)
            read_task = True
        if "delete" in doc_ops and self.gen_delete is not None:
            task = self.bucket_util.async_load_bucket(
                self.cluster, bucket, self.gen_delete, "delete", 0,
                batch_size=self.batch_size,
                process_concurrency=self.process_concurrency,
                persist_to=self.persist_to, replicate_to=self.replicate_to,
                durability=self.durability_level,
                sdk_timeout=self.sdk_timeout, retries=self.sdk_retries,
                skip_read_on_error=skip_read_on_error,
                suppress_error_table=suppress_error_table,
                scope=scope,
                collection=collection,
                monitor_stats=self.monitor_stats,
                track_failures=track_failures)
            tem_tasks_info[task] = self.bucket_util.get_doc_op_info_dict(
                bucket, "delete", 0,
                scope=scope,
                collection=collection,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout=self.sdk_timeout, time_unit="seconds",
                ignore_exceptions=ignore_exceptions,
                retry_exceptions=retry_exceptions)
            tasks_info.update(tem_tasks_info.items())
            self.num_items -= (self.gen_delete.end - self.gen_delete.start)

        if _sync:
            for task in tasks_info:
                self.task_manager.get_task_result(task)

            self.bucket_util.verify_doc_op_task_exceptions(tasks_info,
                                                           self.cluster)
            self.bucket_util.log_doc_ops_task_failures(tasks_info)

        if read_task:
            # TODO: Need to converge read_tasks_info into tasks_info before
            #       itself to avoid confusions during _sync=False case
            tasks_info.update(read_tasks_info.items())
            if _sync:
                for task in read_tasks_info:
                    self.task_manager.get_task_result(task)

        return tasks_info

    def compute_docs(self, start, mem_only_items, doc_ops=None):
        doc_ops = doc_ops or self.doc_ops
        ops_len = len(doc_ops.split(":"))
        if "create" in doc_ops:
            self.create_start = start
            self.create_end = mem_only_items
        if ops_len == 1:
            if "update" in doc_ops:
                self.update_start = 0
                self.update_end = mem_only_items
            if "delete" in doc_ops:
                self.delete_start = 0
                self.delete_end = mem_only_items
            if "expiry" in doc_ops:
                self.expiry_start = 0
                self.expiry_end = mem_only_items
        elif ops_len == 2:
            self.expiry_start = 0
            self.expiry_end = mem_only_items
            self.delete_start = start // 2
            self.delete_end = mem_only_items
            if "update" in doc_ops:
                self.delete_start = 0
                self.delete_end = mem_only_items
                self.update_start = start // 2
                self.update_end = mem_only_items
        else:
            self.expiry_start = 0
            self.expiry_end = mem_only_items
            self.delete_start = start // 3
            self.delete_end = mem_only_items
            self.update_start = (2 * start) // 3
            self.update_end = mem_only_items
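
        # Worked example (illustrative, purely mechanical): compute_docs(1000, 500)
        # with doc_ops="update" (ops_len == 1) sets update_start=0 and
        # update_end=500; with doc_ops="expiry:delete" (ops_len == 2) it sets
        # expiry_start=0, expiry_end=500, delete_start=500 (start // 2) and
        # delete_end=500.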

    def test_flush_bucket_during_creates(self):

        self.log.info("====test_flush_bucket_during_creates starts====")
        self.create_start = self.init_items_per_collection
        self.create_end = self.init_items_per_collection * self.multiplier
        self.generate_docs(doc_ops=self.doc_ops, target_vbucket=None)

        tasks_info = dict()
        for scope in self.scopes:
            for collection in self.collections:
                task_info = self.loadgen_docs(
                    self.retry_exceptions,
                    self.ignore_exceptions,
                    scope=scope,
                    collection=collection,
                    suppress_error_table=True,
                    skip_read_on_error=True,
                    _sync=False,
                    doc_ops=self.doc_ops,
                    track_failures=False,
                    sdk_retry_strategy=SDKConstants.RetryStrategy.FAIL_FAST)
                tasks_info.update(task_info.items())

        self.flush_th = threading.Thread(target=self.bucket_flush,
                                         kwargs=dict(sigkill=self.sigkill))
        self.flush_th.start()
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.stop_flush = True
        self.flush_th.join()

    def test_create_fragmentation_before_flush_bucket(self):
        self.log.info("====test_create_fragmentation_before_flush_bucket starts====")
        self.doc_ops = "update"
        self.num_threads = self.input.param("num_threads", 100)
        count = 0

        self.client = SDKClient([self.cluster.master],
                                self.cluster.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)

        self.gen_update = self.genrate_docs_basic(start=0, end=1)

        key, val = self.gen_update.next()

        def upsert_doc(start_num, end_num, key_obj, val_obj):
            print threading.currentThread().getName(), 'Starting'
            for i in range(start_num, end_num):
                val_obj.put("mutated", i)
                self.client.upsert(key_obj, val_obj)
            if threading.currentThread().getName() == 't'+str(self.num_threads):
                self.bucket_util.flush_bucket(self.cluster, self.cluster.buckets[0])

            print threading.currentThread().getName(), 'Exiting'
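
        # Design note (illustrative): self.num_threads + 1 threads upsert the
        # same key with increasing "mutated" values to build fragmentation on
        # a single document; the thread named 't<num_threads>' (the last one
        # created) additionally flushes the bucket while the other threads may
        # still be upserting.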

        while count < self.test_itr:
            self.log.info("Iteration : {}".format(count))
            self.generate_docs(create_start=0,
                               create_end=self.init_items_per_collection,
                               doc_ops="create")
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  doc_ops="create",
                                  _sync=True)
            threads = []
            start = 0
            end = 0
            for i in range(self.num_threads+1):
                start = end
                end += self.init_items_per_collection
                th = threading.Thread(name='t'+str(i),
                                      target=upsert_doc, args=[start, end, key, val])
                th.start()
                threads.append(th)

            for th in threads:
                th.join()

            count += 1

    def test_flush_bucket_during_rollback(self):
        '''
        Test focus: Stop persistence on all nodes one at a time,
                    trigger rollback on the other nodes,
                    and flush the data during rollback.
                    The above steps are repeated num_rollbacks
                    (variable defined in the test) times

        STEPS:
         -- Ensure creation of at least a single state file
         -- The steps below are repeated on all nodes, stopping persistence on one node at a time
         -- Stop persistence on node x
         -- Start load on node x for a given duration (self.duration * 60 seconds)
         -- The above step ensures creation of new state files (count equal to self.duration)
         -- Kill memcached on node x
         -- Trigger rollback on the other/replica nodes
         -- Restart persistence on node x
         -- Repeat all the above steps num_rollbacks times
        '''
        self.assertTrue(self.rest.update_autofailover_settings(False, 600),
                        "AutoFailover disabling failed")
        items = copy.deepcopy(self.init_items_per_collection)
        mem_only_items = self.input.param("rollback_items", 10000)

        ops_len = len(self.doc_ops.split(":"))

        if self.nodes_init < 2 or self.num_replicas < 1:
            self.fail("Not enough nodes/replicas in the cluster/bucket \
            to test rollback")

        self.duration = self.input.param("duration", 2)
        self.num_rollbacks = self.input.param("num_rollbacks", 3)

        #######################################################################
        '''
        STEP - 1, Ensure creation of at least one snapshot

        To ensure at least one snapshot gets created before rollback
        starts, we need to sleep for 60 seconds, since by magma design a
        state file is created every 60s
        '''
        self.sleep(60, "Ensures creation of at least one snapshot")
        #######################################################################
        '''
        STEP - 2,  Stop persistence on node - x
        '''

        for i in range(1, self.num_rollbacks+1):
            self.log.info("Roll back Iteration == {}".format(i))
            start = items
            for x, node in enumerate(self.cluster.nodes_in_cluster):
                shell = RemoteMachineShellConnection(node)
                cbstats = Cbstats(shell)
                self.target_vbucket = cbstats.vbucket_list(
                    self.cluster.buckets[0].name)
                mem_item_count = 0
                # Stopping persistence on Node-x
                self.log.debug("Iteration == {}, Stopping persistence on Node-{}, ip ={}"
                               .format(i, x+1, node))
                Cbepctl(shell).persistence(self.cluster.buckets[0].name, "stop")

                ###############################################################
                '''
                STEP - 3
                  -- Load documents on node  x for  self.duration * 60 seconds
                  -- This step ensures new state files (number equal to self.duration)
                '''
                self.compute_docs(start, mem_only_items)
                self.gen_create = None
                self.gen_update = None
                self.gen_delete = None
                self.gen_expiry = None
                time_end = time.time() + 60 * self.duration
                itr = 0
                while time.time() < time_end:
                    itr += 1
                    time_start = time.time()
                    mem_item_count += mem_only_items * ops_len
                    self.generate_docs(doc_ops=self.doc_ops,
                                       target_vbucket=self.target_vbucket)
                    self.loadgen_docs(_sync=True,
                                      retry_exceptions=self.retry_exceptions)
                    if self.gen_create is not None:
                        self.create_start = self.gen_create.key_counter
                    if self.gen_update is not None:
                        self.update_start = self.gen_update.key_counter
                    if self.gen_delete is not None:
                        self.delete_start = self.gen_delete.key_counter
                    if self.gen_expiry is not None:
                        self.expiry_start = self.gen_expiry.key_counter

                    if time.time() < time_start + 60:
                        self.log.info("Rollback Iteration== {}, itr== {}, Active-Node=={}, Node=={}".format(i, itr, x+1, node))
                        self.sleep(time_start + 60 - time.time(),
                                   "Sleep to ensure creation of state files for roll back")
                        self.log.info("state files == {}".format(
                                     self.get_state_files(self.buckets[0])))
                ep_queue_size_map = {node: mem_item_count}
                if self.durability_level:
                    self.log.info("Doubling the expected num_items on disk "
                                  "due to durability")
                    ep_queue_size_map = {node: mem_item_count * 2}
                vb_replica_queue_size_map = {node: 0}

                for nod in self.cluster.nodes_in_cluster:
                    if nod != node:
                        ep_queue_size_map.update({nod: 0})
                        vb_replica_queue_size_map.update({nod: 0})

                for bucket in self.cluster.buckets:
                    self.bucket_util._wait_for_stat(bucket, ep_queue_size_map,
                                                    timeout=1200)
                    self.bucket_util._wait_for_stat(bucket, vb_replica_queue_size_map,
                                                    cbstat_cmd="all",
                                                    stat_name="vb_replica_queue_size",
                                                    timeout=1200)
                # Log failover stats for each bucket
                for bucket in self.cluster.buckets:
                    self.log.debug(cbstats.failover_stats(bucket.name))

                ###############################################################
                '''
                STEP - 4
                  -- Kill Memcached on Node - x and trigger rollback on other nodes
                  -- After 20 seconds , flush bucket
                '''

                shell.kill_memcached()
                self.sleep(20, "sleep after killing memcached")
                self.bucket_util.flush_bucket(self.cluster, self.cluster.buckets[0])
                ###############################################################
                '''
                STEP -5
                 -- Restarting persistence on Node -- x
                '''
                self.assertTrue(self.bucket_util._wait_warmup_completed(
                    [self.cluster.master],
                    self.cluster.buckets[0],
                    wait_time=self.wait_timeout * 10))

                self.log.debug("Iteration=={}, Re-Starting persistence on Node -- {}".format(i, node))
                Cbepctl(shell).persistence(self.cluster.buckets[0].name, "start")

                self.sleep(5, "Sleep after re-starting persistence, Iteration{}".format(i))
                shell.disconnect()
                ###################################################################
                '''
                STEP - 6
                  -- Load Docs on all the nodes
                  -- Loading of doc for 60 seconds
                  -- Ensures creation of new state file
                '''
                self.create_start = 0
                self.create_end = self.init_items_per_collection
                self.generate_docs(doc_ops="create", target_vbucket=None)
                self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions, _sync=True,
                                  doc_ops="create")
                self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                             self.cluster.buckets,
                                                             timeout=1200)

    def test_flush_bucket_during_create_delete_kvstores(self):
        """
        Test Focus: Create and Delete bucket multiple times.

                    Since buckets are already created in magma base
                    we'll start by deleting the buckets, then will recreate

        STEPS:
             -- Doc ops on the buckets which we'll not be deleting (optional)
             -- Delete already existing buckets
             -- Recreate new buckets
             -- Doc ops on buckets
             -- Repeat all the above steps
        """
        self.log.info("=====test_create_delete_bucket_n_times starts=====")
        count = 0
        self.num_delete_buckets = self.input.param("num_delete_buckets", 1)
        '''
        Sorting bucket list
        '''
        bucket_lst = []
        for bucket in self.cluster.buckets:
            bucket_lst.append((bucket, bucket.name))
        bucket_lst = sorted(bucket_lst, key=lambda x: x[-1])
        self.log.debug("bucket list is {}".format(bucket_lst))
        bucket_ram_quota = bucket_lst[0][0].ramQuotaMB
        self.log.debug("ram_quota is {}".format(bucket_ram_quota))

        scope_name = CbServer.default_scope

        while count < self.test_itr:
            self.log.info("Iteration=={}".format(count+1))
            start = self.init_items_per_collection
            '''
            Step 1
              -- Doc loading to buckets, which will not be getting deleted
            '''
            self.compute_docs(start, start//2)
            self.generate_docs(doc_ops=self.doc_ops, target_vbucket=None)
            tasks_info = dict()
            for bucket, _ in bucket_lst[self.num_delete_buckets:]:
                self.log.debug("Iteration=={}, Bucket=={}".format(count+1, bucket.name))
                for collection in self.collections:
                    tem_tasks_info = self.loadgen_docs_per_bucket(bucket, self.retry_exceptions,
                                                                      self.ignore_exceptions,
                                                                      scope=scope_name,
                                                                      collection=collection,
                                                                      _sync=False,
                                                                      doc_ops=self.doc_ops)
                    tasks_info.update(tem_tasks_info.items())
            '''
            Step 2
             -- Start flushing the buckets
             -- Deletion of buckets
            '''
            self.flush_th = threading.Thread(target=self.bucket_flush,
                                         kwargs=dict(sigkill=self.sigkill))
            self.flush_th.start()

            for bucket, _ in bucket_lst[:self.num_delete_buckets]:
                self.log.info("Iteration=={}, Deleting bucket=={}".format(count+1, bucket.name))
                self.bucket_util.delete_bucket(self.cluster, bucket)
                self.sleep(30, "waiting for 30 seconds after deletion of bucket")

            for task in tasks_info:
                self.task_manager.get_task_result(task)

            self.stop_flush = True
            self.flush_th.join()

            '''
            Step 4
            -- Loading to buckets which are not deleted
            -- Bucket recreation steps
            '''
            self.log.debug("Doc loading after flush")
            start = 0
            self.compute_docs(start, self.init_items_per_collection, doc_ops="create")
            self.generate_docs(doc_ops="create", target_vbucket=None)
            tasks_info = dict()
            for bucket, _ in bucket_lst[self.num_delete_buckets:]:
                self.log.debug("Loading after flush, Iteration=={}, Bucket=={}".
                               format(count+1, bucket.name))
                for collection in self.collections:
                    tem_tasks_info = self.loadgen_docs_per_bucket(bucket, self.retry_exceptions,
                                                                      self.ignore_exceptions,
                                                                      scope=scope_name,
                                                                      collection=collection,
                                                                      _sync=False,
                                                                      doc_ops="create")
                    tasks_info.update(tem_tasks_info.items())
            self.sleep(30, "Ensuring doc loading for 30 seconds")

            self.flush_th = threading.Thread(target=self.bucket_flush,
                                         kwargs=dict(sigkill=self.sigkill))
            self.flush_th.start()
            buckets_created = self.bucket_util.create_multiple_buckets(
                self.cluster, self.num_replicas,
                bucket_count=self.num_delete_buckets,
                bucket_type=self.bucket_type,
                storage={"couchstore": 0,
                         "magma": self.num_delete_buckets},
                eviction_policy=self.bucket_eviction_policy,
                ram_quota=bucket_ram_quota,
                bucket_name=self.bucket_name)

            self.stop_flush = True
            self.flush_th.join()

            if not buckets_created:
                buckets_created = self.bucket_util.create_multiple_buckets(
                    self.cluster, self.num_replicas,
                    bucket_count=self.num_delete_buckets,
                    bucket_type=self.bucket_type,
                    storage={"couchstore": 0,
                             "magma": self.num_delete_buckets},
                    eviction_policy=self.bucket_eviction_policy,
                    ram_quota=bucket_ram_quota,
                    bucket_name=self.bucket_name,
                    flush_enabled=1)

            self.assertTrue(buckets_created, "Unable to create multiple buckets after bucket deletion")

            for bucket in self.cluster.buckets:
                ready = self.bucket_util.wait_for_memcached(
                    self.cluster.master,
                    bucket)
                self.assertTrue(ready, msg="Wait_for_memcached failed")

            task_info = dict()
            bucket_lst = []
            for bucket in self.cluster.buckets:
                bucket_lst.append((bucket, bucket.name))
            bucket_lst = sorted(bucket_lst, key=lambda x: x[-1])
            self.log.debug("Iteration=={}, Bucket list after recreation of bucket =={} ".
                           format(count+1, bucket_lst))

            '''
            Step 5
            -- Doc loading in all buckets
            '''
            self.generate_docs(create_end=self.init_items_per_collection,
                               create_start=0,
                               doc_ops="create",
                               target_vbucket=None)
            for bucket, _ in bucket_lst:
                self.log.info("Iteration=={}, doc loading  to bucket=={}".format(count+1, bucket.name))
                for collection in self.collections:
                    tem_task_info = self.loadgen_docs_per_bucket(bucket, self.retry_exceptions,
                                                                 self.ignore_exceptions,
                                                                 scope=scope_name,
                                                                 collection=collection,
                                                                 _sync=False,
                                                                 doc_ops="create")
                    task_info.update(tem_task_info.items())

            for task in task_info:
                self.task_manager.get_task_result(task)
            count += 1

    def test_flush_bucket_during_data_persistence(self):
        self.assertTrue(self.rest.update_autofailover_settings(False, 600),
                        "AutoFailover disabling failed")
        count = 0
        start = copy.deepcopy(self.init_items_per_collection)
        while count < self.test_itr:
            self.log.info("Iteration {}".format(count+1))
            self.compute_docs(start, start)
            for shell in self.shell_conn:
                for bucket in self.cluster.buckets:
                    Cbepctl(shell).persistence(bucket.name, "stop")
            self.generate_docs()
            tasks_info = dict()
            for scope in self.scopes:
                for collection in self.collections:
                    task_info = self.loadgen_docs(
                        self.retry_exceptions,
                        self.ignore_exceptions,
                        scope=scope,
                        collection=collection,
                        suppress_error_table=True,
                        skip_read_on_error=True,
                        _sync=False,
                        doc_ops=self.doc_ops,
                        track_failures=False,
                        sdk_retry_strategy=SDKConstants.RetryStrategy.FAIL_FAST)
                    tasks_info.update(task_info.items())
            for task in tasks_info:
                self.task_manager.get_task_result(task)
            for shell in self.shell_conn:
                for bucket in self.cluster.buckets:
                    Cbepctl(shell).persistence(bucket.name, "start")
            self.sleep(10, "sleep before flush thread")
            for bucket in self.buckets:
                self.bucket_util.flush_bucket(self.cluster, bucket)
            count += 1
Example n. 20
0
    def test_update_single_doc_n_times(self):
        """
        Test Focus: Update single/same doc n times

          Note: Multithreading is used to update a
                single doc. Since we are not worried
                about what the final mutate value of
                the document should be, semaphores
                have been avoided. Multithreading also
                speeds up the execution of the test
        """
        self.log.info("test_update_single_doc_n_times starts")
        self.doc_ops = "update"

        self.client = SDKClient([self.cluster.master],
                                self.bucket_util.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)

        self.gen_update = self.genrate_docs_basic(start=0, end=1)

        key, val = self.gen_update.next()
        for node in self.cluster.nodes_in_cluster:
            shell = RemoteMachineShellConnection(node)
            shell.restart_couchbase()
            shell.disconnect()
            self.assertTrue(
                self.bucket_util._wait_warmup_completed(
                    [self.cluster_util.cluster.master],
                    self.bucket_util.buckets[0],
                    wait_time=self.wait_timeout * 10))

        def upsert_doc(start_num, end_num, key_obj, val_obj):
            for i in range(start_num, end_num):
                val_obj.put("mutated", i)
                self.client.upsert(key_obj, val_obj)

        threads = []
        start = 0
        end = 0
        for _ in range(10):
            start = end
            end += 100000
            th = threading.Thread(
                target=upsert_doc, args=[start, end, key, val])
            th.start()
            threads.append(th)

        for th in threads:
            th.join()

        self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

        # Space amplification check
        msg_stats = "Fragmentation value for {} stats exceeds\
        the configured value"
        _result = self.check_fragmentation_using_magma_stats(
            self.buckets[0],
            self.cluster.nodes_in_cluster)
        self.assertIs(_result, True,
                      msg_stats.format("magma"))

        _r = self.check_fragmentation_using_bucket_stats(
            self.buckets[0], self.cluster.nodes_in_cluster)
        self.assertIs(_r, True,
                      msg_stats.format("KV"))

        disk_usage = self.get_disk_usage(
            self.buckets[0],
            self.cluster.nodes_in_cluster)
        self.log.debug("Disk usage after updates {}".format(
            disk_usage))
        _res = disk_usage[0]
        msg = "Disk Usage = {}MB exceeds 2.2 times \
        from Actual disk usage = {}MB"
        self.assertIs(
            _res > 2.2 * self.disk_usage[
                self.disk_usage.keys()[0]],
            False,
            msg.format(_res, self.disk_usage[self.disk_usage.keys()[0]]))
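        # Worked example (illustrative): if the baseline recorded in
        # self.disk_usage was 1024 MB, the assertion above only fails once the
        # post-update disk usage exceeds 2.2 * 1024 ~= 2253 MB.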
        # Space amplification check ends

        success, fail = self.client.get_multi([key],
                                              self.wait_timeout)

        self.assertIs(key in success, True,
                      msg="key {} doesn't exist\
                      ".format(key))
        actual_val = dict()
        expected_val = Json.loads(val.toString())
        actual_val = Json.loads(success[key][
            'value'].toString())
        self.log.debug("Expected_val= {} and actual_val = {}\
        ".format(expected_val, actual_val))
        self.assertIs(expected_val == actual_val, True,
                      msg="expected_val-{} != Actual_val-{}\
                      ".format(expected_val, actual_val))

        self.change_swap_space(self.cluster.nodes_in_cluster,
                               disable=False)
        self.log.info("====test_update_single_doc_n_times ends====")
Example n. 21
0
class BasicUpsertTests(BasicCrudTests):
    def test_update_n_times(self):
        """
        Test Focus: Update items n times and
                    test space amplification
        STEPS:
          -- Update items n times (where n is calculated
             from the fragmentation value)
          -- Check space amplification
          -- Repeat the above steps n times
          -- After all iterations validate the data
        """

        self.log.info("test_update_n_times starts")
        upsert_doc_list = self.get_fragmentation_upsert_docs_list()
        self.mutate = 0
        count = 0

        while count < self.test_itr:
            self.log.info("Iteration == {}".format(count+1))
            #######################################################################
            '''
            STEP - 1, Update Items

            '''
            for itr in upsert_doc_list:
                self.doc_ops = "update"
                self.update_start = 0
                self.update_end = itr

                if self.rev_update:
                    self.update_start = -int(itr - 1)
                    self.update_end = 1

                self.generate_docs(doc_ops="update")
                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      _sync=True)
                self.log.info("Waiting for ep-queues to get drained")
                self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

            #######################################################################
            '''
            STEP - 2, Space Amplification Check

            '''
            msg = "Fragmentation value for {} stats exceeds\
            the configured value"

            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg.format("KV"))

            usage_factor = ((float(
                    self.num_items + sum(upsert_doc_list)
                    ) / self.num_items) + 0.5)
            self.log.debug("Disk usage factor = {}".format(usage_factor))

            time_end = time.time() + 60 * 2
            while time.time() < time_end:
                disk_usage = self.get_disk_usage(self.buckets[0],
                                            self.cluster.nodes_in_cluster)
                _res = disk_usage[0]
                self.log.debug("usage at time {} is {}".format((time_end - time.time()), _res))
                if _res < usage_factor * self.disk_usage[self.disk_usage.keys()[0]]:
                    break

            msg = "Iteration= {}, Disk Usage = {}MB\
            exceeds {} times from Actual disk usage = {}MB"
            self.assertIs(_res > usage_factor * self.disk_usage[
                self.disk_usage.keys()[0]],
                False, msg.format(count+1, _res, usage_factor,
                                  self.disk_usage[self.disk_usage.keys()[0]]))

            count += 1
        #######################################################################
        '''
        STEP - 3, Data Validation

        '''
        self.validate_data("update", self.gen_update)

        self.change_swap_space(self.cluster.nodes_in_cluster,
                               disable=False)
        self.log.info("====test_update_n_times ends====")

    def test_multi_update_delete(self):
        """
        STEPS:
          -- Update items x times
          -- Check space amplification
          -- Delete half of the items
          -- Check space Amplification
          -- Recreate deleted items
          -- Check Space Amplification
          -- Repeat above steps for n times
          -- After all iterations validate the data
        """
        self.log.info("==== test_multi_update_delete starts =====")

        count = 0
        msg_stats = "Fragmentation value for {} stats exceeds\
        the configured value"
        msg = "{} Iteration= {}, Disk Usage = {}MB\
         exceeds 2.5 times from Actual disk usage = {}MB"

        self.mutate = 0
        for i in range(self.test_itr):
            self.log.info("Step 1, Iteration= {}".format(i+1))
            #######################################################################
            '''
            STEP - 1, Update Items, update_itr times

            '''
            while count < self.update_itr:
                self.doc_ops = "update"
                self.update_start = 0
                self.update_end = self.num_items
                if self.rev_update:
                    self.update_start = -int(self.num_items - 1)
                    self.update_end = 1

                self.generate_docs(doc_ops="update")
                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      _sync=True)

                self.log.info("Waiting for ep-queues to get drained")
                self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

                ###################################################################
                '''
                  STEP - 2
                   -- Space Amplification check after each update iteration.
                   -- Data validation only for last update iteration
                '''
                self.log.info("Step 2, Iteration= {}".format(i+1))
                _result = self.check_fragmentation_using_magma_stats(
                    self.buckets[0],
                    self.cluster.nodes_in_cluster)
                self.assertIs(_result, True,
                              msg_stats.format("magma"))

                _r = self.check_fragmentation_using_bucket_stats(
                    self.buckets[0], self.cluster.nodes_in_cluster)
                self.assertIs(_r, True,
                              msg_stats.format("KV"))
                time_end = time.time() + 60 * 2
                while time.time() < time_end:
                    disk_usage = self.get_disk_usage(self.buckets[0],
                                                     self.cluster.nodes_in_cluster)
                    _res = disk_usage[0]
                    self.log.info("Update Iteration-{}, Disk Usage at time {} is {}MB \
                    ".format(count+1, time_end - time.time(), _res))
                    if _res < 2.5 * self.disk_usage[self.disk_usage.keys()[0]]:
                        break

                self.assertIs(
                    _res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]],
                    False, msg.format("update", count+1, _res,
                                      self.disk_usage[self.disk_usage.keys()[0]]))

                count += 1
            self.update_itr += self.update_itr

            if i+1 == self.test_itr:
                self.validate_data("update", self.gen_update)
            ###################################################################
            '''
            STEP - 3
              -- Delete half of the docs.
            '''

            self.log.debug("Step 3, Iteration {}".format(i+1))
            self.doc_ops = "delete"

            self.delete_start = 0
            self.delete_end = self.num_items//2
            if self.rev_del:
                self.delete_start = -int(self.num_items//2 - 1)
                self.delete_end = 1

            self.generate_docs(doc_ops="delete")
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)

            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)

            ###################################################################
            '''
            STEP - 4
              -- Space Amplification Check after deletion.
            '''
            self.log.debug("Step 4, Iteration {}".format(i+1))
            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                 self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("Delete Iteration {}, Disk Usage- {}MB\
            ".format(i+1, _res))
            self.assertIs(
                _res > 2.5 * self.disk_usage[
                    self.disk_usage.keys()[0]],
                False, msg.format(
                    "delete", i+1, _res,
                    self.disk_usage[self.disk_usage.keys()[0]]))

            ###################################################################
            '''
            STEP - 5
              -- ReCreation of docs.
            '''
            self.log.debug("Step 5, Iteration= {}".format(i+1))

            self.gen_create = copy.deepcopy(self.gen_delete)
            self.doc_ops = "create"

            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)

            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)

            ###################################################################
            '''
            STEP - 6
              -- Space Amplification Check after Recreation.
            '''
            self.log.debug("Step 6, Iteration= {}".format(i+1))

            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("Create Iteration{}, Disk Usage= {}MB \
            ".format(i+1, _res))
            self.assertIs(_res > 2.5 * self.disk_usage[
                self.disk_usage.keys()[0]],
                False, msg.format("Create", i+1, _res,
                                  self.disk_usage[self.disk_usage.keys()[0]]))

        ###################################################################
        '''
        STEP - 7
          -- Validate data
           -- Data validation is only for the creates in last iterations.
        '''
        self.log.debug("Step 7, Iteration= {}".format(i+1))
        self.validate_data("create", self.gen_create)
        self.log.info("====test_multiUpdate_delete ends====")

    def test_update_rev_update(self):
        """
        STEPS:
          -- Update num_items // 2 items.
          -- Reverse-update the remaining num_items // 2 items.
          -- If next_half is false, skip the above step
             and instead reverse-update the items from the first point
          -- Check space amplification
          -- Repeat above steps x times
          -- Delete all the items
          -- Check space Amplification
          -- Recreate deleted items
          -- Check Space Amplification
          -- Repeat above steps for n times
          -- After all iterations validate the data
        """
        self.log.info("==== test_update_rev_update starts =====")

        msg_stats = "Fragmentation value for {} stats exceeds\
        the configured value"
        msg = "{} Iteration= {}, Disk Usage = {}MB\
        exceeds {} times from Actual disk usage = {}MB"

        count = 0
        mutated = 1
        for i in range(self.test_itr):
            self.log.debug("Step 1, Iteration= {}".format(i+1))
            #######################################################################
            '''
            STEP - 1, Update Items, update_itr times
              -- Update n // 2 items
              -- If self.next_half is true,
                 update the remaining n // 2 items
              -- Else, update the items from the
                 first point again, in reverse order
            '''
            while count < self.update_itr:
                tasks_info = dict()
                self.doc_ops = "update"
                self.gen_update = self.genrate_docs_basic(0, self.num_items //2,
                                                          mutate=mutated)
                tem_tasks_info = self.loadgen_docs(self.retry_exceptions,
                                                   self.ignore_exceptions,
                                                   _sync=False)
                tasks_info.update(tem_tasks_info.items())
                if self.next_half:
                    start = - (self.num_items - 1)
                    end = - (self.num_items // 2 - 1)
                    self.gen_update = self.genrate_docs_basic(start, end,
                                                              mutate=mutated)
                    tem_tasks_info = self.loadgen_docs(self.retry_exceptions,
                                                       self.ignore_exceptions,
                                                       _sync=False)
                    tasks_info.update(tem_tasks_info.items())

                for task in tasks_info:
                    self.task_manager.get_task_result(task)
                self.bucket_util.verify_doc_op_task_exceptions(
                    tasks_info, self.cluster)
                self.bucket_util.log_doc_ops_task_failures(tasks_info)
                mutated += 1

                if not self.next_half:
                    start = - (self.num_items - 1)
                    end = - (self.num_items // 2 - 1)
                    self.gen_update = self.genrate_docs_basic(start, end,
                                                              mutate=mutated)
                    _ = self.loadgen_docs(self.retry_exceptions,
                                          self.ignore_exceptions,
                                          _sync=True)
                    mutated += 1

                self.log.info("Waiting for ep-queues to get drained")
                self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
                ###################################################################
                '''
                STEP - 2
                  -- Space Amplification check after each update iteration.
                '''
                self.log.debug("Step 2, Iteration= {}".format(i+1))
                _result = self.check_fragmentation_using_magma_stats(
                    self.buckets[0],
                    self.cluster.nodes_in_cluster)
                self.assertIs(_result, True,
                              msg_stats.format("magma"))

                _r = self.check_fragmentation_using_bucket_stats(
                    self.buckets[0], self.cluster.nodes_in_cluster)
                self.assertIs(_r, True,
                              msg_stats.format("KV"))

                disk_usage = self.get_disk_usage(
                    self.buckets[0], self.cluster.nodes_in_cluster)
                _res = disk_usage[0]
                self.log.info("Update Iteration- {}, Disk Usage- {}MB\
                ".format(count+1, _res))
                self.assertIs(
                    _res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]],
                    False, msg.format("update", count+1, _res, 2.5,
                                      self.disk_usage[self.disk_usage.keys()[0]]))
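                # Illustrative arithmetic (assumed numbers): if the baseline
                # disk usage recorded after the initial create load was
                # 1000 MB, this check fails once post-update usage crosses
                # 2.5 * 1000 = 2500 MB, i.e. space amplification above 2.5x.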

                count += 1
            self.update_itr += self.update_itr

            ###################################################################
            '''
            STEP - 3
              -- Delete all the items.
            '''
            self.log.debug("Step 3, Iteration {}".format(i+1))
            self.doc_ops = "delete"
            self.delete_start = 0
            self.delete_end = self.num_items
            if self.rev_del:
                self.delete_start = -int(self.num_items - 1)
                self.delete_end = 1

            self.generate_docs(doc_ops="delete")
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)

            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)

            ###################################################################
            '''
            STEP - 4
              -- Space Amplification Check after deletion.
            '''
            self.log.debug("Step 4, Iteration {}".format(i+1))
            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                 self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("Delete Iteration {}, Disk Usage- {}MB\
            ".format(i+1, _res))
            self.assertIs(
                _res > 0.5 * self.disk_usage[
                    self.disk_usage.keys()[0]],
                False, msg.format(
                    "delete", i+1, _res, 0.5,
                    self.disk_usage[self.disk_usage.keys()[0]]))
            ###################################################################
            '''
            STEP - 5
              -- ReCreation of docs.
            '''
            self.log.debug("Step 5, Iteration= {}".format(i+1))
            self.gen_create = copy.deepcopy(self.gen_delete)
            self.doc_ops = "create"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)

            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)

            ###################################################################
            '''
            STEP - 6
              -- Space Amplification Check after Recreation.
            '''
            self.log.debug("Step 6, Iteration= {}".format(i+1))
            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("Create Iteration{}, Disk Usage= {}MB \
            ".format(i+1, _res))
            self.assertIs(_res > 1.5 * self.disk_usage[
                self.disk_usage.keys()[0]],
                False, msg.format("Create", _res, i+1, 1.5,
                                  self.disk_usage[self.disk_usage.keys()[0]]))

        ###################################################################
        '''
        STEP - 7
          -- Validate data
           -- Data validation is only for the creates in the last iteration.
        '''
        self.log.debug("Step 7, Iteration= {}".format(i+1))
        self.validate_data("create", self.gen_create)
        self.log.info("====test_update_rev_update ends====")

    def test_update_single_doc_n_times(self):
        """
        Test Focus: Update a single/same doc n times

          Note: Multithreading is used to update the
               single doc. Since we are not concerned
               about the final "mutated" value of the
               document, semaphores have been avoided.
               Multithreading also speeds up the
               execution of the test.
        """
        self.log.info("test_update_single_doc_n_times starts")
        self.doc_ops = "update"

        self.client = SDKClient([self.cluster.master],
                                self.bucket_util.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)

        self.gen_update = self.genrate_docs_basic(start=0, end=1)

        key, val = self.gen_update.next()
        for node in self.cluster.nodes_in_cluster:
            shell = RemoteMachineShellConnection(node)
            shell.restart_couchbase()
            shell.disconnect()
            self.assertTrue(
                self.bucket_util._wait_warmup_completed(
                    [self.cluster_util.cluster.master],
                    self.bucket_util.buckets[0],
                    wait_time=self.wait_timeout * 10))

        def upsert_doc(start_num, end_num, key_obj, val_obj):
            for i in range(start_num, end_num):
                val_obj.put("mutated", i)
                self.client.upsert(key_obj, val_obj)
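        # Each of the 10 threads below upserts the same key 100,000 times
        # with its own "mutated" range, i.e. roughly 1,000,000 upserts of a
        # single document in total. The final "mutated" value is whichever
        # thread's last upsert wins; the test only cares that the doc stays
        # readable and space amplification stays bounded, so no locking used.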

        threads = []
        start = 0
        end = 0
        for _ in range(10):
            start = end
            end += 100000
            th = threading.Thread(
                target=upsert_doc, args=[start, end, key, val])
            th.start()
            threads.append(th)

        for th in threads:
            th.join()

        self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

        # Space amplification check
        msg_stats = "Fragmentation value for {} stats exceeds\
        the configured value"
        _result = self.check_fragmentation_using_magma_stats(
            self.buckets[0],
            self.cluster.nodes_in_cluster)
        self.assertIs(_result, True,
                      msg_stats.format("magma"))

        _r = self.check_fragmentation_using_bucket_stats(
            self.buckets[0], self.cluster.nodes_in_cluster)
        self.assertIs(_r, True,
                      msg_stats.format("KV"))

        disk_usage = self.get_disk_usage(
            self.buckets[0],
            self.cluster.nodes_in_cluster)
        self.log.debug("Disk usage after updates {}".format(
            disk_usage))
        _res = disk_usage[0]
        msg = "Disk Usage = {}MB exceeds 2.2 times \
        from Actual disk usage = {}MB"
        self.assertIs(
            _res > 2.2 * self.disk_usage[
                self.disk_usage.keys()[0]],
            False,
            msg.format(_res, self.disk_usage[self.disk_usage.keys()[0]]))
        # Space amplification check ends

        success, fail = self.client.get_multi([key],
                                              self.wait_timeout)

        self.assertIs(key in success, True,
                      msg="key {} doesn't exist\
                      ".format(key))
        actual_val = dict()
        expected_val = Json.loads(val.toString())
        actual_val = Json.loads(success[key][
            'value'].toString())
        self.log.debug("Expected_val= {} and actual_val = {}\
        ".format(expected_val, actual_val))
        self.assertIs(expected_val == actual_val, True,
                      msg="expected_val-{} != Actual_val-{}\
                      ".format(expected_val, actual_val))

        self.change_swap_space(self.cluster.nodes_in_cluster,
                               disable=False)
        self.log.info("====test_update_single_doc_n_times ends====")

    def test_move_val_btwn_key_and_seq_trees(self):
        """
        Test Focus: Update items such that values move
                    between the sequence tree and the key tree.
        STEPS:
          -- Update items with a new size, so that
             items move from the sequence tree to the
             key tree or vice versa
          -- Do data validation
          -- Again update items with the initial size
          -- Check space amplification
          -- Again validate documents
        """
        self.log.info("test_move_val_btwn_key_and_seq_trees starts")
        msg_stats = "Fragmentation value for {} stats exceeds\
        the configured value"
        count = 0
        keyTree, seqTree = (self.get_disk_usage(
                        self.buckets[0],
                        self.cluster.nodes_in_cluster)[2:4])
        self.log.debug("Disk usage after pure creates {}".format((
            self.disk_usage, keyTree, seqTree)))
        initial_doc_size = self.doc_size
        upsert_size = 0
        if self.doc_size < 32:
            upsert_size = 2048
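        # Assumption behind the sizes used here: magma keeps small values
        # (this test uses a 32-byte boundary) in one tree and larger values
        # in the other, so upserting 2048-byte values over docs created with
        # doc_size < 32 should force values to move between the key tree and
        # the sequence tree, which is what this test exercises.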

        while count < self.test_itr:
            self.log.info("Update Iteration count == {}".format(count))
            for node in self.cluster.nodes_in_cluster:
                shell = RemoteMachineShellConnection(node)
                shell.kill_memcached()
                shell.disconnect()
                self.assertTrue(self.bucket_util._wait_warmup_completed(
                                [self.cluster_util.cluster.master],
                                self.bucket_util.buckets[0],
                                wait_time=self.wait_timeout * 10))
            #######################################################################
            '''
            STEP - 1, Update items with changed/new size
            '''
            self.log.info("Step 1, Iteration= {}".format(count+1))
            self.doc_ops = "update"
            self.update_start = 0
            self.update_end = self.num_items
            if self.rev_update:
                self.update_start = -int(self.num_items - 1)
                self.update_end = 1
            self.doc_size = upsert_size
            self.generate_docs()
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

            if upsert_size > 32:
                seqTree_update = (self.get_disk_usage(
                        self.buckets[0],
                        self.cluster.nodes_in_cluster)[-1])
                self.log.info("For upsert_size > 32 seqIndex usage-{}\
                ".format(seqTree_update))

            #######################################################################
            '''
            STEP - 2, Validate data after initial upsert
            '''
            self.log.info("Step 2, Iteration= {}".format(count+1))
            self.validate_data("update", self.gen_update)

            #######################################################################
            '''
            STEP - 3, Updating items with changed doc size
                     to move values between trees
            '''
            self.log.info("Step 3, Iteration= {}".format(count+1))
            self.doc_size = initial_doc_size
            self.generate_docs()
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

            #######################################################################
            '''
            STEP - 4, Space Amplification Checks
            '''
            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                 self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(
                self.buckets[0], self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("disk usage after upsert count {} is {}MB \
                ".format(count+1, _res))
            if self.doc_size > 32:
                self.assertIs(
                    _res > 1.5 * self.disk_usage[self.disk_usage.keys()[0]],
                    False, "Disk Usage {} After \
                    update count {} exceeds \
                    Actual disk usage {} by 1.5 \
                    times".format(_res, count+1,
                                  self.disk_usage[self.disk_usage.keys()[0]]))
            else:
                self.assertIs(disk_usage[3] > 0.5 * seqTree_update,
                              False, "Current seqTree usage {} exceeds 0.5 \
                              times the earlier seqTree usage \
                              (after update) {}\
                              ".format(disk_usage[3], seqTree_update))

            count += 1

            #######################################################################
            '''
            STEP - 5, Data validation
            '''
            self.log.info("Step 5, Iteration= {}".format(count+1))
            self.validate_data("update", self.gen_update)

            #######################################################################
        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.log.info("====test_move_docs_btwn_key_and_seq_trees ends====")

    def test_parallel_create_update(self):
        """
        STEPS:
          -- Create new items and update already
             existing items
          -- Check disk_usage after each Iteration
          -- Data validation for last iteration
        """
        self.log.info("test_parallel_create_update starts")
        count = 0
        init_items = self.num_items
        self.doc_ops = "create:update"
        self.update_start = 0
        self.update_end = self.num_items
        while count < self.test_itr:
            self.log.info("Iteration {}".format(count+1))
            self.create_start = self.num_items
            self.create_end = self.num_items+init_items

            if self.rev_write:
                self.create_start = -int(self.num_items+init_items - 1)
                self.create_end = -int(self.num_items - 1)

            self.generate_docs()
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)
            if count == self.test_itr - 1:
                self.validate_data("update", self.gen_update)
            self.update_start = self.num_items
            self.update_end = self.num_items+init_items
            if self.rev_update:
                self.update_start = -int(self.num_items+init_items - 1)
                self.update_end = -int(self.num_items - 1)

            disk_usage = self.get_disk_usage(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            if self.doc_size <= 32:
                self.assertIs(
                    disk_usage[2] >= disk_usage[3], True,
                    "seqIndex usage = {}MB'\n' \
                    after Iteration {}'\n' \
                    exceeds keyIndex usage={}MB'\n' \
                    ".format(disk_usage[3],
                             count+1,
                             disk_usage[2]))
            self.assertIs(
                disk_usage[0] > 2.2 * (2 * self.disk_usage[
                    self.disk_usage.keys()[0]]),
                False, "Disk Usage {}MB after updates exceeds \
                actual disk usage {}MB by 2.2 times\
                ".format(disk_usage[0],
                         (2 * self.disk_usage[
                             self.disk_usage.keys()[0]])))
            count += 1
        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.log.info("====test_parallel_create_update ends====")


class MagmaExpiryTests(MagmaBaseTest):
    def setUp(self):
        super(MagmaExpiryTests, self).setUp()
        self.gen_delete = None
        self.gen_create = None
        self.gen_update = None
        self.gen_expiry = None
        self.exp_pager_stime = self.input.param("exp_pager_stime", 10)
        self.iterations = self.input.param("iterations", 5)
        self.expiry_perc = self.input.param("expiry_perc", 100)
        self.items = self.num_items

    def load_bucket(self):
        tasks = dict()
        for collection in self.collections:
            self.generate_docs(doc_ops="create", target_vbucket=None)
            tasks.update(self.bucket_util._async_load_all_buckets(
                    self.cluster, self.gen_create, "create", 0,
                    batch_size=self.batch_size,
                    process_concurrency=self.process_concurrency,
                    persist_to=self.persist_to, replicate_to=self.replicate_to,
                    durability=self.durability_level, pause_secs=5,
                    timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
                    retry_exceptions=self.retry_exceptions,
                    ignore_exceptions=self.ignore_exceptions,
                    skip_read_on_error=False,
                    scope=self.scope_name,
                    collection=collection,
                    monitor_stats=self.monitor_stats))
        for task in tasks:
            self.task_manager.get_task_result(task)
        self.bucket_util.verify_doc_op_task_exceptions(
            tasks, self.cluster)
        self.bucket_util.log_doc_ops_task_failures(tasks)
        self.bucket_util._wait_for_stats_all_buckets(timeout=1200)

    def tearDown(self):
        super(MagmaExpiryTests, self).tearDown()

    def test_read_expired_replica(self):
        result = True
        self.gen_create = doc_generator(
            self.key, 0, 10,
            doc_size=20,
            doc_type=self.doc_type,
            key_size=self.key_size)

        tasks_info = self.bucket_util._async_load_all_buckets(
                self.cluster, self.gen_create, "create", exp=10,
                batch_size=10,
                process_concurrency=1,
                persist_to=self.persist_to, replicate_to=self.replicate_to,
                durability=self.durability_level, pause_secs=5,
                timeout_secs=self.sdk_timeout, retries=self.sdk_retries,
                )
        self.task.jython_task_manager.get_task_result(tasks_info.keys()[0])
        self.sleep(20)
        self.client = SDKClient([self.cluster.master],
                                self.bucket_util.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)
        for i in range(10):
            key = (self.key + "-" + str(i).zfill(self.key_size-len(self.key)))
            try:
                getReplicaResult = self.client.collection.getAnyReplica(
                    key, GetAnyReplicaOptions.getAnyReplicaOptions())
                if getReplicaResult:
                    result = False
                    try:
                        self.log.info("Able to retreive: %s" %
                                      {"key": key,
                                       "value": getReplicaResult.contentAsObject(),
                                       "cas": getReplicaResult.cas()})
                    except Exception as e:
                        print str(e)
            except DocumentUnretrievableException as e:
                pass
            if len(self.client.get_from_all_replicas(key)) > 0:
                result = False
        self.client.close()
        self.assertTrue(result, "SDK is able to retrieve expired documents")

    def test_expiry(self):
        '''
        Test Focus: Expire items (for n iterations)
         and verify the tombstone count after the expiry pager's time
         and the metadata purge interval

        Steps:

           --- Expire already created items / create expiry load
           --- Wait for docs to expire
           --- Check the tombstone count
           --- Verify doc count
           --- Set metadata purge age and verify tombstone count
           --- Repeat the above steps n times
        '''
        self.log.info("test_expiry starts")
        self.expiry_start = 0
        self.expiry_end = self.num_items
        self.doc_ops = "expiry"
        for it in range(self.iterations):
            self.log.info("Iteration {}".format(it))
            self.expiry_perc = self.input.param("expiry_perc", 100)

            self.generate_docs(doc_ops="expiry",
                               expiry_start=self.expiry_start,
                               expiry_end=self.expiry_end)
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets()

            self.sleep(self.maxttl, "Wait for docs to expire")

            # exp_pager_stime
            self.bucket_util._expiry_pager(self.exp_pager_stime)
            self.sleep(self.exp_pager_stime, "Wait until exp_pager_stime for kv_purger\
             to kickoff")
            self.sleep(self.exp_pager_stime*10, "Wait for KV purger to scan expired docs and add \
            tombstones.")

            # Check for tombstone count in Storage
            ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
            self.log.info("Tombstones after exp_pager_stime: {}".format(ts))
            expected_ts_count = self.items*self.expiry_perc/100*(self.num_replicas+1)*(it+1)
            self.log.info("Iterations - {}, expected_ts_count - {}".format(it, expected_ts_count))
            self.assertEqual(expected_ts_count, ts, "Incorrect tombstone count in storage,\
                              Expected: {}, Found: {}".
                              format(expected_ts_count, ts))
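            # Worked example (assumed params): with self.items = 1000,
            # expiry_perc = 100 and num_replicas = 1, iteration 0 expects
            # 1000 * 100/100 * (1+1) * (0+1) = 2000 tombstones across active
            # and replica vbuckets; the (it+1) factor scales the expectation
            # on later iterations.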

            self.log.info("Verifying doc counts after create doc_ops")
            self.bucket_util.verify_stats_all_buckets(items=0)

            # Metadata Purge Interval
            self.meta_purge_interval = 180
            self.meta_purge_interval_in_days = 180 / 86400.0
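            # 180 seconds expressed in days (180 / 86400 ~= 0.00208), since
            # set_metadata_purge_interval() below goes through the REST
            # (diag/eval) setting, which takes days, while cbepctl takes the
            # value in seconds. The split between the two interfaces is the
            # test's assumption.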

            self.set_metadata_purge_interval(
                value=self.meta_purge_interval_in_days, buckets=self.buckets)
            self.sleep(180, "sleeping after setting metadata purge interval using diag/eval")
            self.bucket_util.cbepctl_set_metadata_purge_interval(
                value=self.meta_purge_interval, buckets=self.buckets)
    #         self.bucket_util.set_metadata_purge_interval(str(self.meta_purge_interval),
    #                                                      buckets=self.buckets)
    #         self.sleep(self.meta_purge_interval*60*60*2/0.04, "Wait for Metadata Purge Interval to drop \
    #         tomb-stones from storage")
            self.sleep(self.meta_purge_interval*2, "Wait for Metadata Purge Interval to drop \
            tomb-stones from storage")
            ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
            self.log.info("Tombstones after persistent_metadata_purge_age: {}".format(ts))

            # Check for tombstones removed
            self.run_compaction(compaction_iterations=1)
            ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
            self.log.info("Tombstones after bucket compaction: {}".format(ts))
            self.assertTrue(self.vbuckets * (self.num_replicas+1)>=ts,
                             "Incorrect tombstone count in storage,\
                             Expected: {}, Found: {}".format(self.vbuckets * (self.num_replicas+1), ts))

    def test_create_expire_same_items(self):
        '''
        Test Focus: Create and expire n items

        Steps:

           --- Create items == num_items
                    (init_loading will be set to False)
           --- Check disk usage after creates
           --- Expire all the items
           --- Check the tombstone count
           --- Check disk usage
           --- Repeat the above steps n times
        '''
        self.log.info("test_create_expire_same_items starts")
        self.create_start = 0
        self.create_end = self.num_items
        self.expiry_start = 0
        self.expiry_end = self.num_items
        #self.create_perc = 100
        #self.expiry_perc = 100
        for _iter in range(self.iterations):
            self.maxttl = random.randint(5, 20)
            self.log.info("Test Iteration: {}".format(_iter))
            # Create items which are expired
            self.generate_docs(doc_ops="create",
                               create_start=self.create_start,
                               create_end=self.create_end)
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True,
                                  doc_ops="create")
            self.bucket_util._wait_for_stats_all_buckets()
            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)

            self.log.info("Disk usage after creates {}".format(disk_usage))
            size_before = disk_usage[0]

            self.generate_docs(doc_ops="expiry",
                               expiry_start=self.expiry_start,
                               expiry_end=self.expiry_end)

            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True,
                                  doc_ops="expiry")
            self.bucket_util._wait_for_stats_all_buckets()

            self.sleep(self.maxttl, "Wait for docs to expire")

            # exp_pager_stime
            self.bucket_util._expiry_pager(self.exp_pager_stime)
            self.sleep(self.exp_pager_stime, "Wait until exp_pager_stime for kv_purger\
             to kickoff")
            self.sleep(self.exp_pager_stime*10, "Wait for KV purger to scan expired docs and add \
            tombstones.")

            # Check for tombstone count in Storage
            ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
            self.log.info("Tombstones after exp_pager_stime: {}".format(ts))


            # Space amplification check
            msg_stats = "Fragmentation value for {} stats exceeds\
            the configured value"
            result = self.check_fragmentation_using_magma_stats(self.buckets[0],
                                                                self.cluster.nodes_in_cluster)
            self.assertIs(result, True, msg_stats.format("magma"))

            result = self.check_fragmentation_using_bucket_stats(
                self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(result, True, msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            self.log.info("Disk usage after expiry {}".format(disk_usage))
            size_after = disk_usage[0]

            self.assertTrue(size_after < size_before * 0.8,
                            "Data size before ({}) and after expiry ({})"
                            .format(size_before, size_after))
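            # Illustrative check (assumed numbers): if disk usage was 1000 MB
            # right after the creates, usage after expiry and the expiry-pager
            # run must drop below 0.8 * 1000 = 800 MB for the test to pass.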
            # Metadata Purge Interval
            self.meta_purge_interval = 180
            self.meta_purge_interval_in_days = 180 / 86400.0
            self.set_metadata_purge_interval(
                value=self.meta_purge_interval_in_days, buckets=self.buckets)
            self.sleep(180, "sleeping after setting metadata purge interval using diag/eval")
            self.bucket_util.cbepctl_set_metadata_purge_interval(
                value=self.meta_purge_interval, buckets=self.buckets)
            self.sleep(self.meta_purge_interval*2, "Wait for Metadata Purge Interval to drop \
            tomb-stones from storage")
            self.run_compaction(compaction_iterations=1)
            ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
            self.log.info("Tombstones after persistent_metadata_purge_age: {}".format(ts))
            self.sleep(60, "wait after compaction")
            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            disk_usage_after_compaction = disk_usage[0]
            self.log.info("Iteration--{}, disk usage after compaction--{}".
                           format(_iter, disk_usage[0]))
            self.assertTrue(disk_usage_after_compaction < 500,
                            "Disk size after compaction exceeds 500MB")

    def test_expiry_no_wait_update(self):
        '''
        Test Focus: Upsert and expire n items

        Steps:

           --- Upsert items == num_items
                    (initial loading happens in magma base)
           --- Expire all the load
           --- Wait for docs to expire and convert to tombstones
           --- Check disk usage after expiry
           --- Check the tombstone count
           --- Set metadata purge age and verify tombstone count
           --- Check disk usage
           --- Repeat the above steps n times
        '''
        self.log.info(" test_expiry_no_wait_update starts")
        self.update_start = 0
        self.update_end = self.num_items
        self.expiry_start = 0
        self.expiry_end = self.num_items
        self.update_perc = 100
        self.expiry_perc = 100
        for _iter in range(self.iterations):
            self.log.info("Iteration--{}".format(_iter))
            self.generate_docs(doc_ops="update",
                               update_start=self.update_start,
                               update_end=self.update_end)
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True,
                                  doc_ops="update")
            self.bucket_util._wait_for_stats_all_buckets()
            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            self.log.debug("Disk usage after updates {}".format(disk_usage))
            size_before = disk_usage[0]

            self.generate_docs(doc_ops="expiry",
                               expiry_start=self.expiry_start,
                               expiry_end=self.expiry_end)
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True,
                                  doc_ops="expiry")
            self.bucket_util._wait_for_stats_all_buckets()

            self.sleep(self.maxttl, "Wait for docs to expire")

            # exp_pager_stime
            self.bucket_util._expiry_pager(self.exp_pager_stime)
            self.sleep(self.exp_pager_stime, "Wait until exp_pager_stime for kv_purger\
             to kickoff")
            self.sleep(self.exp_pager_stime*10, "Wait for KV purger to scan expired docs and add \
            tombstones.")

            # Check for tombstone count in Storage
            ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
            self.log.info("Tombstones after exp_pager_stime: {}".format(ts))
            expected_ts_count = self.items*self.expiry_perc/100*(self.num_replicas+1)*(_iter+1)
            self.log.info("Expected ts count is {}".format(expected_ts_count))
            self.assertEqual(expected_ts_count, ts, "Incorrect tombstone count in storage,\
                              Expected: {}, Found: {}".
                              format(expected_ts_count, ts))

            self.log.info("Verifying doc counts after create doc_ops")
            self.bucket_util.verify_stats_all_buckets(items=0)
            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            self.log.info("Disk usage after expiry {}".format(disk_usage))
            size_after = disk_usage[0]

            self.assertTrue(size_after < self.disk_usage[self.disk_usage.keys()[0]] * 0.6,
                            "Data size before ({}) and after expiry ({})"
                            .format(self.disk_usage[self.disk_usage.keys()[0]], size_after))

            # Metadata Purge Interval
            self.meta_purge_interval = 180
            self.meta_purge_interval_in_days = 180 / 86400.0

            self.set_metadata_purge_interval(
                value=self.meta_purge_interval_in_days, buckets=self.buckets)
            self.sleep(180, "sleeping after setting metadata purge interval using diag/eval")
            self.bucket_util.cbepctl_set_metadata_purge_interval(
                value=self.meta_purge_interval, buckets=self.buckets)

            self.sleep(self.meta_purge_interval*2, "Wait for Metadata Purge Interval to drop \
            tomb-stones from storage")

            ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
            self.log.info("Tombstones after persistent_metadata_purge_age: {}".format(ts))

            # Check for tombstones removed
            self.run_compaction(compaction_iterations=1)
            ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
            self.log.info("Tombstones after bucket compaction: {}".format(ts))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            self.log.info("Disk usage after compaction {}".format(disk_usage))
            size_after_compaction = disk_usage[0]
            self.log.info("disk usage after compaction {}".format(size_after_compaction))
            self.sleep(300, "sleeping after test")
            # below assert is only applicable if we expire all the items
            self.assertTrue(size_after_compaction < 500,
                            "Size after compaction shouldn't be more than 500MB")

    def test_docs_expired_wait_for_magma_purge(self):
        pass

    def test_expiry_disk_full(self):
        self.expiry_perc = self.input.param("expiry_perc", 100)
        self.doc_ops = "expiry"
        self.generate_docs(doc_ops="expiry")
        _ = self.loadgen_docs(self.retry_exceptions,
                              self.ignore_exceptions,
                              _sync=True)
        self.bucket_util._wait_for_stats_all_buckets()

        self.sleep(self.maxttl, "Wait for docs to expire")

        def _get_disk_usage_in_MB(remote_client, path):
            disk_info = remote_client.get_disk_info(in_MB=True, path=path)
            disk_space = disk_info[1].split()[-3][:-1]
            return disk_space
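        # Assumption about get_disk_info() output: line [1] is the df-style
        # data row for the given path, so split()[-3] picks the free-space
        # column (the amount the loop below tries to fill) and [:-1] strips a
        # trailing unit character, leaving a plain MB number castable to int.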

        # Fill up the disk
        remote_client = RemoteMachineShellConnection(self.cluster.master)
        du = int(_get_disk_usage_in_MB(remote_client, self.cluster.master.data_path)) - 50
        _file = os.path.join(self.cluster.master.data_path, "full_disk_")
        # Keep the unformatted template separate so the {0}{1} placeholders
        # survive across iterations of the fill loop below
        cmd_template = "dd if=/dev/zero of={0}{1} bs=1024M count=1"
        while int(du) > 0:
            cmd = cmd_template.format(_file, str(du) + "MB_" + str(time.time()))
            output, error = remote_client.execute_command(cmd, use_channel=True)
            remote_client.log_command_output(output, error)
            du -= 1024
            if du < 1024:
                cmd_template = "dd if=/dev/zero of={0}{1} bs=" + str(du) + "M count=1"

        # exp_pager_stime
        self.bucket_util._expiry_pager(self.exp_pager_stime)
        self.sleep(self.exp_pager_stime, "Wait until exp_pager_stime for kv_purger\
         to kickoff")
        self.sleep(self.exp_pager_stime*10, "Wait for KV purger to scan expired docs and add \
        tombstones.")

        # Check for tombstone count in Storage
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after exp_pager_stime: {}".format(ts))

        self.log.info("Verifying doc counts after create doc_ops")
        self.bucket_util.verify_stats_all_buckets(items=0)

        # Metadata Purge Interval
        self.meta_purge_interval = 60
        self.bucket_util.cbepctl_set_metadata_purge_interval(
            value=self.meta_purge_interval, buckets=self.buckets)
        self.sleep(self.meta_purge_interval*2, "Wait for Metadata Purge Interval to drop \
        tomb-stones from storage")

        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after persistent_metadata_purge_age: {}".format(ts))

        # free up the disk
        output, error = remote_client.execute_command("rm -rf full_disk*",
                                                      use_channel=True)

        # Wait for expiry pager to insert tombstones again
        self.sleep(self.exp_pager_stime*10, "Wait for KV purger to scan expired docs and add \
        tombstones.")

        # Check for tombstone count in Storage after disk is available
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after exp_pager_stime: {}".format(ts))

        # Metadata Purge Interval after disk space is available
        self.sleep(self.meta_purge_interval*2, "Wait for Metadata Purge Interval to drop \
        tomb-stones from storage")

        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after persistent_metadata_purge_age: {}".format(ts))

        # Check for tombstones removed
        self.run_compaction()
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after bucket compaction: {}".format(ts))

    def test_random_expiry(self):
        '''
        Test Focus: Expire items (items will have different ttl values)
         and verify the tombstone count after the expiry pager's time
         and the metadata purge interval

        Steps:

           --- Expire already created items / create expiry load
                  (items will have different ttl values)
           --- Wait for docs to expire
           --- Check the tombstone count
           --- Verify doc count
           --- Set metadata purge age and verify tombstone count
           --- Repeat the above steps n times
        '''
        self.random_exp = True
        self.doc_ops = "expiry"
        self.expiry_perc = self.input.param("expiry_perc", 100)
        self.generate_docs()

        _ = self.loadgen_docs(self.retry_exceptions,
                              self.ignore_exceptions,
                              _sync=True)
        self.bucket_util._wait_for_stats_all_buckets()

        self.sleep(self.maxttl, "Wait for docs to expire")

        # exp_pager_stime
        self.bucket_util._expiry_pager(self.exp_pager_stime)
        self.sleep(self.exp_pager_stime, "Wait until exp_pager_stime for kv_purger\
         to kickoff")
        self.sleep(self.exp_pager_stime*10, "Wait for KV purger to scan expired docs and add \
        tombstones.")

        # Check for tombstone count in Storage
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after exp_pager_stime: {}".format(ts))
        expected_ts_count = self.items*self.expiry_perc/100*(self.num_replicas+1)
        self.log.info("Expected ts count is {}".format(expected_ts_count))
        self.assertEqual(expected_ts_count, ts, "Incorrect tombstone count in storage,\
                              Expected: {}, Found: {}".
                              format(expected_ts_count, ts))
        self.log.info("Verifying doc counts after create doc_ops")
        self.bucket_util.verify_stats_all_buckets(self.num_items)

        # Metadata Purge Interval
        self.meta_purge_interval = 180
        self.meta_purge_interval_in_days = 180 / 86400.0

        self.set_metadata_purge_interval(
            value=self.meta_purge_interval_in_days, buckets=self.buckets)
        self.sleep(180, "sleeping after setting metadata purge interval using diag/eval")
        self.bucket_util.cbepctl_set_metadata_purge_interval(
            value=self.meta_purge_interval, buckets=self.buckets)

        self.sleep(self.meta_purge_interval*2, "Wait for Metadata Purge Interval to drop \
        tomb-stones from storage")

        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after persistent_metadata_purge_age: {}".format(ts))

        # Check for tombstones removed
        self.run_compaction(compaction_iterations=1)
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after bucket compaction: {}".format(ts))
        self.assertTrue(self.vbuckets * (self.num_replicas+1)>=ts,
                        "Incorrect tombstone count in storage,\
                        Expected: {}, Found: {}".format(self.vbuckets * (self.num_replicas+1), ts))

    def test_expire_read_validate_meta(self):
        self.expiry_perc = self.input.param("expiry_perc", 100)
        self.doc_ops = "expiry"
        self.bucket_util._expiry_pager(216000)
        self.generate_docs(doc_ops="expiry")
        _ = self.loadgen_docs(self.retry_exceptions,
                              self.ignore_exceptions,
                              _sync=True)
        self.bucket_util._wait_for_stats_all_buckets()

        self.sleep(self.maxttl, "Wait for docs to expire")
        self.sigkill_memcached()
        self.bucket_util._expiry_pager(216000)
        # Read all the docs to ensure they get converted to tombstones
        self.generate_docs(doc_ops="read",
                           read_start=self.expiry_start,
                           read_end=self.expiry_end)
        self.gen_delete = copy.deepcopy(self.gen_read)
        _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True,
                                  doc_ops="delete")
        self.sleep(180, "wait after get ops")
        #data_validation = self.task.async_validate_docs(
        #        self.cluster, self.bucket_util.buckets[0],
        #        self.gen_read, "delete", 0,
        #        batch_size=self.batch_size,
        #        process_concurrency=self.process_concurrency,
        #        pause_secs=5, timeout_secs=self.sdk_timeout)
        #self.task.jython_task_manager.get_task_result(data_validation)

        # All docs converted to tombstones
        # Check for tombstone count in Storage
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after exp_pager_stime: {}".format(ts))
        expected_ts_count = self.items*self.expiry_perc/100*(self.num_replicas+1)
        self.log.info("Expected ts count is {}".format(expected_ts_count))
        self.assertEqual(expected_ts_count, ts, "Incorrect tombstone count in storage,\
                        Expected: {}, Found: {}".format(expected_ts_count, ts))

    def test_wait_for_expiry_read_repeat(self):
        for _iter in range(self.iterations):
            self.maxttl = random.randint(20, 60)
            self.log.info("Test Iteration: {}".format(_iter))
            self.test_expire_read_validate_meta()

    def test_expiry_full_compaction(self):
        self.doc_ops = "expiry"
        self.generate_docs(doc_ops="expiry")
        _ = self.loadgen_docs(self.retry_exceptions,
                              self.ignore_exceptions,
                              _sync=True)
        self.bucket_util._wait_for_stats_all_buckets()

        self.sleep(self.maxttl, "Wait for docs to expire")

        # exp_pager_stime
        self.bucket_util._expiry_pager(21600)
        self.log.info("Starting compaction for each bucket to add tombstones")
        self.run_compaction(compaction_iterations=1)

        self.sleep(300, "sleep after triggering compaction")
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after compaction: {}".format(ts))
        expected_ts_count = self.items*self.expiry_perc/100*(self.num_replicas+1)
        self.log.info("Expected ts count after compaction is {}".format(expected_ts_count))
        self.assertEqual(expected_ts_count, ts, "Incorrect tombstone count in storage,\
        Expected: {}, Found: {}".format(expected_ts_count, ts))

        # Metadata Purge Interval
        self.meta_purge_interval = 180
        self.meta_purge_interval_in_days = 180 / 86400.0
        self.set_metadata_purge_interval(
            value=self.meta_purge_interval_in_days, buckets=self.buckets)
        self.sleep(180, "sleeping after setting metadata purge interval using diag/eval")
        self.bucket_util.cbepctl_set_metadata_purge_interval(
            value=self.meta_purge_interval, buckets=self.buckets)
        self.sleep(self.meta_purge_interval*2, "Wait for Metadata Purge Interval to drop \
        tomb-stones from storage")

        self.log.info("Starting compaction for each bucket")
        self.run_compaction(compaction_iterations=1)
        self.sleep(300, "sleep after triggering compaction, to drop tombstones")

        # All docs and tombstones should be dropped from the storage
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after full compaction: {}".format(ts))
        self.log.info("Expected {}".format(self.vbuckets * (self.num_replicas+1)))

        self.assertTrue(self.vbuckets * (self.num_replicas+1) >= ts,
                        "Incorrect tombstone count in storage,\
                        Expected: {}, Found: {}".format(self.vbuckets * (self.num_replicas+1), ts))

    def test_drop_collection_expired_items(self):
        self.log.info("test_drop_collection_expired_items starts")
        tasks = dict()
        for collection in self.collections[::2]:
            self.generate_docs(doc_ops="expiry")
            task = self.loadgen_docs(scope=self.scope_name,
                                     collection=collection,
                                     _sync=False,
                                     doc_ops="expiry")
            tasks.update(task.items())
        for task in tasks:
            self.task_manager.get_task_result(task)

        self.sleep(self.maxttl, "Wait for docs to expire")

        # Convert to tombstones
        tasks_info = dict()
        self.gen_delete = copy.deepcopy(self.gen_expiry)
        for collection in self.collections[::2]:
            temp_tasks_info = self.bucket_util._async_validate_docs(
                self.cluster, self.gen_delete, "delete", 0,
                batch_size=self.batch_size,
                process_concurrency=self.process_concurrency,
                pause_secs=5, timeout_secs=self.sdk_timeout,
                scope=self.scope_name,
                collection=collection,
                retry_exceptions=self.retry_exceptions,
                ignore_exceptions=self.ignore_exceptions)
            tasks_info.update(temp_tasks_info.items())
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        for collection in self.collections[::2]:
            self.bucket_util.drop_collection(self.cluster.master,
                                             self.buckets[0],
                                             scope_name=self.scope_name,
                                             collection_name=collection)
            self.buckets[0].scopes[self.scope_name].collections.pop(collection)
            self.collections.remove(collection)
        self.sleep(180, "sleep after dropping collections")
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("tombstone count is {}".format(ts))
        self.assertEqual(ts, 0, "Tombstones found after collections(expired items) drop.")

    def test_drop_collection_during_tombstone_creation(self):
        scope_name, collections = self.scope_name, self.collections
        self.load_bucket()
        tasks = []
        for collection in collections[::2]:
            self.generate_docs(doc_ops="expiry")
            tasks.append(self.loadgen_docs(scope=scope_name,
                         collection=collection,
                         _sync=False))
        # exp_pager_stime
        self.bucket_util._expiry_pager(self.exp_pager_stime)
        self.sleep(self.exp_pager_stime, "Wait until exp_pager_stime for kv_purger\
         to kickoff")
        for task in tasks:
            self.task_manager.get_task_result(task)
        for collection in collections[::2]:
            self.bucket_util.drop_collection(self.cluster.master,
                                             self.buckets[0],
                                             scope_name=scope_name,
                                             collection_name=collection)
            self.buckets[0].scopes[scope_name].collections.pop(collection)
            collections.remove(collection)

        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after full compaction: {}".format(ts))
        self.assertEqual(ts, 0, "Tombstones found after collections(expired items) drop.")

    def test_failover_expired_items_in_vB(self):
        self.maxttl = 120
        self.doc_ops = "expiry"
        self.expiry_perc = self.input.param("expiry_perc", 100)

        shell_conn = RemoteMachineShellConnection(self.cluster.nodes_in_cluster[-1])
        cbstats = Cbstats(shell_conn)
        self.target_vbucket = cbstats.vbucket_list(self.bucket_util.buckets[0].name)

        self.generate_docs(target_vbucket=self.target_vbucket)

        _ = self.loadgen_docs(self.retry_exceptions,
                              self.ignore_exceptions,
                              _sync=True)
        self.bucket_util._wait_for_stats_all_buckets()

        # exp_pager_stime
        self.bucket_util._expiry_pager(self.exp_pager_stime)
        self.sleep(self.exp_pager_stime, "Wait until exp_pager_stime for kv_purger\
         to kickoff")
        self.sleep(self.exp_pager_stime*10, "Wait for KV purger to scan expired docs and add \
        tombstones.")

        self.task.async_failover(self.cluster.nodes_in_cluster,
                                 self.cluster.nodes_in_cluster[-1],
                                 graceful=True)

        self.nodes = self.rest.node_statuses()
        self.task.rebalance(self.cluster.nodes_in_cluster,
                            to_add=[],
                            to_remove=[self.cluster.nodes_in_cluster[-1]])

        # Metadata Purge Interval
        self.meta_purge_interval = 60
        self.bucket_util.cbepctl_set_metadata_purge_interval(
            value=self.meta_purge_interval, buckets=self.buckets)
        self.sleep(self.meta_purge_interval*2, "Wait for Metadata Purge Interval to drop \
        tomb-stones from storage")

        self.log.info("Starting compaction for each bucket")
        self.run_compaction()

        # All docs and tombstones should be dropped from the storage
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after full compaction: {}".format(ts))

    def test_random_update_expire_same_docrange(self):
        pass

    def test_expiry_heavy_reads(self):
        pass

    def test_magma_purge_after_kv_purge(self):
        '''Need a way to trigger magma purging internally'''
        pass

    def test_magma_purge_before_kv_purge(self):
        '''Need a way to trigger magma purging internally'''
        pass

    def test_expire_data_key_tree(self):
        self.doc_size = 32
        self.test_expiry()
        seqTree_update = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)[-1]
        self.log.info("For upsert_size > 32 seqIndex usage-{}\
        ".format(seqTree_update))

    def test_random_key_expiry(self):
        self.key = "random_key"
        self.test_random_expiry()

    def test_expire_cold_data(self):
        '''Ensure that data expiring is from level 4'''
        pass

    def test_update_cold_data_with_ttl(self):
        '''Ensure that data expiring is from level 4'''
        pass

    def test_full_compaction_before_metadata_purge_interval(self):
        # If a full compaction runs while expired items are present, those
        # items are converted into tombstones, and on the next compaction
        # the tombstones are dropped once they satisfy the metadata purge
        # interval condition above.
        self.doc_ops = "expiry"
        self.generate_docs(doc_ops="expiry")
        _ = self.loadgen_docs(self.retry_exceptions,
                              self.ignore_exceptions,
                              _sync=True)
        self.bucket_util._wait_for_stats_all_buckets()

        self.sleep(self.maxttl, "Wait for docs to expire")

        self.log.info("Starting compaction for each bucket")
        self.run_compaction()

        # All docs and tombstones should be dropped from the storage
        ts = self.get_tombstone_count_key(self.cluster.nodes_in_cluster)
        self.log.info("Tombstones after full compaction: {}".format(ts))

        self.assertTrue(1 >= ts, "Incorrect tombstone count in storage,\
        Expected: {}, Found: {}".format("<=1", ts))

    def test_items_partially_greater_than_purge_interval(self):
        # Only tombstones older than the purge interval will be dropped;
        # the remaining expired items should get converted to tombstones.
        pass
Example n. 23
0
    def test_sub_doc_op_with_bucket_level_durability(self):
        """
        Create Buckets with durability_levels set and perform
        Sub_doc CRUDs from client without durability settings and
        validate the ops to make sure respective durability is honored
        """
        key, value = doc_generator("test_key", 0, 1).next()
        sub_doc_key = "sub_doc_key"
        sub_doc_vals = ["val_1", "val_2", "val_3", "val_4", "val_5"]
        for d_level in self.get_supported_durability_for_bucket():
            # Avoid creating bucket with durability=None
            if d_level == Bucket.DurabilityLevel.NONE:
                continue

            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()

            self.log.info(step_desc)
            # Object to support performing CRUDs and create Bucket
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(self.cluster, bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step(step_desc)

            # SDK client to perform sub_doc ops
            client = SDKClient([self.cluster.master], bucket_obj)

            result = client.crud("create", key, value)
            verification_dict["ops_create"] += 1
            verification_dict["sync_write_committed_count"] += 1
            if result["status"] is False:
                self.log_failure("Doc insert failed for key: %s" % key)

            # Perform sub_doc CRUD
            for sub_doc_op in ["subdoc_insert", "subdoc_upsert",
                               "subdoc_replace"]:
                sub_doc_val = choice(sub_doc_vals)
                _, fail = client.crud(sub_doc_op, key,
                                      [sub_doc_key, sub_doc_val])
                if fail:
                    self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                     % (sub_doc_op, key,
                                        sub_doc_key, sub_doc_val, result))
                else:
                    verification_dict["ops_update"] += 1
                    verification_dict["sync_write_committed_count"] += 1

                success, fail = client.crud("subdoc_read", key, sub_doc_key)
                if fail or str(success[key]["value"].get(0)) != sub_doc_val:
                    self.log_failure("%s failed. Expected: %s, Actual: %s"
                                     % (sub_doc_op, sub_doc_val,
                                        success[key]["value"].get(0)))
                self.summary.add_step("%s for key %s" % (sub_doc_op, key))

            # Subdoc_delete and verify
            sub_doc_op = "subdoc_delete"
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if fail:
                self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                 % (sub_doc_op, key,
                                    sub_doc_key, sub_doc_val, result))
            verification_dict["ops_update"] += 1
            verification_dict["sync_write_committed_count"] += 1

            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if SDKException.PathNotFoundException \
                    not in str(fail[key]["error"]):
                self.log_failure("Invalid error after sub_doc_delete")

            self.summary.add_step("%s for key %s" % (sub_doc_op, key))

            # Validate doc_count
            self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                         self.cluster.buckets)
            self.bucket_util.verify_stats_all_buckets(self.cluster, 1)

            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)

            # Close SDK client
            client.close()

            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
Esempio n. 24
0
    def __init__(self, master, cbas_node):
        self.server = master
        super(CBASHelper, self).__init__(master, cbas_node)
        SDKClient(self.server).__init__(self.server)
        self.connectionLive = False
Esempio n. 25
0
    def verify_cas(self, ops, generator):
        """
        Verify CAS value manipulation.

        For update we use the latest CAS value return by set()
        to do the mutation again to see if there is any exceptions.
        We should be able to mutate that item with the latest CAS value.
        For delete(), after it is called, we try to mutate that item with the
        cas value returned by delete(). We should see SDK Error.
        Otherwise the test should fail.
        For expire, We want to verify using the latest CAS value of that item
        can not mutate it because it is expired already.
        """

        for bucket in self.bucket_util.buckets:
            client = SDKClient([self.cluster.master], bucket)
            gen = generator
            while gen.has_next():
                key, value = gen.next()
                vb_of_key = self.bucket_util.get_vbucket_num_for_key(key)
                active_node_ip = None
                for node_ip in self.shell_conn.keys():
                    if vb_of_key in self.vb_details[node_ip]["active"]:
                        active_node_ip = node_ip
                        break
                self.log.info("Performing %s on key %s" % (ops, key))
                if ops in ["update", "touch"]:
                    for x in range(self.mutate_times):
                        old_cas = client.crud("read", key, timeout=10)["cas"]
                        # value = {"val": "mysql-new-value-%s" % x}
                        if ops == 'update':
                            result = client.crud(
                                "replace",
                                key,
                                value,
                                durability=self.durability_level,
                                cas=old_cas)
                        else:
                            result = client.touch(
                                key,
                                0,
                                durability=self.durability_level,
                                timeout=self.sdk_timeout)

                        if result["status"] is False:
                            client.close()
                            self.log_failure("Touch / replace with cas failed")
                            return

                        new_cas = result["cas"]
                        if old_cas == new_cas:
                            self.log_failure("CAS old (%s) == new (%s)" %
                                             (old_cas, new_cas))

                        if ops == 'update':
                            if json.loads(str(result["value"])) \
                                    != json.loads(value):
                                self.log_failure("Value mismatch. "
                                                 "%s != %s" %
                                                 (result["value"], value))
                            else:
                                self.log.debug(
                                    "Mutated %s with CAS %s successfully! "
                                    "Current CAS: %s" %
                                    (key, old_cas, new_cas))

                        active_read = client.crud("read",
                                                  key,
                                                  timeout=self.sdk_timeout)
                        active_cas = active_read["cas"]
                        replica_cas = -1
                        cas_in_active_node = \
                            self.cb_stat[active_node_ip].vbucket_details(
                                bucket.name)[str(vb_of_key)]["max_cas"]
                        if str(cas_in_active_node) != str(new_cas):
                            self.log_failure("CbStats CAS mismatch. %s != %s" %
                                             (cas_in_active_node, new_cas))

                        poll_count = 0
                        max_retry = 5
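                        # The replica may lag behind the active vBucket, so
                        # retry the replica read a few times. With a
                        # durability level set, the sync-write is already
                        # committed on the replica, so a single read is enough.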
                        while poll_count < max_retry:
                            replica_read = client.getFromReplica(
                                key, ReplicaMode.FIRST)[0]
                            replica_cas = replica_read["cas"]
                            if active_cas == replica_cas \
                                    or self.durability_level:
                                break
                            poll_count = poll_count + 1
                            self.sleep(1, "Retry read CAS from replica..")

                        if active_cas != replica_cas:
                            self.log_failure("Replica CAS mismatch. %s != %s" %
                                             (active_cas, replica_cas))
                elif ops == "delete":
                    old_cas = client.crud("read", key, timeout=10)["cas"]
                    result = client.crud("delete",
                                         key,
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout)
                    self.log.info("CAS after delete of key %s: %s" %
                                  (key, result["cas"]))
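                    # Replacing with the pre-delete CAS must fail: the doc is
                    # gone, and a successful mutation would mean stale CAS
                    # values are still being honoured.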
                    result = client.crud("replace",
                                         key,
                                         "test",
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout,
                                         cas=old_cas)
                    if result["status"] is True:
                        self.log_failure("The item should already be deleted")
                    if DurableExceptions.KeyNotFoundException \
                            not in result["error"]:
                        self.log_failure("Invalid Excepetion: %s" % result)
                    if result["cas"] != 0:
                        self.log_failure("Delete returned invalid cas: %s, "
                                         "Expected 0" % result["cas"])
                    if result["cas"] == old_cas:
                        self.log_failure("Deleted doc returned old cas: %s " %
                                         old_cas)
                elif ops == "expire":
                    old_cas = client.crud("read", key, timeout=10)["cas"]
                    result = client.crud("touch", key, exp=self.expire_time)
                    if result["status"] is True:
                        if result["cas"] == old_cas:
                            self.log_failure("Touch failed to update CAS")
                    else:
                        self.log_failure("Touch operation failed")

                    self.sleep(self.expire_time + 1, "Wait for item to expire")
                    result = client.crud("replace",
                                         key,
                                         "test",
                                         durability=self.durability_level,
                                         timeout=self.sdk_timeout,
                                         cas=old_cas)
                    if result["status"] is True:
                        self.log_failure("Able to mutate %s with old cas: %s" %
                                         (key, old_cas))
                    if ClientException.KeyNotFoundException \
                            not in result["error"]:
                        self.log_failure("Invalid error after expiry: %s" %
                                         result)
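
The delete and expire branches above both rely on a stale CAS being rejected
once the document changes. As a complement, the sketch below shows the
happy-path pattern the update branch exercises: an optimistic, CAS-guarded
replace that re-reads and retries on contention. It assumes only the
SDKClient.crud() wrapper used throughout these examples (returning a dict with
"status", "cas" and "error") and is illustrative, not part of the test suite.

def cas_guarded_replace(client, key, new_value, max_retries=3):
    # Sketch only: optimistic concurrency using the crud() wrapper shown above
    result = None
    for _ in range(max_retries):
        read = client.crud("read", key)
        if read["status"] is False:
            return read                  # Key missing or read failed
        result = client.crud("replace", key, new_value, cas=read["cas"])
        if result["status"] is True:
            return result                # Replace landed with the observed CAS
        # Assumed: CAS conflicts surface in result["error"]; re-read and retry
    return result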
Esempio n. 26
0
        def test_scenario(bucket, doc_ops,
                          with_sync_write_val=None):
            # Set crud_batch_size
            crud_batch_size = 4
            simulate_error = CouchbaseError.STOP_MEMCACHED

            # Fetch target_vbs for CRUDs
            node_vb_info = self.vbs_in_node
            target_vbuckets = node_vb_info[target_nodes[0]]["replica"]
            if len(target_nodes) > 1:
                index = 1
                while index < len(target_nodes):
                    target_vbuckets = list(
                        set(target_vbuckets).intersection(
                            set(node_vb_info[target_nodes[index]]["replica"]))
                    )
                    index += 1

            # Variable to hold one of the doc_generator objects
            gen_loader_1 = None
            gen_loader_2 = None

            # Initialize doc_generators to use for testing
            self.log.info("Creating doc_generators")
            gen_create = doc_generator(
                self.key, self.num_items, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets)
            gen_update = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets, mutate=1)
            gen_delete = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster.vbuckets,
                target_vbucket=target_vbuckets)
            self.log.info("Done creating doc_generators")

            # Start CRUD operation based on the given 'doc_op' type
            if doc_ops[0] == "create":
                self.num_items += crud_batch_size
                gen_loader_1 = gen_create
            elif doc_ops[0] in ["update", "replace", "touch"]:
                gen_loader_1 = gen_update
            elif doc_ops[0] == "delete":
                gen_loader_1 = gen_delete
                self.num_items -= crud_batch_size

            if doc_ops[1] == "create":
                gen_loader_2 = gen_create
            elif doc_ops[1] in ["update", "replace", "touch"]:
                gen_loader_2 = gen_update
            elif doc_ops[1] == "delete":
                gen_loader_2 = gen_delete

            # Load required docs for doc_op_1 in case of type != create
            if doc_ops[2] == "load_initial_docs":
                doc_loading_task = self.task.async_load_gen_docs(
                    self.cluster, bucket, gen_loader_1, "create", 0,
                    batch_size=crud_batch_size, process_concurrency=1,
                    timeout_secs=10,
                    print_ops_rate=False,
                    sdk_client_pool=self.sdk_client_pool)
                self.task_manager.get_task_result(doc_loading_task)
                if doc_loading_task.fail:
                    self.log_failure("Failure while loading initial docs")
                self.summary.add_step("Create docs for %s" % doc_op[0])
                verification_dict["ops_create"] += crud_batch_size
                verification_dict["sync_write_committed_count"] \
                    += crud_batch_size

            # Initialize tasks and store the task objects
            doc_loader_task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_loader_1, doc_ops[0], 0,
                batch_size=crud_batch_size, process_concurrency=8,
                timeout_secs=60,
                print_ops_rate=False,
                start_task=False,
                sdk_client_pool=self.sdk_client_pool)

            # SDK client for performing individual ops
            client = SDKClient([self.cluster.master], bucket)

            # Perform specified action
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.create(simulate_error,
                                 bucket_name=bucket.name)
            self.sleep(5, "Wait for error simulation to take effect")

            self.task_manager.add_new_task(doc_loader_task)
            self.sleep(5, "Wait for task_1 CRUDs to reach server")

            # Perform specified CRUD operation on sync_write docs
            tem_gen = deepcopy(gen_loader_2)
            while tem_gen.has_next():
                key, value = tem_gen.next()
                for retry_strategy in [
                        SDKConstants.RetryStrategy.FAIL_FAST,
                        SDKConstants.RetryStrategy.BEST_EFFORT]:
                    if with_sync_write_val:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           durability=with_sync_write_val,
                                           timeout=3, time_unit="seconds",
                                           sdk_retry_strategy=retry_strategy)
                    else:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           timeout=3, time_unit="seconds",
                                           sdk_retry_strategy=retry_strategy)
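                    # BEST_EFFORT keeps retrying until the 3s timeout
                    # (AmbiguousTimeout); FAIL_FAST cancels the request
                    # immediately (RequestCanceled). Both should report
                    # sync-write-in-progress as the retry reason.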

                    expected_exception = SDKException.AmbiguousTimeoutException
                    retry_reason = \
                        SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                    if retry_strategy == SDKConstants.RetryStrategy.FAIL_FAST:
                        expected_exception = \
                            SDKException.RequestCanceledException
                        retry_reason = \
                            SDKException.RetryReason \
                            .KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES

                    # Validate the returned error from the SDK
                    if expected_exception not in str(fail["error"]):
                        self.log_failure("Invalid exception for {0}: {1}"
                                         .format(key, fail["error"]))
                    if retry_reason not in str(fail["error"]):
                        self.log_failure("Invalid retry reason for {0}: {1}"
                                         .format(key, fail["error"]))

                    # Try reading the value in SyncWrite in-progress state
                    fail = client.crud("read", key)
                    if doc_ops[0] == "create":
                        # Expected KeyNotFound in case of CREATE operation
                        if fail["status"] is True:
                            self.log_failure(
                                "%s returned value during SyncWrite state: %s"
                                % (key, fail))
                    else:
                        # Expects prev value in case of other operations
                        if fail["status"] is False:
                            self.log_failure(
                                "Key %s read failed for previous value: %s"
                                % (key, fail))

            # Revert the introduced error condition
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.revert(simulate_error,
                                 bucket_name=bucket.name)

            # Wait for doc_loader_task to complete
            self.task.jython_task_manager.get_task_result(doc_loader_task)

            verification_dict["ops_%s" % doc_op[0]] += crud_batch_size
            verification_dict["sync_write_committed_count"] \
                += crud_batch_size

            # Disconnect the client
            client.close()
Esempio n. 27
0
class basic_ops(BaseTestCase):
    def setUp(self):
        super(basic_ops, self).setUp()
        self.test_log = logging.getLogger("test")
        self.fail = self.input.param("fail", False)
        nodes_init = self.cluster.servers[1:self.nodes_init] if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] + nodes_init)
        self.bucket_util.add_rbac_user()

        if self.default_bucket:
            self.bucket_util.create_default_bucket(
                replica=self.num_replicas,
                compression_mode=self.compression_mode,
                ram_quota=100, bucket_type=self.bucket_type)

        time.sleep(10)
        self.def_bucket = self.bucket_util.get_all_buckets()
        self.client = VBucketAwareMemcached(RestConnection(self.cluster.master), self.def_bucket[0])
        self.__durability_level()

        self.operation = self.input.param("operation", "afterAtrPending")
        # create load
        self.value = {'value':'value1'}
        self.content = self.client.translate_to_json_object(self.value)

        self.docs = []
        self.keys = []
        for i in range(self.num_items):
            key = "test_docs-" + str(i)
            doc = Tuples.of(key, self.content)
            self.keys.append(key)
            self.docs.append(doc)

        self.transaction_config = Transaction().createTransactionConfig(self.transaction_timeout, self.durability)
        self.log.info("==========Finished Basic_ops base setup========")

    def tearDown(self):
        self.client.close()
        super(basic_ops, self).tearDown()

    def __durability_level(self):
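        # Map the textual durability_level param onto the integer levels the
        # transaction config expects (assumed encoding: 0=NONE, 1=MAJORITY,
        # 2=MAJORITY_AND_PERSIST_TO_ACTIVE, 3=PERSIST_TO_MAJORITY,
        # 4=ONLY_NONE).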
        if self.durability_level == Bucket.DurabilityLevel.MAJORITY:
            self.durability = 1
        elif self.durability_level \
                == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:
            self.durability = 2
        elif self.durability_level \
                == Bucket.DurabilityLevel.PERSIST_TO_MAJORITY:
            self.durability = 3
        elif self.durability_level == "ONLY_NONE":
            self.durability = 4
        else:
            self.durability = 0

    def set_exception(self, exception):
        raise BaseException(exception)

    def test_txnwithhooks(self):
        self.verify = self.input.param("verify", True)
        # transaction load
        if "Atr" in self.operation:
            exception = Transaction().MockRunTransaction(self.client.cluster, self.transaction_config,
                                self.client.collection, self.docs, self.transaction_commit, self.operation, self.fail)

        else:
            if "Replace" in self.operation:
                exception = Transaction().MockRunTransaction(self.client.cluster, self.transaction_config,
                                self.client.collection, self.docs, self.keys, [], self.transaction_commit, self.operation, self.keys[-1], self.fail)
                self.value = {'mutated':1, 'value':'value1'}
                self.content = self.client.translate_to_json_object(self.value)
            else:
                exception = Transaction().MockRunTransaction(self.client.cluster, self.transaction_config,
                                self.client.collection, self.docs, [], [], self.transaction_commit, self.operation, self.keys[-1], self.fail)

            if "Remove" in self.operation:
                exception = Transaction().MockRunTransaction(self.client.cluster, self.transaction_config,
                                 self.client.collection, [], [], self.keys, self.transaction_commit, self.operation, self.keys[-1], self.fail)

        # verify the values
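        # Keys should be absent when the transaction removed them, was not
        # committed, or verification is disabled; otherwise the stored value
        # must equal the expected content.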
        for key in self.keys:
            result = self.client.read(key)
            if "Remove" in self.operation or self.transaction_commit == False or self.verify == False :
                if result['status']:
                    actual_val = self.client.translate_to_json_object(result['value'])
                    self.test_log.info("actual value for key {} is {}".format(key,actual_val))
                    msg = "Key should be deleted but present in the cluster {}".format(key)
                    self.set_exception(msg)
            else:
                actual_val = self.client.translate_to_json_object(result['value'])
                if self.content != actual_val:
                    self.test_log.info("actual value for key {} is {}".format(key,actual_val))
                    self.test_log.info("expected value for key {} is {}".format(key,self.content))
                    self.set_exception("actual and expected value does not match")


        if exception and self.fail is not True:
            self.set_exception(exception)

    def test_no_eviction_impact_on_cbas(self):

        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.cbas_util.connect_link()

        self.log.info("Add documents until ram percentage")
        self.load_document_until_ram_percentage()

        self.log.info("Fetch current document count")
        target_bucket = None
        self.bucket_util.get_all_buckets()
        for tem_bucket in self.bucket_util.buckets:
            if tem_bucket.name == self.cb_bucket_name:
                target_bucket = tem_bucket
                break
        item_count = target_bucket.stats.itemCount
        self.log.info("Completed base load with %s items" % item_count)

        self.log.info("Load more until we are out of memory")
        client = SDKClient([self.cluster.master], target_bucket)
        i = item_count
        op_result = {"status": True}
        while op_result["status"] is True:
            op_result = client.crud("create",
                                    "key-id" + str(i),
                                    '{"name":"dave"}',
                                    durability=self.durability_level)
            i += 1

        if SDKException.AmbiguousTimeoutException not in op_result["error"] \
                or SDKException.RetryReason.KV_TEMPORARY_FAILURE \
                not in op_result["error"]:
            client.close()
            self.fail("Invalid exception for OOM insert: %s" % op_result)

        self.log.info('Memory is full at {0} items'.format(i))
        self.log.info("As a result added more %s items" % (i - item_count))

        self.log.info("Fetch item count")
        target_bucket = None
        self.bucket_util.get_all_buckets()
        for tem_bucket in self.bucket_util.buckets:
            if tem_bucket.name == self.cb_bucket_name:
                target_bucket = tem_bucket
                break
        item_count_when_oom = target_bucket.stats.itemCount
        mem_when_oom = target_bucket.stats.memUsed
        self.log.info('Item count when OOM {0} and memory used {1}'.format(
            item_count_when_oom, mem_when_oom))

        self.log.info("Validate document count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from %s' %
            self.cb_bucket_name)['results'][0]['$1']
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
            self.cbas_dataset_name, count_n1ql),
                        msg="Count mismatch on CBAS")
Esempio n. 29
0
    def test_crash_during_multi_updates_of_single_doc(self):

        self.log.info(
            "==test_crash_during_multi_updates_of_single_doc starts==")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        self.client = SDKClient([self.cluster.master],
                                self.cluster.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)

        self.doc_ops = "update"
        self.gen_update = self.genrate_docs_basic(start=0, end=1)
        key, val = self.gen_update.next()
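        # Each thread below repeatedly upserts the same key while a parallel
        # thread crashes the server, exercising concurrent mutations of a
        # single doc across restarts.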

        def upsert_doc(start_num, end_num, key_obj, val_obj):
            for i in range(start_num, end_num):
                val_obj.put("mutated", i)
                self.client.upsert(key_obj, val_obj)

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"graceful": self.graceful})
        self.crash_th.start()

        threads = []
        start = 0
        end = 0
        for _ in range(10):
            start = end
            end += 10
            th = threading.Thread(target=upsert_doc,
                                  args=[start, end, key, val])
            th.start()
            threads.append(th)

        for th in threads:
            th.join()

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")

        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        success, _ = self.client.get_multi([key], self.wait_timeout)
        self.assertIs(key in success, True,
                      msg="key {} doesn't exist".format(key))

        expected_val = Json.loads(val.toString())
        actual_val = Json.loads(success[key]['value'].toString())
        self.assertIs(expected_val == actual_val, True,
                      msg="expected_val {} != actual_val {}"
                          .format(expected_val, actual_val))

        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("==test_crash_during_multi_updates_of_single_doc ends==")
Esempio n. 30
0
class OpsChangeCasTests(CasBaseTest):
    def setUp(self):
        super(OpsChangeCasTests, self).setUp()
        self.key = "test_cas"
        self.expire_time = self.input.param("expire_time", 35)
        self.item_flag = self.input.param("item_flag", 0)
        self.load_gen = doc_generator(self.key, 0, self.num_items,
                                      doc_size=self.doc_size)
        self.node_data = dict()
        for node in self.cluster_util.get_kv_nodes():
            shell = RemoteMachineShellConnection(node)
            cb_stat = Cbstats(shell)
            self.node_data[node.ip] = dict()
            self.node_data[node.ip]["shell"] = shell
            self.node_data[node.ip]["cb_stat"] = Cbstats(shell)
            self.node_data[node.ip]["active"] = cb_stat.vbucket_list(
                self.bucket,
                "active")
            self.node_data[node.ip]["replica"] = cb_stat.vbucket_list(
                self.bucket,
                "replica")
        if self.sdk_client_pool:
            self.client = self.sdk_client_pool.get_client_for_bucket(
                self.bucket)
        else:
            self.client = SDKClient([self.cluster.master], self.bucket)

    def tearDown(self):
        # Close opened shell connections
        for node_ip in self.node_data.keys():
            self.node_data[node_ip]["shell"].disconnect()

        # Close SDK client connection
        self.client.close()

        super(OpsChangeCasTests, self).tearDown()

    def test_meta_rebalance_out(self):
        KEY_NAME = 'key1'

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            self.client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = self.client._get_vBucket_id(KEY_NAME)
            mc_active = self.client.memcached(KEY_NAME)
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)
            cas_active = mc_active.getMeta(KEY_NAME)[4]

        max_cas = int(mc_active.stats('vbucket-details')[
            'vb_' + str(self.client._get_vBucket_id(KEY_NAME)) + ':max_cas'])
        self.assertTrue(cas_active == max_cas,
                        '[ERROR] Active CAS {0} does not match vbucket '
                        'max_cas {1}'.format(cas_active, max_cas))

        # remove that node
        self.log.info('Remove the node with active data')
        rebalance = self.cluster.async_rebalance(self.servers[-1:],
                                                 [],
                                                 [self.master])
        rebalance.result()
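        # Once the active node is rebalanced out, the former replica vBucket
        # is promoted; its CAS should survive and later match the active copy.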
        replica_cas = mc_replica.getMeta(KEY_NAME)[4]
        get_meta_resp = mc_replica.getMeta(KEY_NAME,
                                           request_extended_meta_data=False)
        # Add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:],
                                                 [self.master], [])
        if rebalance.result() is False:
            self.log_failure("Node add-back rebalance failed")

        # verify the CAS is good
        mc_active = self.client.memcached(KEY_NAME)
        active_cas = mc_active.getMeta(KEY_NAME)[4]

        if replica_cas != active_cas:
            self.log_failure("CAS mismatch. Active: %s, Replica: %s"
                             % (active_cas, replica_cas))
        self.validate_test_failure()

    def test_meta_failover(self):
        KEY_NAME = 'key2'

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            self.client.memcached(KEY_NAME).set(
                KEY_NAME, 0, 0, json.dumps({'value': value}))
            vbucket_id = self.client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(KEY_NAME)
            mc_master = self.client.memcached_for_vbucket(vbucket_id)
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            cas_active = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_active)

        max_cas = int(mc_active.stats('vbucket-details')[
            'vb_' + str(self.client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        self.assertTrue(cas_active == max_cas,
                        '[ERROR] Active CAS {0} does not match vbucket '
                        'max_cas {1}'.format(cas_active, max_cas))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [] ,[self.master])

        rebalance.result()
        self.sleep(60)

        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        #print 'replica CAS {0}'.format(replica_CAS)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        mc_active = self.client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]
        #print 'active cas {0}'.format(active_CAS)

        get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
        self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_soft_restart(self):
        KEY_NAME = 'key2'

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            self.client.memcached(KEY_NAME).set(
                KEY_NAME, 0, 0, json.dumps({'value': value}))
            vbucket_id = self.client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(KEY_NAME)
            mc_master = self.client.memcached_for_vbucket(vbucket_id)
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            cas_pre = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_pre)

        max_cas = int(mc_active.stats('vbucket-details')[
            'vb_' + str(self.client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        self.assertTrue(cas_pre == max_cas,
                        '[ERROR] Pre-restart CAS {0} does not match vbucket '
                        'max_cas {1}'.format(cas_pre, max_cas))

        # restart nodes
        self._restart_server(self.servers[:])

        # verify the CAS is good
        mc_active = self.client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(KEY_NAME,
                                          request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(cas_pre == cas_post,
                        'CAS mismatch. Pre-restart: {0}, '
                        'post-restart: {1}'.format(cas_pre, cas_post))
        # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_hard_restart(self):
        KEY_NAME = 'key2'

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            self.client.memcached(KEY_NAME).set(
                KEY_NAME, 0, 0, json.dumps({'value': value}))
            vbucket_id = self.client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(KEY_NAME)
            mc_master = self.client.memcached_for_vbucket(vbucket_id)
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            cas_pre = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_pre)

        max_cas = int(mc_active.stats('vbucket-details')[
            'vb_' + str(self.client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        self.assertTrue(cas_pre == max_cas,
                        '[ERROR] Pre-reboot CAS {0} does not match vbucket '
                        'max_cas {1}'.format(cas_pre, max_cas))

        # reboot nodes
        self._reboot_server()

        self.sleep(60)
        # verify the CAS is good
        mc_active = self.client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(KEY_NAME,
                                          request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(cas_pre == cas_post,
                        'CAS mismatch. Pre-reboot: {0}, '
                        'post-reboot: {1}'.format(cas_pre, cas_post))
        # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_cas_set(self):
        """
        Test Incremental sets on cas and max cas values for keys
        """
        self._load_ops(self.load_gen, 'update', mutations=20)
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self.validate_test_failure()

    def test_cas_updates(self):
        """
        Test Incremental updates on cas and max cas values for keys
        """
        self._load_ops(self.load_gen, 'update', mutations=20)
        self._load_ops(self.load_gen, 'replace', mutations=20)
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self.validate_test_failure()

    def test_cas_deletes(self):
        """
        Test Incremental deletes on cas and max cas values for keys
        """
        self._load_ops(self.load_gen, 'create')
        self._load_ops(self.load_gen, 'replace', mutations=20)
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self._load_ops(self.load_gen, 'delete')
        self._check_cas(self.load_gen, check_conflict_resolution=False,
                        docs_deleted=True)
        self.validate_test_failure()

    ''' Test expiry on cas and max cas values for keys
    '''
    def test_cas_expiry(self):
        self._load_ops(self.load_gen, 'create')
        self._load_ops(self.load_gen, 'expiry')
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self._check_expiry(self.load_gen)
        self.validate_test_failure()

    def test_cas_touch(self):
        """
        Test touch on cas and max cas values for keys
        """
        self.log.info('Starting test-touch')
        self._load_ops(self.load_gen, 'update', mutations=20)
        self._load_ops(self.load_gen, 'touch')
        self._check_cas(self.load_gen, check_conflict_resolution=False)
        self.validate_test_failure()

    def test_cas_getMeta(self):
        """
        Test getMeta on cas and max cas values for keys
        """
        self.log.info('Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)
        #self._load_ops(ops='add')
        self._load_ops(ops='replace',mutations=20)
        self._check_cas(check_conflict_resolution=False)

        self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)

    def test_cas_setMeta_lower(self):

        self.log.info('Starting test_cas_setMeta_lower')

        # set some kv
        self._load_ops(ops='set', mutations=1)
        #self._check_cas(check_conflict_resolution=False)

        k=0
        while k<10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            self.log.info('For key {0} the vbucket is {1}'.format( key,vbucket_id ))
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            #mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            TEST_CAS = k

            rc = mc_active.getMeta(key)
            cas = rc[4] + 1

            self.log.info('Key {0} retrieved CAS is {1} and will set CAS to {2}'.format(key, rc[4], cas))
            rev_seqno = rc[3]



            # do a set meta based on the existing CAS
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, 123, cas)


            # check what get meta say
            rc = mc_active.getMeta(key)
            cas_post_meta = rc[4]
            self.log.info('Getmeta CAS is {0}'.format(cas_post_meta))
            self.assertTrue( cas_post_meta == cas, 'Meta expected {0} actual {1}'.format( cas, cas_post_meta))

            # and what stats says
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Max CAS for key {0} vbucket is {1}'.format( key, max_cas))
            self.assertTrue(cas_post_meta >= max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))


            # do another mutation and compare
            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # and then mix in a set with meta
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, 225, max_cas+1)
            cas_post_meta = mc_active.getMeta(key)[4]


            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))


            # and one more mutation for good measure
            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

    def test_cas_setMeta_higher(self):

        self.log.info('Starting test_cas_setMeta_higher')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        while k<10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            #mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)
            get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr {0}'.format(get_meta_1)
            #print '-'*100
            TEST_SEQNO = 123
            TEST_CAS = 9966180844186042368

            cas = mc_active.getMeta(key)[4]
            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, TEST_CAS, '123456789',vbucket_id,
             #   add_extended_meta_data=True, conflict_resolution_mode=1)
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, TEST_CAS)

            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr2 {0}'.format(get_meta_2)

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not equal it is {0}'.format(cas_post_meta))
            self.assertTrue(max_cas > cas, '[ERROR]Max cas  is not higher than original cas {0}'.format(cas))

            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, max_cas+1)

            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 125, TEST_CAS+1, '223456789',vbucket_id,
            #    add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_3 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr3 {0}'.format(get_meta_3)

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )

            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not lower it is higher than {0}'.format(cas_post_meta))
            #self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))

            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))


    ''' Test deleteMeta on cas and max cas values for keys
    '''
    def test_cas_deleteMeta(self):

        self.log.info(' Starting test-deleteMeta')


        # load 20 kvs and check the CAS
        self._load_ops(ops='set', mutations=20)
        self.sleep(60)
        self._check_cas(check_conflict_resolution=False)

        k = 0
        test_cas = 456

        while k < 1:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            test_cas = test_cas + 1


            # get the meta data
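            # Bump the retrieved CAS by one so the setWithMeta below drives
            # the vbucket max_cas to a known, strictly higher value.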
            cas = mc_active.getMeta(key)[4] + 1

            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, cas)



            cas_post_meta = mc_active.getMeta(key)[4]

            # verify the observed CAS is as set

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))


            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # what is test cas for? Commenting out for now
            """
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, test_cas)
            cas_post_meta = mc_active.getMeta(key)[4]

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta < max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))
            """

            # test the delete

            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))


            #
            self.log.info('Doing delete with meta, using a lower CAS value')
            get_meta_pre = mc_active.getMeta(key)[4]
            del_with_meta_resp = mc_active.del_with_meta(key, 0, 0, TEST_SEQNO, test_cas, test_cas+1)
            get_meta_post = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas > test_cas+1, '[ERROR]Max cas {0} is not greater than delete cas {1}'.format(max_cas, test_cas))




    ''' Testing skipping conflict resolution, whereby the last write wins, and it does neither cas CR nor rev id CR
    '''
    def test_cas_skip_conflict_resolution(self):

        self.log.info(' Starting test_cas_skip_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            low_seq=12

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            self.log.info('Forcing conflict_resolution to allow insertion of lower Seq Number')
            lower_cas = int(cas)-1
            #import pdb;pdb.set_trace()
            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, low_seq, lower_cas, '123456789',vbucket_id)
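            # The trailing argument (3) is assumed to set the
            # skip-conflict-resolution option, so the mutation is applied even
            # though both its CAS and its rev-seqno are lower.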
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, low_seq, lower_cas, 3)
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect No conflict_resolution to occur, and the last updated mutation to be the winner..')

            #print 'cas meta data after set_meta_force {0}'.format(cas_post_meta)
            #print 'all meta data after set_meta_force {0}'.format(all_post_meta)
            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            self.assertTrue(pre_seq > post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        '''
    def test_revid_conflict_resolution(self):

        self.log.info(' Starting test_cas_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            new_seq=121

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, cas, '123456789',vbucket_id,
                                add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr2 {0}'.format(get_meta_2)
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect rev-id conflict resolution to occur, '
                          'with the higher rev-seqno mutation winning..')
            self.log.info('all meta data after set_meta_force {0}'
                          .format(all_post_meta))

            self.assertTrue(max_cas == cas,
                            '[ERROR]Max cas {0} is not equal to original '
                            'cas {1}'.format(max_cas, cas))
            self.assertTrue(pre_seq < post_seq,
                            '[ERROR]Pre rev id {0} is not less than post '
                            'rev id {1}'.format(pre_seq, post_seq))



    ''' Testing conflict resolution, where timeSync is enabled and cas is lower but higher revid, expect Higher Cas to Win
        '''
    def test_cas_conflict_resolution(self):

        self.log.info(' Starting test_cas_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            new_seq=121

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            lower_cas = int(cas)-100
            self.log.info('Forcing lower rev-id to win with higher CAS value, instead of higher rev-id with Lower Cas ')
            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, lower_cas, '123456789',vbucket_id)
            try:
                set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, lower_cas)
            except mc_bin_client.MemcachedError as e:
                # this is expected
                pass

            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect CAS conflict_resolution to occur, and the first mutation to be the winner..')

            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            #self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a restart server'''
    def test_restart_revid_conflict_resolution(self):

        self.log.info(' Starting test_restart_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)

        k = 0
        key = "{0}{1}".format(self.prefix, k)

        vbucket_id = self.client._get_vBucket_id(key)
        mc_active = self.client.memcached(key)
        mc_master = self.client.memcached_for_vbucket( vbucket_id )
        mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)


        # set a key
        value = 'value0'
        self.client.memcached(key).set(key, 0, 0,
                                       json.dumps({'value': value}))
        vbucket_id = self.client._get_vBucket_id(key)
        #print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = self.client.memcached(key)
        mc_master = self.client.memcached_for_vbucket(vbucket_id)
        mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_1)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        try:
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        except mc_bin_client.MemcachedError as e:
            # this is expected
            pass
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_2)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        #self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        #self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))


        # Restart Nodes
        self._restart_server(self.servers[:])
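        # After warmup the persisted max_cas should be reloaded from disk, so
        # the CAS written before the restart is expected to be returned again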

        # verify the CAS is good
        mc_active = self.client.memcached(key)
        cas_restart = mc_active.getMeta(key)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(key,request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(pre_cas == cas_post, 'cas mismatch pre-set: {0} post-set: {1}'.format(pre_cas, cas_post))
        self.assertTrue(cas_post == cas_restart, 'cas mismatch before restart: {0} after restart: {1}'.format(cas_post, cas_restart))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing rev-id based conflict resolution with timeSync enabled, where the CAS of both mutations matches so rev-id CR is used,
        and verifying the result is retained after a rebalance '''
    def test_rebalance_revid_conflict_resolution(self):

        self.log.info(' Starting test_rebalance_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        value = 'value'
        self.client.memcached(key).set(key, 0, 0, json.dumps({'value': value}))
        vbucket_id = self.client._get_vBucket_id(key)
        #print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = self.client.memcached(key)
        mc_master = self.client.memcached_for_vbucket(vbucket_id)
        mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_1)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by setting the incoming CAS equal to the existing CAS')
        #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)

        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_2)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

        # remove that node
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [] ,[self.master])
        rebalance.result()
        self.sleep(120)
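        # With the node that held the active vbucket rebalanced out, the old
        # replica copy should now serve the data; capture its CAS to compare
        # against the active copy once the node is added back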
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key,request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        mc_active = self.client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]
        self.log.info('active cas {0}'.format(active_CAS))

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing rev-id based conflict resolution with timeSync enabled, where the CAS of both mutations matches so rev-id CR is used,
        and verifying the result is retained after a failover '''
    def test_failover_revid_conflict_resolution(self):

        self.log.info(' Starting test_failover_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        value = 'value'
        self.client.memcached(key).set(key, 0, 0,json.dumps({'value':value}))
        vbucket_id = self.client._get_vBucket_id(key)
        #print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = self.client.memcached(key)
        mc_master = self.client.memcached_for_vbucket(vbucket_id)
        mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_1)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by setting the incoming CAS equal to the existing CAS')
        #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_2)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [] ,[self.master])

        rebalance.result()
        self.sleep(120)
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key,request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        mc_active = self.client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]
        #print 'active cas {0}'.format(active_CAS)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Test getMeta on cas and max cas values for empty vbucket
    '''
    def test_cas_getMeta_empty_vBucket(self):
        self.log.info(' Starting test_cas_getMeta_empty_vBucket ..')
        self._load_ops(ops='set', mutations=20)

        k=0
        all_keys = []
        while k<10:
            k+=1
            key = "{0}{1}".format(self.prefix, k)
            all_keys.append(key)

        vbucket_ids = self.client._get_vBucket_ids(all_keys)

        self.log.info('vbucket ids used by the keys: {0}'.format(vbucket_ids))

        # Pick a vbucket id (0-1023) that none of the generated keys map to
        vb_non_existing = None
        for i in range(1024):
            if i not in vbucket_ids:
                vb_non_existing = i
                break
        if vb_non_existing is None:
            self.log.info('ERROR generating empty vbucket id')

        self.log.info('nominated vb_nonexisting is {0}'.format(vb_non_existing))
        mc_active = self.client.memcached(all_keys[0]) #Taking a temp connection to the mc.
        #max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(vb_non_existing) + ':max_cas'] )
        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(all_keys[0])) + ':max_cas'] )
        self.assertTrue( max_cas != 0, msg='[ERROR] Max cas is zero for a vbucket with mutations')


    def test_meta_backup(self):
        """
        Test that CAS and max_cas values for keys are preserved
        across a bucket backup and restore
        """
        self.log.info('Starting test_meta_backup')
        self._load_ops(ops='set', mutations=20)

        # Do the backup on the bucket
        self.shell = RemoteMachineShellConnection(self.cluster.master)
        self.buckets = self.bucket_util.buckets
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            shell = RemoteMachineShellConnection(self.cluster.master)
            self.shell.execute_cluster_backup(
                self.couchbase_login_info,
                self.backup_location,
                self.command_options)
            self.sleep(5, "Wait before restore backup")
            shell.restore_backupFile(self.couchbase_login_info,
                                     self.backup_location,
                                     [bucket.name for bucket in self.buckets])
            self.log.info("Done with restore")
        finally:
            self._check_cas(check_conflict_resolution=False)

    def _check_cas(self, load_gen, check_conflict_resolution=False,
                   time_sync=None, docs_deleted=False):
        """
        Common function to verify the expected values on cas
        """
        load_gen = copy.deepcopy(load_gen)
        self.log.info('Verifying CAS and max-cas for the keys')
        while load_gen.has_next():
            key, value = load_gen.next()
            cas = self.client.crud("read", key)["cas"]
            if docs_deleted:
                if cas != 0:
                    self.log_failure("Max CAS mismatch. %s != 0" % cas)
            else:
                max_cas = None
                vb_for_key = self.bucket_util.get_vbucket_num_for_key(key)
                for _, data in self.node_data.items():
                    if vb_for_key in data["active"]:
                        vb_stat = data["cb_stat"].vbucket_details(
                            self.bucket.name)
                        max_cas = long(vb_stat[str(vb_for_key)]["max_cas"])
                        break
                if cas != max_cas:
                    self.log_failure("Max CAS mismatch. %s != %s"
                                     % (cas, max_cas))

            if check_conflict_resolution:
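                # Note: mc_active is assumed to be a memcached connection
                # bound to this key's vbucket; it is not created in this helper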
                get_meta_resp = mc_active.getMeta(
                    key,
                    request_extended_meta_data=False)
                if time_sync == 'enabledWithoutDrift':
                    if get_meta_resp[5] != 1:
                        self.log_failure("Metadata indicates "
                                         "conflict resolution is not set")
                elif time_sync == 'disabled':
                    if get_meta_resp[5] != 0:
                        self.log_failure("Metadata indicates "
                                         "conflict resolution is set")

    def _load_ops(self, load_gen, op_type, mutations=1):
        """
        Common function to run set, delete and other operations on the bucket
        """
        self.log.info("Performing %s for %s times" % (op_type, mutations))
        exp = 0
        if op_type == "expiry":
            op_type = "touch"
            exp = self.expire_time

        for _ in range(mutations):
            tasks_info = self.bucket_util.sync_load_all_buckets(
                self.cluster, load_gen, op_type, exp=exp,
                persist_to=self.persist_to, replicate_to=self.replicate_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                batch_size=self.num_items,
                sdk_client_pool=self.sdk_client_pool)
            for task, _ in tasks_info.items():
                if task.fail:
                    self.log_failure("Failures observed during %s" % op_type)
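
    # Hedged usage sketch for _load_ops and _check_cas, assuming a
    # doc_generator like the ones used elsewhere in this suite and that
    # self.key / self.num_items are attributes of the test (hypothetical here):
    #   load_gen = doc_generator(self.key, 0, self.num_items)
    #   self._load_ops(load_gen, "update", mutations=2)
    #   self._check_cas(load_gen, check_conflict_resolution=False)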

    def _check_expiry(self, load_gen):
        """
        Check if num_items are expired as expected
        """
        self.sleep(self.expire_time, "Wait for docs to expire")

        while load_gen.has_next():
            key, value = load_gen.next()
            vb_for_key = self.bucket_util.get_vbucket_num_for_key(key)
            cas = None
            for _, data in self.node_data.items():
                if vb_for_key in data["active"]:
                    vb_stat = data["cb_stat"].vbucket_details(self.bucket.name)
                    cas = long(vb_stat[str(vb_for_key)]["max_cas"])
                    break
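            # cas captured above is the pre-expiry max_cas of the key's
            # vbucket; once the doc has expired, a replace guarded by this
            # stale CAS must fail, and the expected error is
            # DocumentNotFoundException (checked below)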

            replace_result = self.client.crud(
                "replace", key, value,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout=self.sdk_timeout,
                cas=cas)
            if replace_result["status"] is True:
                self.log_failure("Replace on %s succeeded using old CAS %s"
                                 % (key, cas))
            if SDKException.DocumentNotFoundException \
                    not in str(replace_result["error"]):
                self.log_failure("Invalid exception for %s: %s"
                                 % (key, replace_result))