Example #1
    def __thread_to_transaction(self,
                                transaction,
                                op_type,
                                doc,
                                txn_commit,
                                update_count=1,
                                sync=True,
                                set_exception=True,
                                client=None):
        exception = None
        if client is None:
            client = self.client
        if op_type == "create":
            exception = Transaction().RunTransaction(
                transaction, [client.collection], doc, [], [],
                txn_commit, sync, update_count)
        elif op_type == "update":
            self.test_log.info("updating all the keys through threads")
            exception = Transaction().RunTransaction(
                transaction, [client.collection], [], doc, [],
                txn_commit, sync, update_count)
        elif op_type == "delete":
            exception = Transaction().RunTransaction(
                transaction, [client.collection], [], [], doc,
                txn_commit, sync, update_count)
        if set_exception and exception:
            self.set_exception("Failed")
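The tests further down this page drive this helper from worker threads. A minimal invocation along those lines, inside the same test class (self.transaction and self.docs are assumed to be prepared by the test's setUp):

    import threading

    # Run the transactional create on a background thread; the remaining
    # keyword defaults (update_count, sync, set_exception, client) apply.
    thread = threading.Thread(target=self.__thread_to_transaction,
                              args=(self.transaction, "create", self.docs,
                                    self.transaction_commit))
    thread.start()
    thread.join()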
Example #2
    def __run_mock_test(self, client, doc_op):
        self.log.info("Starting Mock_Transaction")
        if "Atr" in self.operation:
            exception = Transaction().MockRunTransaction(
                client.cluster, self.transaction_config, client.collection,
                self.docs, doc_op, self.transaction_commit, self.operation,
                self.transaction_fail_count)
        else:
            if "Replace" in self.operation:
                exception = Transaction().MockRunTransaction(
                    client.cluster, self.transaction_config, client.collection,
                    self.docs, self.keys, [], self.transaction_commit,
                    self.operation, self.keys[-1], self.transaction_fail)
                self.value = {'mutated': 1, 'value': 'value1'}
                self.content = client.translate_to_json_object(self.value)
            else:
                exception = Transaction().MockRunTransaction(
                    client.cluster, self.transaction_config, client.collection,
                    self.docs, [], [], self.transaction_commit, self.operation,
                    self.keys[-1], self.transaction_fail)

            if "Remove" in self.operation:
                exception = Transaction().MockRunTransaction(
                    client.cluster, self.transaction_config, client.collection,
                    [], [], self.keys, self.transaction_commit, self.operation,
                    self.keys[-1], self.transaction_fail)
        return exception
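A plausible call site for this helper, purely illustrative since the surrounding test method is not shown on this page (self.doc_op and self.transaction_fail come from the setUp examples below):

    # Hypothetical usage: run the mocked transaction and surface any
    # error that was not injected on purpose.
    exception = self.__run_mock_test(self.client, self.doc_op)
    if exception and not self.transaction_fail:
        self.set_exception(exception)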
Example #3
    def test_transactions(self):
        tasks = list()
        self.check_fragmentation_using_magma_stats(self.cluster.buckets[0])
        bucket = self.cluster.buckets[0]
        workers = self.process_concurrency
        self.tm = TaskManager(workers)
        transaction_app = Transaction()
        trans_config = transaction_app.createTransactionConfig(
            self.transaction_timeout, self.sdk_timeout,
            self.transaction_durability_level)

        transaction_items = self.input.param("transaction_items", 1000)

        workload = dict()
        workload["keyPrefix"] = "transactions"
        workload["keySize"] = self.key_size
        workload["docSize"] = self.doc_size
        workload["mutated"] = 0
        workload["keyRange"] = Tuples.of(0, transaction_items)
        workload["batchSize"] = 1
        workload["workers"] = 3
        workload["transaction_pattern"] = [[
            CbServer.default_scope, CbServer.default_collection,
            self.transaction_pattern
        ]]

        work_load = WorkLoadSettings(workload["keyPrefix"],
                                     workload["keySize"], workload["docSize"],
                                     workload["mutated"], workload["keyRange"],
                                     workload["batchSize"],
                                     workload["transaction_pattern"],
                                     workload["workers"])
        work_load.setTransactionRollback(self.rollback)
        client = SDKClient([self.cluster.master], bucket)
        transaction_obj = transaction_app.createTransaction(
            client.cluster, trans_config)
        for index, load_pattern in enumerate(work_load.transaction_pattern):
            th_name = "Transaction_%s" % index
            batch_size = load_pattern[0]
            num_transactions = load_pattern[1]
            trans_pattern = load_pattern[2]

            task = TransactionWorkLoadGenerate(
                th_name, client.cluster, client.bucketObj, transaction_obj,
                work_load.doc_gen, batch_size, num_transactions, trans_pattern,
                work_load.commit_transaction, work_load.rollback_transaction,
                transaction_app)
            tasks.append(task)
            self.tm.submit(task)
        self.tm.getAllTaskResult()
        client.close()
        result = \
            self.check_fragmentation_using_magma_stats(self.cluster.buckets[0])
        self.assertTrue(result, "Magma fragmentation error")

        # The final disk check requires randomized documents of size of 4096
        # and no additional documents should be loaded.
        if self.final_disk_check:
            self.final_fragmentation_check(transaction_items)
Example #4
    def create_Transaction(self, client=None):
        if not client:
            client = self.client
        transaction_config = Transaction().createTransactionConfig(
            self.transaction_timeout, self.durability)
        try:
            # 'createTansaction' (sic) is the method name exposed by the
            # transaction framework, as also used in the examples below
            self.transaction = Transaction().createTansaction(
                client.cluster, transaction_config)
        except Exception as e:
            self.set_exception(e)
Example #5
    def test_txnwithhooks(self):
        # transaction load
        if "Atr" in self.operation:
            exception = Transaction().MockRunTransaction(
                self.client.cluster, self.transaction_config,
                self.client.collection, self.docs, "create",
                self.transaction_commit,
                self.operation, 1)
        else:
            if "Replace" in self.operation:
                exception = Transaction().MockRunTransaction(
                    self.client.cluster, self.transaction_config,
                    self.client.collection, self.docs, self.keys, [],
                    self.transaction_commit, self.operation, self.keys[-1],
                    self.always_fail)
                self.value = {'mutated': 1, 'value': 'value1'}
                self.content = self.client.translate_to_json_object(self.value)
            else:
                exception = Transaction().MockRunTransaction(
                    self.client.cluster, self.transaction_config,
                    self.client.collection, self.docs, [], [],
                    self.transaction_commit, self.operation, self.keys[-1],
                    self.always_fail)

            if "Remove" in self.operation:
                exception = Transaction().MockRunTransaction(
                    self.client.cluster, self.transaction_config,
                    self.client.collection, [], [], self.keys,
                    self.transaction_commit, self.operation, self.keys[-1],
                    self.always_fail)

        # verify the values
        for key in self.keys:
            result = self.client.read(key)
            if "Remove" in self.operation \
                    or self.transaction_commit is False \
                    or self.verify is False:
                if result['status']:
                    actual_val = self.client.translate_to_json_object(
                        result['value'])
                    self.log.info("Actual value for key %s is %s"
                                  % (key, actual_val))
                    msg = \
                        "Key '%s' should not be present in the bucket" \
                        % key
                    self.set_exception(msg)
            else:
                actual_val = self.client.translate_to_json_object(
                    result['value'])
                if self.content != actual_val:
                    self.log.info("Key %s Actual: %s, Expected: %s"
                                  % (key, actual_val, self.content))
                    self.set_exception("Mismatch in doc content")

        if exception and self.fail is not True:
            self.set_exception(exception)
Example #6
    def test_txnwithhooks(self):

        # transaction load
        if "Atr" in self.operation:
            exception = Transaction().MockRunTransaction(
                self.client.cluster, self.transaction_config,
                self.client.collection, self.docs, self.transaction_commit,
                self.operation)

        else:
            if "Replace" in self.operation:
                exception = Transaction().MockRunTransaction(
                    self.client.cluster, self.transaction_config,
                    self.client.collection, self.docs, self.keys, [],
                    self.transaction_commit, self.operation, self.keys[-1])
                self.value = {'mutated': 1, 'value': 'value1'}
                self.content = self.client.translate_to_json_object(self.value)
            else:
                exception = Transaction().MockRunTransaction(
                    self.client.cluster, self.transaction_config,
                    self.client.collection, self.docs, [], [],
                    self.transaction_commit, self.operation, self.keys[-1])

            if "Remove" in self.operation:
                exception = Transaction().MockRunTransaction(
                    self.client.cluster, self.transaction_config,
                    self.client.collection, [], [], self.keys,
                    self.transaction_commit, self.operation, self.keys[-1])

        # verify the values
        for key in self.keys:
            result = self.client.read(key)
            if "Remove" in self.operation or self.transaction_commit == False:
                if result['status']:
                    msg = "Key should be deleted but present in the cluster {}".format(
                        key)
                    self.set_exception(msg)
            else:
                actual_val = self.client.translate_to_json_object(
                    result['value'])
                if self.content != actual_val:
                    self.test_log.info("Actual value for key {}: {}".format(
                        key, actual_val))
                    self.test_log.info(
                        "Expected value for key {}: {}".format(
                            key, self.content))
                    self.set_exception(
                        "Actual and expected values do not match")

        if exception:
            self.set_exception(exception)
Example #7
    def setUp(self):
        super(IsolationDocTest, self).setUp()

        # Create default bucket
        self.bucket_size = 100
        self.create_bucket(self.cluster)

        self.doc_op = self.input.param("doc_op", "create")
        self.operation = self.input.param("operation", "afterAtrPending")
        self.transaction_fail_count = self.input.param("fail_count", 99999)
        self.transaction_fail = self.input.param("fail", True)

        self.cluster_util.print_cluster_stats(self.cluster)
        self.bucket_util.print_bucket_stats(self.cluster)

        # Reset active_resident_threshold to avoid further data load as DGM
        self.active_resident_threshold = 0

        # Create SDK client for each bucket
        self.sdk_clients = dict()
        for bucket in self.cluster.buckets:
            self.sdk_clients[bucket.name] = SDKClient([self.cluster.master],
                                                      bucket)

        self.read_failed = dict()
        self.stop_thread = False
        self.docs = list()
        self.keys = list()
        self.__create_transaction_docs()
        self.__durability_level()
        self.transaction_config = Transaction().createTransactionConfig(
            self.transaction_timeout, self.durability)
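The __create_transaction_docs helper is not reproduced here, but the basic_ops setUp variants below build the same lists inline; an equivalent sketch:

    def __create_transaction_docs(self):
        # Mirrors the inline doc-building loop from the basic_ops
        # setUp examples further down this page
        self.value = {'value': 'value1'}
        client = self.sdk_clients[self.cluster.buckets[0].name]
        self.content = client.translate_to_json_object(self.value)
        for i in range(self.num_items):
            key = "test_docs-" + str(i)
            self.keys.append(key)
            self.docs.append(Tuples.of(key, self.content))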
Example #8
    def setUp(self):
        super(basic_ops, self).setUp()
        self.test_log = logging.getLogger("test")
        self.fail = self.input.param("fail", False)
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] + nodes_init)
        self.bucket_util.add_rbac_user()

        if self.default_bucket:
            self.bucket_util.create_default_bucket(
                replica=self.num_replicas,
                compression_mode=self.compression_mode,
                ram_quota=100,
                bucket_type=self.bucket_type)

        time.sleep(10)
        self.def_bucket = self.bucket_util.get_all_buckets()
        self.client = VBucketAwareMemcached(
            RestConnection(self.cluster.master), self.def_bucket[0])
        self.__durability_level()

        self.operation = self.input.param("operation", "afterAtrPending")
        # create load
        self.value = {'value': 'value1'}
        self.content = self.client.translate_to_json_object(self.value)

        self.docs = []
        self.keys = []
        for i in range(self.num_items):
            key = "test_docs-" + str(i)
            doc = Tuples.of(key, self.content)
            self.keys.append(key)
            self.docs.append(doc)

        self.transaction_config = Transaction().createTransactionConfig(
            self.transaction_timeout, self.durability)
        self.log.info("==========Finished Basic_ops base setup========")
Example #9
    def setUp(self):
        super(basic_ops, self).setUp()

        if self.default_bucket:
            self.bucket_size = 100
            self.create_bucket(self.cluster)

        self.sleep(10, "Wait for bucket to become ready for ops")

        self.def_bucket = self.bucket_util.get_all_buckets(self.cluster)
        self.client = SDKClient(RestConnection(self.cluster.master),
                                self.def_bucket[0])
        self.__durability_level()

        self.operation = self.input.param("operation", "afterAtrPending")
        self.always_fail = self.input.param("fail", False)
        self.verify = self.input.param("verify", True)

        # create load
        self.value = {'value': 'value1'}
        self.content = self.client.translate_to_json_object(self.value)

        self.docs = []
        self.keys = []
        for i in range(self.num_items):
            key = "test_docs-" + str(i)
            doc = Tuples.of(key, self.content)
            self.keys.append(key)
            self.docs.append(doc)

        self.transaction_config = Transaction().createTransactionConfig(
            self.transaction_timeout, self.durability)
        self.log.info("==========Finished Basic_ops base setup========")
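Several of these setUp variants also call a private __durability_level helper that this page does not show; a minimal sketch of the assumed behavior, reading the durability test param into self.durability for createTransactionConfig:

    def __durability_level(self):
        # Assumed sketch: the exact durability constants depend on the
        # framework and are not shown on this page.
        self.durability = self.input.param("durability", "NONE")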
Example #10
    def test_basic_retry_async(self):
        self.test_log.info("going to create and execute the task")
        self.gen_create = self.get_doc_generator(0, self.num_items)
        task = self.task.async_load_gen_docs_atomicity(
            self.cluster,
            self.def_bucket,
            self.gen_create,
            "create",
            exp=0,
            batch_size=10,
            process_concurrency=1,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to,
            timeout_secs=self.sdk_timeout,
            retries=self.sdk_retries,
            update_count=self.update_count,
            transaction_timeout=self.transaction_timeout,
            commit=True,
            durability=self.durability_level,
            sync=True,
            num_threads=1)
        self.task.jython_task_manager.get_task_result(task)
        self.test_log.info("get all the keys in the cluster")
        keys = ["test_docs-0"] * 2

        exception = Transaction().RunTransaction(self.transaction,
                                                 [self.client.collection], [],
                                                 keys, [],
                                                 self.transaction_commit,
                                                 False, 0)
        if exception:
            self.set_exception(Exception(exception))
Example #11
    def test_stop_loading(self):
        """
        Load through transactions and close the transaction abruptly,
        then create a new transaction, sleep for 60 seconds and
        perform create on the same set of docs
        """
        self.num_txn = self.input.param("num_txn", 9)
        self.doc_gen(self.num_items)
        threads = []

        docs = list(self.__chunks(self.docs, len(self.docs) / self.num_txn))

        for doc in docs:
            threads.append(
                threading.Thread(target=self.__thread_to_transaction,
                                 args=(self.transaction, "create", doc,
                                       self.transaction_commit,
                                       self.update_count, True, False)))

        for thread in threads:
            thread.start()

        self.client.cluster.shutdown()
        self.transaction.close()

        self.client1 = VBucketAwareMemcached(
            RestConnection(self.cluster.master), self.def_bucket[0])
        self.create_Transaction(self.client1)
        # Sleep so that transaction cleanup can happen
        self.sleep(self.transaction_timeout + 60)

        self.test_log.info("Going to start the load")
        for doc in docs:
            exception = Transaction().RunTransaction(
                self.transaction, [self.client1.collection], doc, [], [],
                self.transaction_commit, self.sync, self.update_count)
            if exception:
                time.sleep(60)

        self.verify_doc(self.num_items, self.client1)
        self.client1.close()
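This test and test_MultiThreadTxnLoad below rely on a private __chunks helper that is not part of this listing. A generator with the assumed behavior (yielding fixed-size slices of a list) could look like:

    def __chunks(self, items, chunk_size):
        # Assumed helper: yield successive slices of at most chunk_size
        # items, so each slice can go to its own transaction thread.
        for i in range(0, len(items), int(chunk_size)):
            yield items[i:i + int(chunk_size)]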
Example #12
    def test_MultiThreadTxnLoad(self):
        # Atomicity.basic_retry.basic_ops.test_MultiThreadTxnLoad,num_items=1000
        """
        Load data through txn, update half the items through different
        threads and delete the other half through different threads.
        If update_retry, update and delete the same keys in two different
        transactions and make sure the update fails
        """
        self.num_txn = self.input.param("num_txn", 9)
        self.update_retry = self.input.param("update_retry", False)

        self.doc_gen(self.num_items)
        threads = []

        # Create the docs
        exception = Transaction().RunTransaction(
            self.transaction, [self.client.collection], self.docs, [], [],
            self.transaction_commit, True, self.update_count)
        if exception:
            self.set_exception("Failed")

        if self.update_retry:
            threads.append(
                threading.Thread(target=self.__thread_to_transaction,
                                 args=(self.transaction, "delete", self.keys,
                                       self.transaction_commit,
                                       self.update_count)))
            threads.append(
                threading.Thread(target=self.__thread_to_transaction,
                                 args=(self.transaction, "update", self.keys,
                                       self.transaction_commit,
                                       self.update_count)))
        else:
            update_docs = self.__chunks(self.keys[:self.num_items / 2],
                                        self.num_txn)
            delete_docs = self.__chunks(self.keys[self.num_items / 2:],
                                        self.num_txn)

            for keys in update_docs:
                threads.append(
                    threading.Thread(target=self.__thread_to_transaction,
                                     args=(self.transaction, "update", keys,
                                           self.transaction_commit,
                                           self.update_count)))

            for keys in delete_docs:
                threads.append(
                    threading.Thread(target=self.__thread_to_transaction,
                                     args=(self.transaction, "delete", keys,
                                           self.transaction_commit,
                                           self.update_count)))

        for thread in threads:
            thread.start()

        for thread in threads:
            thread.join()

        self.sleep(60)
        if self.update_retry:
            for key in self.keys:
                result = self.client.read(key)
                self.assertEquals(result['status'], False)
        else:
            self.value = {'mutated': 1, 'value': 'value1'}
            self.content = self.client.translate_to_json_object(self.value)

            self.verify_doc(self.num_items / 2, self.client)

            for key in self.keys[self.num_items / 2:]:
                result = self.client.read(key)
                self.assertEquals(result['status'], False)
Example #13
    def setUp(self):
        super(IsolationDocTest, self).setUp()

        self.doc_op = self.input.param("doc_op", "create")
        self.operation = self.input.param("operation", "afterAtrPending")
        self.transaction_fail_count = self.input.param("fail_count", 99999)
        self.transaction_fail = self.input.param("fail", True)

        services = list()
        for service in self.services_init.split("-"):
            services.append(service.replace(":", ","))

        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master],
                            nodes_init, [],
                            services=services)
        self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                             nodes_init)
        self.bucket_util.add_rbac_user()

        self.bucket_util.create_default_bucket(
            replica=self.num_replicas,
            ram_quota=100,
            bucket_type=self.bucket_type,
            storage=self.bucket_storage,
            eviction_policy=self.bucket_eviction_policy,
            compression_mode=self.compression_mode)

        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()

        # Reset active_resident_threshold to avoid further data load as DGM
        self.active_resident_threshold = 0

        # Create SDK client for each bucket
        self.sdk_clients = dict()
        for bucket in self.bucket_util.buckets:
            self.sdk_clients[bucket.name] = SDKClient([self.cluster.master],
                                                      bucket)

        self.read_failed = dict()
        self.stop_thread = False
        self.docs = list()
        self.keys = list()
        self.__create_transaction_docs()
        self.__durability_level()
        self.transaction_config = Transaction().createTransactionConfig(
            self.transaction_timeout, self.durability)
Example #14
    def basic_concurrency(self):
        self.crash = self.input.param("crash", False)

        self.doc_gen(self.num_items)

        # run transaction
        thread = threading.Thread(target=self.__thread_to_transaction,
                                  args=(self.transaction, "create", self.docs,
                                        self.transaction_commit,
                                        self.update_count, True, False))
        thread.start()
        self.sleep(1, "Wait for transaction thread to start")

        if self.crash:
            self.client.cluster.disconnect()
            self.transaction.close()
            self.client1 = SDKClient([self.cluster.master], self.def_bucket[0])
            self.create_Transaction(self.client1)
            self.sleep(self.transaction_timeout + 60,
                       "Wait for transaction cleanup to complete")
            exception = Transaction().RunTransaction(
                self.client.cluster, self.transaction,
                [self.client1.collection], self.docs, [], [],
                self.transaction_commit, self.sync, self.update_count)
            if exception:
                self.sleep(60, "Wait for transaction cleanup to happen")

            self.verify_doc(self.num_items, self.client1)
            self.client1.close()

        else:
            key = "test_docs-0"
            # insert will succeed due to doc_isolation feature
            result = self.client.insert(key, "value")
            self.assertEqual(result["status"], True)

            # Update should pass
            result = self.client.upsert(key, "value")
            self.assertEqual(result["status"], True)

            # delete should pass
            result = self.client.delete(key)
            self.assertEqual(result["status"], True)

        thread.join()
Example #15
    def test_parallel_transactions(self):
        trans_obj = Transaction()
        self.client = self.sdk_client_pool.get_client_for_bucket(
            self.bucket, self.scope_name, self.collection_name)

        # Create trans config and object
        transaction_config = trans_obj.createTransactionConfig(
            self.transaction_timeout, self.__durability_level())
        self.transaction = trans_obj.createTansaction(self.client.cluster,
                                                      transaction_config)

        # Create docs for update/delete ops
        if self.doc_ops[0] != DocLoading.Bucket.DocOps.CREATE:
            docs = self.trans_doc_gen(0, self.num_items / 2,
                                      DocLoading.Bucket.DocOps.CREATE)
            self.__transaction_runner(trans_obj, docs,
                                      DocLoading.Bucket.DocOps.CREATE)
        if self.doc_ops[1] != DocLoading.Bucket.DocOps.CREATE:
            docs = self.trans_doc_gen(self.num_items / 2, self.num_items,
                                      DocLoading.Bucket.DocOps.CREATE)
            self.__transaction_runner(trans_obj, docs,
                                      DocLoading.Bucket.DocOps.CREATE)

        # Create doc_gens for test
        doc_set = list()
        doc_set.append(
            self.trans_doc_gen(0, self.num_items / 2, self.doc_ops[0]))
        doc_set.append(
            self.trans_doc_gen(self.num_items / 2, self.num_items,
                               self.doc_ops[1]))

        t1 = Thread(target=self.__transaction_runner,
                    args=[trans_obj, doc_set[0], self.doc_ops[0]])
        t2 = Thread(target=self.__transaction_runner,
                    args=[trans_obj, doc_set[1], self.doc_ops[1]])

        t1.start()
        t2.start()
        t1.join()
        t2.join()

        self.validate_test_failure()
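The __transaction_runner helper is not part of this listing. One plausible shape, reusing the RunTransaction argument order from test_stop_loading above (illustrative only, not the framework's actual implementation):

    def __transaction_runner(self, trans_obj, docs, doc_op):
        # Place the generated docs/keys into the create/update/delete
        # slot matching doc_op; all names here follow the examples above.
        creates, updates, deletes = [], [], []
        if doc_op == DocLoading.Bucket.DocOps.CREATE:
            creates = docs
        elif doc_op == DocLoading.Bucket.DocOps.UPDATE:
            updates = docs
        else:
            deletes = docs
        exception = trans_obj.RunTransaction(
            self.client.cluster, self.transaction, [self.client.collection],
            creates, updates, deletes, self.transaction_commit, True, 1)
        if exception:
            self.log_failure(exception)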
Example #16
    def test_stop_loading(self):
        """
        Load through transactions and close the transaction abruptly,
        create a new transaction sleep for 60 seconds and
        perform create on the same set of docs
        """
        self.num_txn = self.input.param("num_txn", 9)
        self.doc_gen(self.num_items)
        threads = []

        docs = list(self.__chunks(self.docs, len(self.docs) / self.num_txn))

        for doc in docs:
            threads.append(
                threading.Thread(target=self.__thread_to_transaction,
                                 args=(self.transaction, "create", doc,
                                       self.transaction_commit,
                                       self.update_count, True, False)))

        for thread in threads:
            thread.start()

        self.client.cluster.disconnect()
        self.transaction.close()

        self.client1 = SDKClient([self.cluster.master], self.def_bucket[0])
        self.create_Transaction(self.client1)
        self.sleep(self.transaction_timeout + 60,
                   "Wait for transaction cleanup to happen")

        self.log.info("going to start the load")
        for doc in docs:
            exception = Transaction().RunTransaction(
                self.client1.cluster, self.transaction,
                [self.client1.collection], doc, [], [],
                self.transaction_commit, self.sync, self.update_count)
            if exception:
                self.sleep(60, "Wait for transaction cleanup to happen")

        self.verify_doc(self.num_items, self.client1)
        self.client1.close()
Example #17
    def basic_concurrency(self):
        self.crash = self.input.param("crash", False)

        self.doc_gen(self.num_items)

        # run transaction
        thread = threading.Thread(target=self.__thread_to_transaction,
                                  args=(self.transaction, "create", self.docs,
                                        self.transaction_commit,
                                        self.update_count, True, False))
        thread.start()
        self.sleep(1)

        if self.crash:
            self.client.cluster.shutdown()
            self.transaction.close()
            self.test_log.info("Going to create a new transaction")
            self.client1 = VBucketAwareMemcached(
                RestConnection(self.cluster.master), self.def_bucket[0])
            self.create_Transaction(self.client1)
            self.sleep(self.transaction_timeout + 60)
            exception = Transaction().RunTransaction(
                self.transaction, [self.client1.collection], self.docs,
                [], [], self.transaction_commit, self.sync,
                self.update_count)
            if exception:
                time.sleep(60)

            self.verify_doc(self.num_items, self.client1)
            self.client1.close()

        else:
            key = "test_docs-0"
            # insert will fail
            result = self.client.insert(key, "value")
            self.assertEqual(result["status"], False)

            # Update should pass
            result = self.client.upsert(key, "value")
            self.assertEqual(result["status"], True)

            # delete should pass
            result = self.client.delete(key)
            self.assertEqual(result["status"], True)

        thread.join()
Example #18
    def test_transaction_with_crud(self):
        doc_op = self.doc_ops[0]
        transx_op = self.doc_ops[1]
        trans_obj = Transaction()
        supported_d_levels = self.bucket_util.get_supported_durability_levels()

        self.client = self.sdk_client_pool.get_client_for_bucket(
            self.bucket, self.scope_name, self.collection_name)

        half_of_num_items = self.num_items / 2
        doc_gen = doc_generator(self.key, 0, half_of_num_items)
        t_doc_gen = self.trans_doc_gen(half_of_num_items, self.num_items,
                                       transx_op)

        # Create trans config and object
        transaction_config = trans_obj.createTransactionConfig(
            self.transaction_timeout, self.__durability_level())
        self.transaction = trans_obj.createTansaction(self.client.cluster,
                                                      transaction_config)

        # Create docs for update/delete ops
        if doc_op != DocLoading.Bucket.DocOps.CREATE:
            task = self.task.async_load_gen_docs(
                self.cluster,
                self.bucket,
                doc_gen,
                DocLoading.Bucket.DocOps.CREATE,
                timeout_secs=self.sdk_timeout,
                process_concurrency=8,
                batch_size=100,
                sdk_client_pool=self.sdk_client_pool)
            self.task_manager.get_task_result(task)
        if transx_op != DocLoading.Bucket.DocOps.CREATE:
            t_doc_gen = self.trans_doc_gen(half_of_num_items, self.num_items,
                                           DocLoading.Bucket.DocOps.CREATE)
            self.__transaction_runner(trans_obj, t_doc_gen,
                                      DocLoading.Bucket.DocOps.CREATE)

        replicate_to = choice(range(0, self.num_replicas))
        persist_to = choice(range(0, self.num_replicas + 1))
        durability = choice(supported_d_levels)
        self.log.info("%s replicate_to=%s, persist_to=%s, durability=%s" %
                      (doc_op, replicate_to, persist_to, durability))

        trans_thread = Thread(target=self.__transaction_runner,
                              args=[trans_obj, t_doc_gen, transx_op])
        crud_task = self.task.async_load_gen_docs(
            self.cluster,
            self.bucket,
            doc_gen,
            doc_op,
            replicate_to=replicate_to,
            persist_to=persist_to,
            durability=durability,
            timeout_secs=self.sdk_timeout,
            process_concurrency=1,
            batch_size=1,
            sdk_client_pool=self.sdk_client_pool)
        trans_thread.start()
        trans_thread.join()
        self.task_manager.get_task_result(crud_task)
        if crud_task.fail:
            self.log_failure("Failures seen during doc_crud: %s" %
                             crud_task.fail)
        self.validate_test_failure()