Example #1
    def setUp(self):
        super(basic_ops, self).setUp()
        self.test_log = logging.getLogger("test")
        self.fail = self.input.param("fail", False)
        nodes_init = self.cluster.servers[1:self.nodes_init] if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master] + nodes_init)
        self.bucket_util.add_rbac_user()

        if self.default_bucket:
            self.bucket_util.create_default_bucket(
                replica=self.num_replicas,
                compression_mode=self.compression_mode,
                ram_quota=100,
                bucket_type=self.bucket_type)

        # Wait for the bucket to become ready for ops
        time.sleep(10)
        self.def_bucket = self.bucket_util.get_all_buckets()
        self.client = VBucketAwareMemcached(
            RestConnection(self.cluster.master), self.def_bucket[0])
        self.__durability_level()

        self.operation = self.input.param("operation", "afterAtrPending")
        # create load
        self.value = {'value': 'value1'}
        self.content = self.client.translate_to_json_object(self.value)

        self.docs = []
        self.keys = []
        for i in range(self.num_items):
            key = "test_docs-" + str(i)
            doc = Tuples.of(key, self.content)
            self.keys.append(key)
            self.docs.append(doc)

        self.transaction_config = Transaction().createTransactionConfig(
            self.transaction_timeout, self.durability)
        self.log.info("==========Finished Basic_ops base setup========")
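A minimal sketch of what one entry in self.docs looks like, assuming Tuples here is Reactor's reactor.util.function.Tuples helper (an assumption; the import is not shown above), whose pairs expose getT1()/getT2():

    # Hypothetical inspection of one generated pair (names illustrative).
    pair = Tuples.of("test_docs-0", self.content)
    doc_key = pair.getT1()    # "test_docs-0"
    doc_val = pair.getT2()    # JSON object from translate_to_json_object()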
Example #2
 def sub_doc_read_multi(self,
                        keys,
                        timeout=5,
                        time_unit="seconds",
                        xattr=False):
     """
     :param keys: Documents to perform sub_doc operations on.
                  Must be a dictionary with Keys and List of tuples for
                  path.
     :param timeout: timeout for the operation
     :param time_unit: timeout time unit
     :param xattr: Boolean. If 'True', perform the lookup on xattrs
     :return:
     """
     mutate_in_specs = []
     keys_to_loop = sorted(keys.keys())
     for key in keys_to_loop:
         value = keys[key]
         mutate_in_spec = []
         for _tuple in value:
             _path = _tuple[0]
             _mutate_in_spec = SDKClient.sub_doc_op.getLookUpInSpec(
                 _path, xattr)
             mutate_in_spec.append(_mutate_in_spec)
         content = Tuples.of(key, mutate_in_spec)
         mutate_in_specs.append(content)
     result = SDKClient.sub_doc_op.bulkGetSubDocOperation(
         self.collection, mutate_in_specs)
     # Note: timeout and time_unit are not currently passed to the bulk call
     return self.__translate_get_multi_results(result)
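A hedged usage sketch for sub_doc_read_multi; the client variable and the lookup paths are illustrative assumptions, and the keys argument follows the dict-of-path-tuple-lists shape described in the docstring:

    # Hypothetical caller: look up two paths on one document, one on another.
    lookup_keys = {
        "test_docs-0": [("field_a",), ("nested.field_b",)],
        "test_docs-1": [("field_a",)],
    }
    result = client.sub_doc_read_multi(lookup_keys, timeout=10, xattr=False)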
Example #3
    def setUp(self):
        super(basic_ops, self).setUp()

        if self.default_bucket:
            self.bucket_size = 100
            self.create_bucket(self.cluster)

        self.sleep(10, "Wait for bucket to become ready for ops")

        self.def_bucket = self.bucket_util.get_all_buckets(self.cluster)
        self.client = SDKClient(RestConnection(self.cluster.master),
                                self.def_bucket[0])
        self.__durability_level()

        self.operation = self.input.param("operation", "afterAtrPending")
        self.always_fail = self.input.param("fail", False)
        self.verify = self.input.param("verify", True)

        # create load
        self.value = {'value': 'value1'}
        self.content = self.client.translate_to_json_object(self.value)

        self.docs = []
        self.keys = []
        for i in range(self.num_items):
            key = "test_docs-" + str(i)
            doc = Tuples.of(key, self.content)
            self.keys.append(key)
            self.docs.append(doc)

        self.transaction_config = Transaction().createTransactionConfig(
            self.transaction_timeout, self.durability)
        self.log.info("==========Finished Basic_ops base setup========")
Example #4
    def test_transactions(self):
        tasks = list()
        self.check_fragmentation_using_magma_stats(self.cluster.buckets[0])
        bucket = self.cluster.buckets[0]
        workers = self.process_concurrency
        self.tm = TaskManager(workers)
        transaction_app = Transaction()
        trans_config = transaction_app.createTransactionConfig(
            self.transaction_timeout, self.sdk_timeout,
            self.transaction_durability_level)

        transaction_items = self.input.param("transaction_items", 1000)

        workload = dict()
        workload["keyPrefix"] = "transactions"
        workload["keySize"] = self.key_size
        workload["docSize"] = self.doc_size
        workload["mutated"] = 0
        workload["keyRange"] = Tuples.of(0, transaction_items)
        workload["batchSize"] = 1
        workload["workers"] = 3
        workload["transaction_pattern"] = [[
            CbServer.default_scope, CbServer.default_collection,
            self.transaction_pattern
        ]]

        work_load = WorkLoadSettings(workload["keyPrefix"],
                                     workload["keySize"], workload["docSize"],
                                     workload["mutated"], workload["keyRange"],
                                     workload["batchSize"],
                                     workload["transaction_pattern"],
                                     workload["workers"])
        work_load.setTransactionRollback(self.rollback)
        client = SDKClient([self.cluster.master], bucket)
        transaction_obj = transaction_app.createTransaction(
            client.cluster, trans_config)
        for index, load_pattern in enumerate(work_load.transaction_pattern):
            th_name = "Transaction_%s" % index
            batch_size = load_pattern[0]
            num_transactions = load_pattern[1]
            trans_pattern = load_pattern[2]

            task = TransactionWorkLoadGenerate(
                th_name, client.cluster, client.bucketObj, transaction_obj,
                work_load.doc_gen, batch_size, num_transactions, trans_pattern,
                work_load.commit_transaction, work_load.rollback_transaction,
                transaction_app)
            tasks.append(task)
            self.tm.submit(task)
        self.tm.getAllTaskResult()
        client.close()
        result = \
            self.check_fragmentation_using_magma_stats(self.cluster.buckets[0])
        self.assertTrue(result, "Magma fragmentation error")

        # The final disk check requires randomized documents of size 4096,
        # and no additional documents should be loaded.
        if self.final_disk_check:
            self.final_fragmentation_check(transaction_items)
Example #5
 def next_batch(self):
     self.count = 0
     key_val = []
     while self.count < self._batch_size and self.has_next():
         key, val = self._doc_gen.next()
         key_val.append(Tuples.of(key, val))
         self.count += 1
     return key_val
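A small driver sketch for next_batch(); it assumes the batching object also exposes the has_next() used inside the method, and load_batch is a placeholder for whatever bulk call consumes the Tuples pairs:

    # Hypothetical driver loop: drain the generator batch by batch.
    while batcher.has_next():
        batch = batcher.next_batch()   # list of Tuples.of(key, value) pairs
        load_batch(batch)              # placeholder bulk create/upsert call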
Example #6
 def doc_gen(self, num_items, start=0, value=None):
     # Use a fresh default value instead of a mutable default argument
     if value is None:
         value = {'value': 'value1'}
     self.docs = []
     self.keys = []
     self.content = self.client.translate_to_json_object(value)
     for i in range(start, num_items):
         key = "test_docs-" + str(i)
         doc = Tuples.of(key, self.content)
         self.keys.append(key)
         self.docs.append(doc)
Example #7
 def __create_transaction_docs(self):
     self.value = {'value': 'value1'}
     self.content = \
         self.sdk_clients[self.cluster.buckets[0].name] \
         .translate_to_json_object(self.value)
     for i in range(self.num_items):
         key = "test_docs-" + str(i)
         doc = Tuples.of(key, self.content)
         self.keys.append(key)
         self.docs.append(doc)
Example #8
 def trans_doc_gen(self, start, end, op_type):
     docs = list()
     value = {'mutated': 0}
     content = self.client.translate_to_json_object(value)
     for i in range(start, end):
         key = "%s-%s" % (self.key, i)
         if op_type == DocLoading.Bucket.DocOps.CREATE:
             doc = Tuples.of(key, content)
             docs.append(doc)
         else:
             docs.append(key)
     return docs
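An illustrative pair of calls into trans_doc_gen(); only DocLoading.Bucket.DocOps.CREATE produces (key, content) pairs, while every other op_type falls through to the key-only branch:

    # Hypothetical usage inside the same class.
    create_docs = self.trans_doc_gen(0, 100, DocLoading.Bucket.DocOps.CREATE)
    delete_keys = self.trans_doc_gen(0, 50, DocLoading.Bucket.DocOps.DELETE)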
Example #9
 def upsertMulti(self, keys, exp=0, exp_unit="seconds",
                 persist_to=0, replicate_to=0,
                 timeout=5, time_unit="seconds", retry=5,
                 doc_type="json", durability=""):
     docs = []
     for key, value in keys.items():
         content = self.__translate_to_json_object(value, doc_type)
         doc_tuple = Tuples.of(key, content)
         docs.append(doc_tuple)
     result = doc_op().bulkUpsert(self.collection, docs, exp, exp_unit,
                                  persist_to, replicate_to, durability,
                                  timeout, time_unit)
     return self.__translate_upsert_multi_results(result)
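A hedged usage sketch for upsertMulti(); the client variable is an assumption, and keys is a plain dict of document key to value, as consumed by keys.items() above:

    # Hypothetical caller: upsert ten JSON documents with a 10-second timeout.
    docs_to_upsert = dict(("test_docs-%d" % i, {"value": "value1"})
                          for i in range(10))
    result = client.upsertMulti(docs_to_upsert, timeout=10, doc_type="json")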
Example #10
 def next_batch(self, op_type=None):
     self.count = 0
     key_val = []
     # Value is not required for delete/touch ops,
     # so an empty string is used below
     val = ""
     while self.count < self._batch_size and self.has_next():
         if op_type == "touch" or op_type == "delete":
             key = self._doc_gen.next_key()
         else:
             key, val = self._doc_gen.next()
         key_val.append(Tuples.of(key, val))
         self.count += 1
     return key_val
Example #11
 def next_batch(self, skip_value=False):
     self.count = 0
     key_val = []
     # Value is not required when skip_value is set, so an empty
     # string placeholder is used. Otherwise the value is fetched
     # once and reused for the rest of the batch, unless the
     # generator requires a deep copy (then it is fetched per key).
     val = ""
     while self.count < self._batch_size and self.has_next():
         if not skip_value or self._doc_gen.deep_copy:
             key, val = self._doc_gen.next()
             skip_value = True
         else:
             key = self._doc_gen.next_key()
         key_val.append(Tuples.of(key, val))
         self.count += 1
     return key_val
Example #12
    def sub_doc_insert_multi(self,
                             keys,
                             exp=0,
                             exp_unit="seconds",
                             persist_to=0,
                             replicate_to=0,
                             timeout=5,
                             time_unit="seconds",
                             durability="",
                             create_path=False,
                             xattr=False,
                             cas=0):
        """

        :param keys: Documents to perform sub_doc operations on.
        Must be a dictionary with Keys and List of tuples for
        path and value.
        :param exp: Expiry of document
        :param exp_unit: Expiry time unit
        :param persist_to: Persist to parameter
        :param replicate_to: Replicate to parameter
        :param timeout: timeout for the operation
        :param time_unit: timeout time unit
        :param durability: Durability level parameter
        :param create_path: Boolean used to create sub_doc path if not exists
        :param xattr: Boolean. If 'True', perform xattr operation
        :param cas: CAS value to use for the mutation (0 means no CAS check)
        :return:
        """
        mutate_in_specs = []
        for key, value in keys.items():
            mutate_in_spec = []
            for _tuple in value:
                _path = _tuple[0]
                _val = _tuple[1]
                _mutate_in_spec = SDKClient.sub_doc_op.getInsertMutateInSpec(
                    _path, _val, create_path, xattr)
                mutate_in_spec.append(_mutate_in_spec)
            if not xattr:
                _mutate_in_spec = SDKClient.sub_doc_op.getIncrMutateInSpec(
                    "mutated", 1)
                mutate_in_spec.append(_mutate_in_spec)
            content = Tuples.of(key, mutate_in_spec)
            mutate_in_specs.append(content)
        result = SDKClient.sub_doc_op.bulkSubDocOperation(
            self.collection, mutate_in_specs, exp, exp_unit, persist_to,
            replicate_to, durability, timeout, time_unit, cas)
        return self.__translate_upsert_multi_sub_doc_result(result)
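A usage sketch matching the docstring above: each document key maps to a list of (path, value) tuples; the client name and the paths are illustrative assumptions:

    # Hypothetical caller: insert one new sub-document path per document;
    # with xattr=False the client also increments the "mutated" counter.
    insert_keys = {
        "test_docs-0": [("new_field", "new_value")],
        "test_docs-1": [("nested.new_field", 1)],
    }
    result = client.sub_doc_insert_multi(insert_keys,
                                         create_path=True, xattr=False)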
Example #13
 def next_batch(self, op_type=None, force_gen_docs=False):
     self.count = 0
     key_val = list()
     # Value is not required for delete/touch/read, so val is set to None
     val = None
     while self.count < self._batch_size and self.has_next():
         if op_type in [
                 DocLoading.Bucket.DocOps.DELETE,
                 DocLoading.Bucket.DocOps.TOUCH
         ]:
             key = self._doc_gen.next_key()
         elif op_type == DocLoading.Bucket.DocOps.READ:
             if force_gen_docs:
                 key, val = self._doc_gen.next()
             else:
                 key = self._doc_gen.next_key()
         else:
             key, val = self._doc_gen.next()
         key_val.append(Tuples.of(key, val))
         self.count += 1
     return key_val
Example #14
 def crud(self,
          op_type,
          key,
          value=None,
          exp=0,
          replicate_to=0,
          persist_to=0,
          durability="",
          timeout=5,
          time_unit="seconds",
          create_path=True,
          xattr=False,
          cas=0,
          fail_fast=False):
     result = None
     if op_type == "update":
         result = self.upsert(key,
                              value,
                              exp=exp,
                              persist_to=persist_to,
                              replicate_to=replicate_to,
                              durability=durability,
                              timeout=timeout,
                              time_unit=time_unit,
                              fail_fast=fail_fast)
     elif op_type == "create":
         result = self.insert(key,
                              value,
                              exp=exp,
                              persist_to=persist_to,
                              replicate_to=replicate_to,
                              durability=durability,
                              timeout=timeout,
                              time_unit=time_unit,
                              fail_fast=fail_fast)
     elif op_type == "delete":
         result = self.delete(key,
                              persist_to=persist_to,
                              replicate_to=replicate_to,
                              durability=durability,
                              timeout=timeout,
                              time_unit=time_unit,
                              fail_fast=fail_fast)
     elif op_type == "replace":
         result = self.replace(key,
                               value,
                               exp=exp,
                               persist_to=persist_to,
                               replicate_to=replicate_to,
                               durability=durability,
                               timeout=timeout,
                               time_unit=time_unit,
                               cas=cas,
                               fail_fast=fail_fast)
     elif op_type == "touch":
         result = self.touch(key,
                             exp=exp,
                             persist_to=persist_to,
                             replicate_to=replicate_to,
                             durability=durability,
                             timeout=timeout,
                             time_unit=time_unit,
                             fail_fast=fail_fast)
     elif op_type == "read":
         result = self.read(key,
                            timeout=timeout,
                            time_unit=time_unit,
                            fail_fast=fail_fast)
     elif op_type == "subdoc_insert":
         sub_key, value = value[0], value[1]
         mutate_in_specs = list()
         mutate_in_specs.append(
             SDKClient.sub_doc_op.getInsertMutateInSpec(
                 sub_key, value, create_path, xattr))
         if not xattr:
             mutate_in_specs.append(
                 SDKClient.sub_doc_op.getIncrMutateInSpec("mutated", 1))
         content = Tuples.of(key, mutate_in_specs)
         result = SDKClient.sub_doc_op.bulkSubDocOperation(
             self.collection, [content], exp, time_unit, persist_to,
             replicate_to, durability, timeout, time_unit, cas)
         return self.__translate_upsert_multi_sub_doc_result(result)
     elif op_type == "subdoc_upsert":
         sub_key, value = value[0], value[1]
         mutate_in_specs = list()
         mutate_in_specs.append(
             SDKClient.sub_doc_op.getUpsertMutateInSpec(
                 sub_key, value, create_path, xattr))
         if not xattr:
             mutate_in_specs.append(
                 SDKClient.sub_doc_op.getIncrMutateInSpec("mutated", 1))
         content = Tuples.of(key, mutate_in_specs)
         result = SDKClient.sub_doc_op.bulkSubDocOperation(
             self.collection, [content], exp, time_unit, persist_to,
             replicate_to, durability, timeout, time_unit, cas)
         return self.__translate_upsert_multi_sub_doc_result(result)
     elif op_type == "subdoc_delete":
         mutate_in_specs = list()
         mutate_in_specs.append(
             SDKClient.sub_doc_op.getRemoveMutateInSpec(value, xattr))
         if not xattr:
             mutate_in_specs.append(
                 SDKClient.sub_doc_op.getIncrMutateInSpec("mutated", 1))
         content = Tuples.of(key, mutate_in_specs)
         result = SDKClient.sub_doc_op.bulkSubDocOperation(
             self.collection, [content], exp, time_unit, persist_to,
             replicate_to, durability, timeout, time_unit, cas)
         result = self.__translate_upsert_multi_sub_doc_result(result)
     elif op_type == "subdoc_replace":
         sub_key, value = value[0], value[1]
         mutate_in_specs = list()
         mutate_in_specs.append(
             SDKClient.sub_doc_op.getReplaceMutateInSpec(
                 sub_key, value, xattr))
         if not xattr:
             mutate_in_specs.append(
                 SDKClient.sub_doc_op.getIncrMutateInSpec("mutated", 1))
         content = Tuples.of(key, mutate_in_specs)
         result = SDKClient.sub_doc_op.bulkSubDocOperation(
             self.collection, [content], exp, time_unit, persist_to,
             replicate_to, durability, timeout, time_unit, cas)
         result = self.__translate_upsert_multi_sub_doc_result(result)
     elif op_type == "subdoc_read":
         mutate_in_specs = list()
         mutate_in_specs.append(
             SDKClient.sub_doc_op.getLookUpInSpec(value, xattr))
         content = Tuples.of(key, mutate_in_specs)
         result = SDKClient.sub_doc_op.bulkGetSubDocOperation(
             self.collection, [content])
         result = self.__translate_get_multi_results(result)
     else:
         self.log.error("Unsupported operation %s" % op_type)
     return result
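A few illustrative calls into crud(); the sdk_client name is an assumption, and for the subdoc_* branches value carries the (sub-document path, value) pair that is unpacked at the top of each branch:

    # Hypothetical usage of the dispatcher above.
    sdk_client.crud("create", "test_docs-0", {"value": "value1"}, timeout=10)
    sdk_client.crud("read", "test_docs-0")
    sdk_client.crud("subdoc_insert", "test_docs-0",
                    ["new_field", "new_value"], create_path=True)
    sdk_client.crud("delete", "test_docs-0")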