Example #1
class MagmaCrashTests(MagmaBaseTest):
    def setUp(self):
        self.input = TestInputSingleton.input
        self.input.test_params.update({"random_key": True})
        super(MagmaCrashTests, self).setUp()
        self.sdk_timeout = self.input.param("sdk_timeout", 10)
        self.time_unit = "seconds"
        self.graceful = self.input.param("graceful", False)
        self.assertTrue(self.rest.update_autofailover_settings(False, 600),
                        "AutoFailover disabling failed")
        self.crash_th = None
        self.sdk_retry_strategy = self.input.param(
            "sdk_retry_strategy", SDKConstants.RetryStrategy.FAIL_FAST)

    def tearDown(self):
        self.stop_crash = True
        if self.crash_th and self.crash_th.is_alive():
            self.crash_th.join()
        super(MagmaCrashTests, self).tearDown()

    def kill_magma_check_wal_file_size(self):
        nIter = 200
        while nIter > 0:
            shell = RemoteMachineShellConnection(self.cluster.master)
            shell.kill_memcached()
            shell.disconnect()
            self.sleep(10, "Sleep for 10s so that memcached can restart")
            nIter -= 1

    def test_crash_during_ops(self):
        self.graceful = self.input.param("graceful", False)
        wait_warmup = self.input.param("wait_warmup", True)
        self.log.info("====test_crash_during_ops starts====")

        self.compute_docs_ranges()

        tasks_info = dict()
        for collection in self.collections:
            self.generate_docs(doc_ops=self.doc_ops, target_vbucket=None)
            tem_tasks_info = self.loadgen_docs(
                self.retry_exceptions,
                self.ignore_exceptions,
                scope=CbServer.default_scope,
                collection=collection,
                suppress_error_table=True,
                skip_read_on_error=True,
                _sync=False,
                doc_ops=self.doc_ops,
                track_failures=False,
                sdk_retry_strategy=self.sdk_retry_strategy)
            tasks_info.update(tem_tasks_info.items())

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs=dict(graceful=self.graceful,
                                                     wait=wait_warmup))
        self.crash_th.start()
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")
        self.validate_seq_itr()

    def test_crash_during_recovery(self):
        self.compute_docs_ranges()
        tasks_info = dict()

        for collection in self.collections:
            self.generate_docs(doc_ops=self.doc_ops, target_vbucket=None)
            tem_tasks_info = self.loadgen_docs(
                self.retry_exceptions,
                self.ignore_exceptions,
                scope=CbServer.default_scope,
                collection=collection,
                suppress_error_table=True,
                skip_read_on_error=True,
                _sync=False,
                doc_ops=self.doc_ops,
                track_failures=False,
                sdk_retry_strategy=self.sdk_retry_strategy)
            tasks_info.update(tem_tasks_info.items())

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"kill_itr": 5})
        self.crash_th.start()
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")
        self.validate_seq_itr()

    def test_crash_before_upserts(self):
        self.log.info("test_update_multi starts")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        upsert_doc_list = self.get_fragmentation_upsert_docs_list()

        count = 0
        self.mutate = 0
        while count < self.test_itr:
            self.log.info("Iteration == {}".format(count + 1))

            self.sigkill_memcached(graceful=self.graceful)

            for itr in upsert_doc_list:
                self.doc_ops = "update"
                self.update_start = 0
                self.update_end = itr

                if self.rev_update:
                    self.update_start = -int(itr - 1)
                    self.update_end = 1

                self.generate_docs(doc_ops="update")

                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      suppress_error_table=True,
                                      _sync=True,
                                      doc_ops="update")

                self.bucket_util._wait_for_stats_all_buckets(
                    self.cluster, self.cluster.buckets)

            count += 1

        self.validate_data("update", self.gen_update)
        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("====test_update_multi ends====")

    def test_crash_before_multi_update_deletes(self):
        self.log.info("===test_crash_before_multi_update_deletes starts===")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        count = 0
        self.mutate = 0
        for i in range(self.test_itr):
            self.log.info("Step 1, Iteration= {}".format(i + 1))
            while count < self.update_itr:
                self.sigkill_memcached(graceful=self.graceful)

                self.doc_ops = "update"
                self.update_start = 0
                self.update_end = self.num_items

                self.generate_docs(doc_ops="update")
                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      suppress_error_table=True,
                                      _sync=True,
                                      doc_ops="update")

                self.bucket_util._wait_for_stats_all_buckets(
                    self.cluster, self.cluster.buckets)

                count += 1
            self.update_itr += self.update_itr

            # data validation is done only for the last iteration
            if i + 1 == self.test_itr:
                self.validate_data("update", self.gen_update)

            self.log.debug("Step 2, Iteration {}".format(i + 1))
            self.sigkill_memcached()

            self.doc_ops = "delete"
            self.delete_start = 0
            self.delete_end = self.num_items // 2

            self.generate_docs(doc_ops="delete")
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  suppress_error_table=True,
                                  _sync=True,
                                  doc_ops="delete")

            self.bucket_util._wait_for_stats_all_buckets(
                self.cluster, self.cluster.buckets)
            self.bucket_util.verify_stats_all_buckets(self.cluster,
                                                      self.num_items)

            self.log.debug("Step 3, Iteration= {}".format(i + 1))
            self.sigkill_memcached()

            self.gen_create = copy.deepcopy(self.gen_delete)
            self.doc_ops = "create"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  suppress_error_table=True,
                                  _sync=True,
                                  doc_ops="create")

            self.bucket_util._wait_for_stats_all_buckets(
                self.cluster, self.cluster.buckets)
            self.bucket_util.verify_stats_all_buckets(self.cluster,
                                                      self.num_items)

        self.validate_data("create", self.gen_create)
        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("===test_crash_before_multi_update_deletes ends===")

    def test_crash_during_get_ops(self):

        self.log.info("test_crash_during_get_ops starts")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        tasks_info = dict()
        upsert_doc_list = self.get_fragmentation_upsert_docs_list()

        for itr in upsert_doc_list:
            self.doc_ops = "update"
            self.update_start = 0
            self.update_end = itr
            self.mutate = -1
            self.generate_docs(doc_ops="update")
            update_task_info = self.loadgen_docs(self.retry_exceptions,
                                                 self.ignore_exceptions,
                                                 suppress_error_table=True,
                                                 _sync=False)

            tasks_info.update(update_task_info.items())

        self.doc_ops = "read"
        self.generate_docs(doc_ops="read")
        start = -int(self.num_items - 1)
        end = 1
        reverse_read_gen = self.genrate_docs_basic(start, end)

        count = 0
        while count < self.read_thread_count:
            read_task_info = self.loadgen_docs(self.retry_exceptions,
                                               self.ignore_exceptions,
                                               suppress_error_table=True,
                                               _sync=False)

            tasks_info.update(read_task_info.items())
            count += 1
            if count < self.read_thread_count:
                read_task_info = self.bucket_util._async_validate_docs(
                    self.cluster,
                    reverse_read_gen,
                    "read",
                    0,
                    batch_size=self.batch_size,
                    process_concurrency=self.process_concurrency,
                    timeout_secs=self.sdk_timeout,
                    retry_exceptions=self.retry_exceptions,
                    ignore_exceptions=self.ignore_exceptions,
                    suppress_error_table=False)
                tasks_info.update(read_task_info.items())
                count += 1

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"graceful": self.graceful})
        self.crash_th.start()
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")
        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("test_crash_during_get_ops ends")

    def test_crash_during_upserts_using_multithreads(self):
        self.log.info("test_crash_during_upserts_using_multithreads starts")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        tasks_info = dict()
        self.doc_ops = "update"
        self.update_start = 0
        self.update_end = self.num_items

        count = 0
        while count < self.read_thread_count:
            self.generate_docs(doc_ops="update")
            update_task_info = self.loadgen_docs(
                self.retry_exceptions,
                self.ignore_exceptions,
                suppress_error_table=True,
                skip_read_on_error=True,
                _sync=False,
                track_failures=False,
                sdk_retry_strategy=self.sdk_retry_strategy)
            tasks_info.update(update_task_info.items())
            count += 1
            self.sleep(5)

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"graceful": self.graceful})
        self.crash_th.start()
        for task in tasks_info:
            self.task_manager.get_task_result(task)

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")

        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("test_crash_during_upserts_using_multithreads ends")

    def test_crash_during_multi_updates_of_single_doc(self):

        self.log.info(
            "==test_crash_during_multi_updates_of_single_doc starts==")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        self.client = SDKClient([self.cluster.master],
                                self.cluster.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)

        self.doc_ops = "update"
        self.gen_update = self.genrate_docs_basic(start=0, end=1)
        key, val = self.gen_update.next()

        def upsert_doc(start_num, end_num, key_obj, val_obj):
            for i in range(start_num, end_num):
                val_obj.put("mutated", i)
                self.client.upsert(key_obj, val_obj)

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"graceful": self.graceful})
        self.crash_th.start()

        threads = []
        start = 0
        end = 0
        for _ in range(10):
            start = end
            end += 10
            th = threading.Thread(target=upsert_doc,
                                  args=[start, end, key, val])
            th.start()
            threads.append(th)

        for th in threads:
            th.join()

        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")

        self.bucket_util._wait_for_stats_all_buckets(self.cluster,
                                                     self.cluster.buckets)

        success, _ = self.client.get_multi([key], self.wait_timeout)
        self.assertIs(key in success,
                      True,
                      msg="key {} doesn't exist\
                      ".format(key))

        expected_val = Json.loads(val.toString())
        actual_val = Json.loads(success[key]['value'].toString())
        self.assertIs(expected_val == actual_val,
                      True,
                      msg="expected_val-{} != Actual_val-{}\
                      ".format(expected_val, actual_val))

        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("==test_crash_during_multi_updates_of_single_doc ends==")

    def test_crash_during_val_movement_across_trees(self):

        self.log.info("==test_crash_during_val_movement_across_trees starts==")
        self.change_swap_space(self.cluster.nodes_in_cluster)

        upsert_size = 0
        if self.doc_size < 32:
            upsert_size = 2048

        self.update_start = 0
        self.update_end = self.num_items
        if self.rev_update:
            self.update_start = -int(self.num_items - 1)
            self.update_end = 1
        self.doc_ops = "update"

        self.crash_th = threading.Thread(target=self.crash,
                                         kwargs={"graceful": self.graceful})
        self.crash_th.start()

        count = 0
        while count < self.test_itr:
            self.log.info("Iteration == {}".format(count))

            self.mutate += 1
            self.gen_update = doc_generator(
                self.key,
                self.update_start,
                self.update_end,
                doc_size=upsert_size,
                doc_type=self.doc_type,
                target_vbucket=self.target_vbucket,
                vbuckets=self.cluster.vbuckets,
                key_size=self.key_size,
                mutate=self.mutate,
                randomize_doc_size=self.randomize_doc_size,
                randomize_value=self.randomize_value,
                mix_key_size=self.mix_key_size,
                deep_copy=self.deep_copy)

            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  suppress_error_table=True,
                                  skip_read_on_error=True,
                                  _sync=True,
                                  track_failures=False,
                                  sdk_retry_strategy=self.sdk_retry_strategy)
            self.bucket_util._wait_for_stats_all_buckets(
                self.cluster, self.cluster.buckets)

            self.generate_docs(doc_ops="update")
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  suppress_error_table=True,
                                  skip_read_on_error=True,
                                  _sync=True,
                                  track_failures=False,
                                  sdk_retry_strategy=self.sdk_retry_strategy)
            self.bucket_util._wait_for_stats_all_buckets(
                self.cluster, self.cluster.buckets)

            count += 1
        self.stop_crash = True
        self.crash_th.join()
        self.assertFalse(self.crash_failure,
                         "CRASH | CRITICAL | WARN messages found in cb_logs")

        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.validate_seq_itr()
        self.log.info("==test_crash_during_val_movement_across_trees ends==")
Example #2
class BasicUpsertTests(BasicCrudTests):
    def test_update_n_times(self):
        """
        Test Focus: Update items n times and
                    test space amplification
        STEPS:
          -- Update items n times (where n is calculated
             from the fragmentation value)
          -- Check space amplification
          -- Repeat the above steps n times
          -- After all iterations validate the data
        """

        self.log.info("test_update_n_times starts")
        upsert_doc_list = self.get_fragmentation_upsert_docs_list()
        self.mutate = 0
        count = 0

        while count < self.test_itr:
            self.log.info("Iteration == {}".format(count+1))
            #######################################################################
            '''
            STEP - 1, Update Items

            '''
            for itr in upsert_doc_list:
                self.doc_ops = "update"
                self.update_start = 0
                self.update_end = itr

                if self.rev_update:
                    self.update_start = -int(itr - 1)
                    self.update_end = 1

                self.generate_docs(doc_ops="update")
                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      _sync=True)
                self.log.info("Waiting for ep-queues to get drained")
                self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

            #######################################################################
            '''
            STEP - 2, Space Amplification Check

            '''
            msg = "Fragmentation value for {} stats exceeds\
            the configured value"

            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg.format("KV"))

            usage_factor = ((float(
                    self.num_items + sum(upsert_doc_list)
                    ) / self.num_items) + 0.5)
            self.log.debug("Disk usage factor = {}".format(usage_factor))

            time_end = time.time() + 60 * 2
            while time.time() < time_end:
                disk_usage = self.get_disk_usage(self.buckets[0],
                                            self.cluster.nodes_in_cluster)
                _res = disk_usage[0]
                self.log.debug("usage at time {} is {}".format((time_end - time.time()), _res))
                if _res < usage_factor * self.disk_usage[self.disk_usage.keys()[0]]:
                    break

            msg = "Iteration= {}, Disk Usage = {}MB\
            exceeds {} times from Actual disk usage = {}MB"
            self.assertIs(_res > usage_factor * self.disk_usage[
                self.disk_usage.keys()[0]],
                False, msg.format(count+1, _res, usage_factor,
                                  self.disk_usage[self.disk_usage.keys()[0]]))

            count += 1
        #######################################################################
        '''
        STEP - 3, Data Validation

        '''
        self.validate_data("update", self.gen_update)

        self.change_swap_space(self.cluster.nodes_in_cluster,
                               disable=False)
        self.log.info("====test_update_n_times ends====")

    def test_multi_update_delete(self):
        """
        STEPS:
          -- Update items x times
          -- Check space amplification
          -- Delete half of the items
          -- Check space Amplification
          -- Recreate deleted items
          -- Check Space Amplification
          -- Repeat above steps for n times
          -- After all iterations validate the data
        """
        self.log.info("==== test_multi_update_delete starts =====")

        count = 0
        msg_stats = "Fragmentation value for {} stats exceeds\
        the configured value"
        msg = "{} Iteration= {}, Disk Usage = {}MB\
         exceeds 2.5 times from Actual disk usage = {}MB"

        self.mutate = 0
        for i in range(self.test_itr):
            self.log.info("Step 1, Iteration= {}".format(i+1))
            #######################################################################
            '''
            STEP - 1, Update Items, update_itr times

            '''
            while count < self.update_itr:
                self.doc_ops = "update"
                self.update_start = 0
                self.update_end = self.num_items
                if self.rev_update:
                    self.update_start = -int(self.num_items - 1)
                    self.update_end = 1

                self.generate_docs(doc_ops="update")
                _ = self.loadgen_docs(self.retry_exceptions,
                                      self.ignore_exceptions,
                                      _sync=True)

                self.log.info("Waiting for ep-queues to get drained")
                self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

                ###################################################################
                '''
                  STEP - 2
                   -- Space Amplification check after each update iteration.
                   -- Data validation only for last update iteration
                '''
                self.log.info("Step 2, Iteration= {}".format(i+1))
                _result = self.check_fragmentation_using_magma_stats(
                    self.buckets[0],
                    self.cluster.nodes_in_cluster)
                self.assertIs(_result, True,
                              msg_stats.format("magma"))

                _r = self.check_fragmentation_using_bucket_stats(
                    self.buckets[0], self.cluster.nodes_in_cluster)
                self.assertIs(_r, True,
                              msg_stats.format("KV"))
                time_end = time.time() + 60 * 2
                while time.time() < time_end:
                    disk_usage = self.get_disk_usage(self.buckets[0],
                                                     self.cluster.nodes_in_cluster)
                    _res = disk_usage[0]
                    self.log.info("Update Iteration-{}, Disk Usage at time {} is {}MB \
                    ".format(count+1, time_end - time.time(), _res))
                    if _res < 2.5 * self.disk_usage[self.disk_usage.keys()[0]]:
                        break

                self.assertIs(
                    _res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]],
                    False, msg.format("update", count+1, _res,
                                      self.disk_usage[self.disk_usage.keys()[0]]))

                count += 1
            self.update_itr += self.update_itr

            if i+1 == self.test_itr:
                self.validate_data("update", self.gen_update)
            ###################################################################
            '''
            STEP - 3
              -- Delete half of the docs.
            '''

            self.log.debug("Step 3, Iteration {}".format(i+1))
            self.doc_ops = "delete"

            self.delete_start = 0
            self.delete_end = self.num_items//2
            if self.rev_del:
                self.delete_start = -int(self.num_items//2 - 1)
                self.delete_end = 1

            self.generate_docs(doc_ops="delete")
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)

            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)

            ###################################################################
            '''
            STEP - 4
              -- Space Amplification Check after deletion.
            '''
            self.log.debug("Step 4, Iteration {}".format(i+1))
            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                 self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("Delete Iteration {}, Disk Usage- {}MB\
            ".format(i+1, _res))
            self.assertIs(
                _res > 2.5 * self.disk_usage[
                    self.disk_usage.keys()[0]],
                False, msg.format(
                    "delete", i+1, _res,
                    self.disk_usage[self.disk_usage.keys()[0]]))

            ###################################################################
            '''
            STEP - 5
              -- ReCreation of docs.
            '''
            self.log.debug("Step 5, Iteration= {}".format(i+1))

            self.gen_create = copy.deepcopy(self.gen_delete)
            self.doc_ops = "create"

            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)

            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)

            ###################################################################
            '''
            STEP - 6
              -- Space Amplification Check after Recreation.
            '''
            self.log.debug("Step 6, Iteration= {}".format(i+1))

            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("Create Iteration{}, Disk Usage= {}MB \
            ".format(i+1, _res))
            self.assertIs(_res > 2.5 * self.disk_usage[
                self.disk_usage.keys()[0]],
                False, msg.format("Create", _res, i+1,
                                  self.disk_usage[self.disk_usage.keys()[0]]))

        ###################################################################
        '''
        STEP - 7
          -- Validate data
           -- Data validation is only for the creates in last iterations.
        '''
        self.log.debug("Step 7, Iteration= {}".format(i+1))
        self.validate_data("create", self.gen_create)
        self.log.info("====test_multiUpdate_delete ends====")

    def test_update_rev_update(self):
        """
        STEPS:
          -- Update num_items // 2 items.
          -- Reverse update remaining num_items // 2 items.
          -- If next_half is false, skip the above step
          -- and instead reverse update the items from the first step
          -- Check space amplification
          -- Repeat above steps x times
          -- Delete all the items
          -- Check space Amplification
          -- Recreate deleted items
          -- Check Space Amplification
          -- Repeat above steps for n times
          -- After all iterations validate the data
        """
        self.log.info("==== test_update_rev_update starts =====")

        msg_stats = "Fragmentation value for {} stats exceeds\
        the configured value"
        msg = "{} Iteration= {}, Disk Usage = {}MB\
        exceeds {} times from Actual disk usage = {}MB"

        count = 0
        mutated = 1
        for i in range(self.test_itr):
            self.log.debug("Step 1, Iteration= {}".format(i+1))
            #######################################################################
            '''
            STEP - 1, Update Items, update_itr times
              -- Update n // 2 items
              -- If self.next_half is true
              -- Update remaining n//2 items
              -- Else, again update items in
                reverse order in first point
            '''
            while count < self.update_itr:
                tasks_info = dict()
                self.doc_ops = "update"
                self.gen_update = self.genrate_docs_basic(0, self.num_items //2,
                                                          mutate=mutated)
                tem_tasks_info = self.loadgen_docs(self.retry_exceptions,
                                                   self.ignore_exceptions,
                                                   _sync=False)
                tasks_info.update(tem_tasks_info.items())
                if self.next_half:
                    start = - (self.num_items - 1)
                    end = - (self.num_items // 2 - 1)
                    self.gen_update = self.genrate_docs_basic(start, end,
                                                              mutate=mutated)
                    tem_tasks_info = self.loadgen_docs(self.retry_exceptions,
                                                       self.ignore_exceptions,
                                                       _sync=False)
                    tasks_info.update(tem_tasks_info.items())

                for task in tasks_info:
                    self.task_manager.get_task_result(task)
                self.bucket_util.verify_doc_op_task_exceptions(
                    tasks_info, self.cluster)
                self.bucket_util.log_doc_ops_task_failures(tasks_info)
                mutated += 1

                if not self.next_half:
                    start = - (self.num_items - 1)
                    end = - (self.num_items // 2 - 1)
                    self.gen_update = self.genrate_docs_basic(start, end,
                                                              mutate=mutated)
                    _ = self.loadgen_docs(self.retry_exceptions,
                                          self.ignore_exceptions,
                                          _sync=True)
                    mutated += 1

                self.log.info("Waiting for ep-queues to get drained")
                self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
                ###################################################################
                '''
                STEP - 2
                  -- Space Amplification check after each update iteration.
                '''
                self.log.debug("Step 2, Iteration= {}".format(i+1))
                _result = self.check_fragmentation_using_magma_stats(
                    self.buckets[0],
                    self.cluster.nodes_in_cluster)
                self.assertIs(_result, True,
                              msg_stats.format("magma"))

                _r = self.check_fragmentation_using_bucket_stats(
                    self.buckets[0], self.cluster.nodes_in_cluster)
                self.assertIs(_r, True,
                              msg_stats.format("KV"))

                disk_usage = self.get_disk_usage(
                    self.buckets[0], self.cluster.nodes_in_cluster)
                _res = disk_usage[0]
                self.log.info("Update Iteration- {}, Disk Usage- {}MB\
                ".format(count+1, _res))
                self.assertIs(
                    _res > 2.5 * self.disk_usage[self.disk_usage.keys()[0]],
                    False, msg.format("update", count+1, _res, 2.5,
                                      self.disk_usage[self.disk_usage.keys()[0]]))

                count += 1
            self.update_itr += self.update_itr

            ###################################################################
            '''
            STEP - 3
              -- Delete all the items.
            '''
            self.log.debug("Step 3, Iteration {}".format(i+1))
            self.doc_ops = "delete"
            self.delete_start = 0
            self.delete_end = self.num_items
            if self.rev_del:
                self.delete_start = -int(self.num_items - 1)
                self.delete_end = 1

            self.generate_docs(doc_ops="delete")
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)

            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)

            ###################################################################
            '''
            STEP - 4
              -- Space Amplification Check after deletion.
            '''
            self.log.debug("Step 4, Iteration {}".format(i+1))
            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                 self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("Delete Iteration {}, Disk Usage- {}MB\
            ".format(i+1, _res))
            self.assertIs(
                _res > 0.5 * self.disk_usage[
                    self.disk_usage.keys()[0]],
                False, msg.format(
                    "delete", i+1, _res, 0.5,
                    self.disk_usage[self.disk_usage.keys()[0]]))
            ###################################################################
            '''
            STEP - 5
              -- ReCreation of docs.
            '''
            self.log.debug("Step 5, Iteration= {}".format(i+1))
            self.gen_create = copy.deepcopy(self.gen_delete)
            self.doc_ops = "create"
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)

            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)

            ###################################################################
            '''
            STEP - 6
              -- Space Amplification Check after Recreation.
            '''
            self.log.debug("Step 6, Iteration= {}".format(i+1))
            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(self.buckets[0],
                                             self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("Create Iteration{}, Disk Usage= {}MB \
            ".format(i+1, _res))
            self.assertIs(_res > 1.5 * self.disk_usage[
                self.disk_usage.keys()[0]],
                False, msg.format("Create", _res, i+1, 1.5,
                                  self.disk_usage[self.disk_usage.keys()[0]]))

        ###################################################################
        '''
        STEP - 7
          -- Validate data
           -- Data validation is only for the creates in last iterations.
        '''
        self.log.debug("Step 7, Iteration= {}".format(i+1))
        self.validate_data("create", self.gen_create)
        self.log.info("====test_update_rev_update ends====")

    def test_update_single_doc_n_times(self):
        """
        Test Focus: Update single/same doc n times

          Note: Multithreading is used to update the
                single doc. Since we are not worried
                about what the final mutate value of
                the document should be, semaphores have
                been avoided. Multithreading also speeds
                up the execution of the test.
        """
        self.log.info("test_update_single_doc_n_times starts")
        self.doc_ops = "update"

        self.client = SDKClient([self.cluster.master],
                                self.bucket_util.buckets[0],
                                scope=CbServer.default_scope,
                                collection=CbServer.default_collection)

        self.gen_update = self.genrate_docs_basic(start=0, end=1)

        key, val = self.gen_update.next()
        for node in self.cluster.nodes_in_cluster:
            shell = RemoteMachineShellConnection(node)
            shell.restart_couchbase()
            shell.disconnect()
            self.assertTrue(
                self.bucket_util._wait_warmup_completed(
                    [self.cluster_util.cluster.master],
                    self.bucket_util.buckets[0],
                    wait_time=self.wait_timeout * 10))

        def upsert_doc(start_num, end_num, key_obj, val_obj):
            for i in range(start_num, end_num):
                val_obj.put("mutated", i)
                self.client.upsert(key_obj, val_obj)

        threads = []
        start = 0
        end = 0
        for _ in range(10):
            start = end
            end += 100000
            th = threading.Thread(
                target=upsert_doc, args=[start, end, key, val])
            th.start()
            threads.append(th)

        for th in threads:
            th.join()

        self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

        # Space amplification check
        msg_stats = "Fragmentation value for {} stats exceeds\
        the configured value"
        _result = self.check_fragmentation_using_magma_stats(
            self.buckets[0],
            self.cluster.nodes_in_cluster)
        self.assertIs(_result, True,
                      msg_stats.format("magma"))

        _r = self.check_fragmentation_using_bucket_stats(
            self.buckets[0], self.cluster.nodes_in_cluster)
        self.assertIs(_r, True,
                      msg_stats.format("KV"))

        disk_usage = self.get_disk_usage(
            self.buckets[0],
            self.cluster.nodes_in_cluster)
        self.log.debug("Disk usage after updates {}".format(
            disk_usage))
        _res = disk_usage[0]
        msg = "Disk Usage = {}MB exceeds 2.2 times \
        from Actual disk usage = {}MB"
        self.assertIs(
            _res > 2.2 * self.disk_usage[
                self.disk_usage.keys()[0]],
            False,
            msg.format(_res, self.disk_usage[self.disk_usage.keys()[0]]))
        # Space amplification check ends

        success, fail = self.client.get_multi([key],
                                              self.wait_timeout)

        self.assertIs(key in success, True,
                      msg="key {} doesn't exist\
                      ".format(key))
        actual_val = dict()
        expected_val = Json.loads(val.toString())
        actual_val = Json.loads(success[key][
            'value'].toString())
        self.log.debug("Expected_val= {} and actual_val = {}\
        ".format(expected_val, actual_val))
        self.assertIs(expected_val == actual_val, True,
                      msg="expected_val-{} != Actual_val-{}\
                      ".format(expected_val, actual_val))

        self.change_swap_space(self.cluster.nodes_in_cluster,
                               disable=False)
        self.log.info("====test_update_single_doc_n_times ends====")

    def test_move_val_btwn_key_and_seq_trees(self):
        """
        Test Focus: Update items such that values move
                    between the Sequence Tree and the Key Tree.
        STEPS:
          -- Update items with a new size, so that
             items move from the sequence tree to the key
             tree or vice versa
          -- Do data validation
          -- Update items again with the initial size
          -- Check space amplification
          -- Validate the documents again
        """
        self.log.info("test_move_val_btwn_key_and_seq_trees starts")
        msg_stats = "Fragmentation value for {} stats exceeds\
        the configured value"
        count = 0
        keyTree, seqTree = (self.get_disk_usage(
                        self.buckets[0],
                        self.cluster.nodes_in_cluster)[2:4])
        self.log.debug("Disk usage after pure creates {}".format((
            self.disk_usage, keyTree, seqTree)))
        initial_doc_size = self.doc_size
        upsert_size = 0
        if self.doc_size < 32:
            upsert_size = 2048

        while count < self.test_itr:
            self.log.info("Update Iteration count == {}".format(count))
            for node in self.cluster.nodes_in_cluster:
                shell = RemoteMachineShellConnection(node)
                shell.kill_memcached()
                shell.disconnect()
                self.assertTrue(self.bucket_util._wait_warmup_completed(
                                [self.cluster_util.cluster.master],
                                self.bucket_util.buckets[0],
                                wait_time=self.wait_timeout * 10))
            #######################################################################
            '''
            STEP - 1, Update items with changed/new size
            '''
            self.log.info("Step 1, Iteration= {}".format(count+1))
            self.doc_ops = "update"
            self.update_start = 0
            self.update_end = self.num_items
            if self.rev_update:
                self.update_start = -int(self.num_items - 1)
                self.update_end = 1
            self.doc_size = upsert_size
            self.generate_docs()
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

            if upsert_size > 32:
                seqTree_update = (self.get_disk_usage(
                        self.buckets[0],
                        self.cluster.nodes_in_cluster)[-1])
                self.log.info("For upsert_size > 32 seqIndex usage-{}\
                ".format(seqTree_update))

            #######################################################################
            '''
            STEP - 2, Validate data after initial upsert
            '''
            self.log.info("Step 2, Iteration= {}".format(count+1))
            self.validate_data("update", self.gen_update)

            #######################################################################
            '''
            STEP - 3, Updating items with changed doc size
                      to move between trees
            '''
            self.log.info("Step 3, Iteration= {}".format(count+1))
            self.doc_size = initial_doc_size
            self.generate_docs()
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)

            #######################################################################
            '''
            STEP - 4, Space Amplification Checks
            '''
            _result = self.check_fragmentation_using_magma_stats(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            self.assertIs(_result, True,
                          msg_stats.format("magma"))

            _r = self.check_fragmentation_using_bucket_stats(
                 self.buckets[0], self.cluster.nodes_in_cluster)
            self.assertIs(_r, True,
                          msg_stats.format("KV"))

            disk_usage = self.get_disk_usage(
                self.buckets[0], self.cluster.nodes_in_cluster)
            _res = disk_usage[0]
            self.log.info("disk usage after upsert count {} is {}MB \
                ".format(count+1, _res))
            if self.doc_size > 32:
                self.assertIs(
                    _res > 1.5 * self.disk_usage[self.disk_usage.keys()[0]],
                    False, "Disk Usage {} After \
                    update count {} exceeds \
                    Actual disk usage {} by 1.5 \
                    times".format(_res, count+1,
                                  self.disk_usage[self.disk_usage.keys()[0]]))
            else:
                self.assertIs(disk_usage[3] > 0.5 * seqTree_update,
                              False, " Current seqTree usage-{} exceeds by'\n'\
                               0.5 times from the earlier '\n' \
                               seqTree usage (after update) -{} \
                              ".format(disk_usage[3], seqTree_update))

            count += 1

            #######################################################################
            '''
            STEP - 5, Data validation
            '''
            self.log.info("Step 5, Iteration= {}".format(count+1))
            self.validate_data("update", self.gen_update)

            #######################################################################
        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.log.info("====test_move_docs_btwn_key_and_seq_trees ends====")

    def test_parallel_create_update(self):
        """
        STEPS:
          -- Create new items and update already
             existing items
          -- Check disk_usage after each Iteration
          -- Data validation for last iteration
        """
        self.log.info("test_parallel_create_update starts")
        count = 0
        init_items = self.num_items
        self.doc_ops = "create:update"
        self.update_start = 0
        self.update_end = self.num_items
        while count < self.test_itr:
            self.log.info("Iteration {}".format(count+1))
            self.create_start = self.num_items
            self.create_end = self.num_items+init_items

            if self.rev_write:
                self.create_start = -int(self.num_items+init_items - 1)
                self.create_end = -int(self.num_items - 1)

            self.generate_docs()
            _ = self.loadgen_docs(self.retry_exceptions,
                                  self.ignore_exceptions,
                                  _sync=True)
            self.bucket_util._wait_for_stats_all_buckets(timeout=3600)
            self.bucket_util.verify_stats_all_buckets(self.num_items)
            if count == self.test_itr - 1:
                self.validate_data("update", self.gen_update)
            self.update_start = self.num_items
            self.update_end = self.num_items+init_items
            if self.rev_update:
                self.update_start = -int(self.num_items+init_items - 1)
                self.update_end = -int(self.num_items - 1)

            disk_usage = self.get_disk_usage(
                self.buckets[0],
                self.cluster.nodes_in_cluster)
            if self.doc_size <= 32:
                self.assertIs(
                    disk_usage[2] >= disk_usage[3], True,
                    "seqIndex usage = {}MB'\n' \
                    after Iteration {}'\n' \
                    exceeds keyIndex usage={}MB'\n' \
                    ".format(disk_usage[3],
                             count+1,
                             disk_usage[2]))
            self.assertIs(
                disk_usage[0] > 2.2 * (2 * self.disk_usage[
                    self.disk_usage.keys()[0]]),
                False, "Disk Usage {}MB After '\n\'\
                Updates exceeds '\n\'\
                Actual disk usage {}MB by '\n'\
                2.2 times".format(disk_usage[0],
                                  (2 * self.disk_usage[
                                      self.disk_usage.keys()[0]])))
            count += 1
        self.change_swap_space(self.cluster.nodes_in_cluster, disable=False)
        self.log.info("====test_parallel_create_update ends====")