Example #1
    def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
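        # Reset the cluster (delete buckets, clean and re-verify nodes),
        # then initialize the master and size its memory quota from
        # mcdMemoryReserved scaled by the per-node RAM ratio.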
        log = logger.Logger.get_logger()
        servers = input.servers
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
        serverInfo = servers[0]

        log.info('picking server : {0} as the master'.format(serverInfo))
        # if all nodes are on the same machine, scale bucket_ram_ratio by 1/len(servers)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
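        # The "ascii" test param selects plain (non-SASL) buckets;
        # otherwise a SASL-authenticated bucket is created.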
        if "ascii" in TestInputSingleton.input.test_params\
        and TestInputSingleton.input.test_params["ascii"].lower() == "true":
            BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                          howmany=1, sasl=False)
        else:
            BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                          howmany=1, sasl=True)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            testcase.assertTrue(ready, "wait_for_memcached failed")
Example #2
 def setUp(self):
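     # Full cluster bootstrap: initialize the master and its memory quota,
     # clean all nodes, add every server to the cluster and rebalance.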
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.assertTrue(self.input, msg="input parameters missing...")
     self.servers = self.input.servers
     self.master = self.servers[0]
     rest = RestConnection(self.master)
     rest.init_cluster(username=self.master.rest_username,
                       password=self.master.rest_password)
     info = rest.get_nodes_self()
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
     rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
     BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
     ClusterOperationHelper.cleanup_cluster(servers=self.servers)
     credentials = self.input.membase_settings
     ClusterOperationHelper.add_all_nodes_or_assert(master=self.master, all_servers=self.servers, rest_settings=credentials, test_case=self)
     rest = RestConnection(self.master)
     nodes = rest.node_statuses()
     otpNodeIds = []
     for node in nodes:
         otpNodeIds.append(node.id)
     rebalanceStarted = rest.rebalance(otpNodeIds, [])
     self.assertTrue(rebalanceStarted,
                     "unable to start rebalance on master node {0}".format(self.master.ip))
     self.log.info('started rebalance operation on master node {0}'.format(self.master.ip))
     rebalanceSucceeded = rest.monitorRebalance()
     self.assertTrue(rebalanceSucceeded,
                     "rebalance operation failed on master node {0}".format(self.master.ip))
Example #3
 def setUp(self):
     super(AutoFailoverAbortsRebalance, self).setUp()
     self.master = self.servers[0]
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
     self.num_buckets = self.num_buckets - 1  # the base class already creates the default bucket
     if self.num_buckets:
         BucketOperationHelper.create_multiple_buckets(self.master, self.num_replicas, node_ram_ratio * (2.0 / 3.0),
                                                       howmany=self.num_buckets, bucket_storage=self.bucket_storage)
     self.buckets = self.rest.get_buckets()
     for bucket in self.buckets:
         ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
         self.assertTrue(ready, "wait_for_memcached failed")
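     # Three BlobGenerators share the 'auto-failover' key prefix: one for
     # the initial create load, two for concurrent update/delete load.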
     self.initial_load_gen = BlobGenerator('auto-failover',
                                           'auto-failover-',
                                           self.value_size,
                                           end=self.num_items)
     self.update_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.update_items)
     self.delete_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          start=self.update_items,
                                          end=self.delete_items)
     self._load_all_buckets(self.servers[0], self.initial_load_gen,
                            "create", 0)
     self._async_load_all_buckets(self.orchestrator,
                                  self.update_load_gen, "update", 0)
     self._async_load_all_buckets(self.orchestrator,
                                  self.delete_load_gen, "delete", 0)
Example #4
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)

        # Clear the state from a previous invalid run
        rest.stop_rebalance()
        self.load_started = False
        self.loaders = []
        SwapRebalanceBase.common_tearDown(self)

        # Initialize test params
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 100000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)

        # Make sure the test is setup correctly
        min_servers = int(self.num_initial_servers) + int(self.num_swap)
        msg = "minimum {0} nodes required for running swap rebalance"
        self.assertTrue(len(self.servers) >= min_servers,
            msg=msg.format(min_servers))

        self.log.info('picking server : {0} as the master'.format(serverInfo))
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
Example #5
    def common_setup(self):
        self.cluster_helper = Cluster()
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
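        # All servers sharing one IP means a local cluster_run; normalize
        # each server's ip to the address the node itself reports.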
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from a previous invalid run
            if rest._rebalance_progress_status() == "running":
                self.log.warning("rebalancing is still running, previous test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info(
                "==============  SwapRebalanceBase setup was started for test #{0} {1}==============".format(
                    self.case_number, self._testMethodName
                )
            )
            SwapRebalanceBase.reset(self)

            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))

            self.log.info("picking server : {0} as the master".format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info(
                "==============  SwapRebalanceBase setup was finished for test #{0} {1} ==============".format(
                    self.case_number, self._testMethodName
                )
            )
            SwapRebalanceBase._log_start(self)
        except Exception as e:
            self.cluster_helper.shutdown()
            self.fail(e)
Example #6
    def common_setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.created_views = {}
        self.servers = self.input.servers
        self.replica = self.input.param("replica", 1)
        self.failover_factor = self.input.param("failover-factor", 1)
        self.num_docs = self.input.param("num-docs", 10000)
        self.num_design_docs = self.input.param("num-design-docs", 20)
        self.expiry_ratio = self.input.param("expiry-ratio", 0.1)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.case_number = self.input.param("case_number", 0)
        self.dgm_run = self.input.param("dgm_run", False)

        # avoid cleanup if the previous test already tore the cluster down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            ViewBaseTests._common_clenup(self)
        master = self.servers[0]
        rest = RestConnection(master)
        rest.set_reb_cons_view(disable=False)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(rest.get_nodes_self().mcdMemoryReserved * node_ram_ratio)
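        # dgm_run overrides the quota down to 256 MB so the bucket runs
        # in a disk-greater-than-memory (DGM) state.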
        if self.dgm_run:
            mem_quota = 256
        rest.init_cluster(master.rest_username, master.rest_password)
        rest.init_cluster_memoryQuota(master.rest_username, master.rest_password, memoryQuota=mem_quota)
        if self.num_buckets == 1:
            ViewBaseTests._create_default_bucket(self, replica=self.replica)
        else:
            ViewBaseTests._create_multiple_buckets(self, replica=self.replica)
        ViewBaseTests._log_start(self)
        db_compaction = self.input.param("db_compaction", 30)
        view_compaction = self.input.param("view_compaction", 30)
        rest.set_auto_compaction(dbFragmentThresholdPercentage=db_compaction,
                                 viewFragmntThresholdPercentage=view_compaction)
Example #7
 def create_buckets(servers, testcase, howmany=1, replica=1, bucket_ram_ratio=(2.0 / 3.0)):
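     # Create `howmany` buckets sized from the shared RAM ratio, then
     # block until memcached is ready on each of them.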
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
     master = servers[0]
     BucketOperationHelper.create_multiple_buckets(master, replica, node_ram_ratio * bucket_ram_ratio, howmany=howmany)
     rest = RestConnection(master)
     buckets = rest.get_buckets()
     for bucket in buckets:
         ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
         testcase.assertTrue(ready, "wait_for_memcached failed")
Example #8
 def cluster_initialization(servers):
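     # Initialize only the master node: set REST credentials and a memory
     # quota derived from mcdMemoryReserved and the per-node RAM ratio.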
     log = logger.Logger().get_logger()
     master = servers[0]
     log.info('picking server : {0} as the master'.format(master))
     #if all nodes are on the same machine let's have the bucket_ram_ratio as bucket_ram_ratio * 1/len(servers)
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
     rest = RestConnection(master)
     info = rest.get_nodes_self()
     rest.init_cluster(username=master.rest_username, password=master.rest_password)
     rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
Example #9
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.servers = self.input.servers
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     info = rest.get_nodes_self()
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
     rest.init_cluster(username=serverInfo.rest_username,
                       password=serverInfo.rest_password)
     rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
Example #10
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        master = self.servers[0]
        rest = RestConnection(master)

        # Cleanup previous state
        self.task_manager = None
        rest.stop_rebalance()
        RebalanceBaseTest.reset(self)

        # Initialize test params
        self.replica = self.input.param("replica", 1)

        # By default we use keys-count for LoadTask
        # Use keys-count=-1 to use load-ratio
        self.keys_count = self.input.param("keys-count", 30000)
        self.load_ratio = self.input.param("load-ratio", 6)
        self.expiry_ratio = self.input.param("expiry-ratio", 0.1)
        self.delete_ratio = self.input.param("delete-ratio", 0.1)
        self.access_ratio = self.input.param("access-ratio", 0.8)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.num_rebalance = self.input.param("num-rebalance", 1)
        self.do_ascii = self.input.param("ascii", False)
        self.do_verify = self.input.param("do-verify", True)
        self.repeat = self.input.param("repeat", 1)
        self.max_ops_per_second = self.input.param("max_ops_per_second", 500)
        self.min_item_size = self.input.param("min_item_size", 128)
        self.do_stop = self.input.param("do-stop", False)
        self.skip_cleanup = self.input.param("skip-cleanup", False)

        self.checkResidentRatio = self.input.param("checkResidentRatio", False)
        self.activeRatio = self.input.param("activeRatio", 50)
        self.replicaRatio = self.input.param("replicaRatio", 50)
        self.case_number = self.input.param("case_number", 0)

        self.log.info('picking server : {0} as the master'.format(master))

        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
            password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
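        # Buckets are SASL-authenticated unless the "ascii" param asked
        # for plain buckets.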
        BucketOperationHelper.create_multiple_buckets(master, self.replica, node_ram_ratio * (2.0 / 3.0),
                howmany=self.num_buckets, sasl=not self.do_ascii)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
            self.assertTrue(ready, "wait_for_memcached failed")

        # Initialize and start the taskManager
        self.task_manager = taskmanager.TaskManager()
        self.task_manager.start()
Example #11
 def _create_default_bucket(self):
     name = "default"
     master = self.master
     rest = RestConnection(master)
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
     info = rest.get_nodes_self()
     available_ram = info.memoryQuota * node_ram_ratio
     rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
     ready = BucketOperationHelper.wait_for_memcached(master, name)
     self.assertTrue(ready, msg="wait_for_memcached failed")
     self.load_thread = None
     self.shutdown_load_data = False
Example #12
 def _create_default_bucket(self):
     helper = RestHelper(self.rest)
     if not helper.bucket_exists(self.bucket):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = self.rest.get_nodes_self()
         available_ram = int(info.memoryQuota * node_ram_ratio)
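         # fall back to a 256 MB floor when the computed share is too small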
         if available_ram < 256:
             available_ram = 256
         self.rest.create_bucket(bucket=self.bucket, ramQuotaMB=available_ram)
         ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket)
         self.testcase.assertTrue(ready, "wait_for_memcached failed")
     self.testcase.assertTrue(helper.bucket_exists(self.bucket), "unable to create {0} bucket".format(self.bucket))
Example #13
    def _modify_bucket(self):
        helper = RestHelper(self.rest)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
            self.servers)
        info = self.rest.get_nodes_self()

        status, content = self.rest.change_bucket_props(bucket=self.bucket,
            ramQuotaMB=512, authType='sasl', timeSynchronization='enabledWithOutDrift')
        if re.search('TimeSyncronization not allowed in update bucket', content):
            self.log.info('[PASS] Expected modify bucket to disallow Time Synchronization.')
        else:
            self.fail('[ERROR] modify bucket unexpectedly allowed Time Synchronization')
Example #14
 def _create_default_bucket(self, replica=1):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica)
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name))
Example #15
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        self.case_number = self.input.param("case_number", 0)

        # Clear the state from a previous invalid run
        if rest._rebalance_progress_status() == 'running':
            self.log.warning(
                "rebalancing is still running, previous test should be verified"
            )
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        self.load_started = False
        self.loaders = []
        self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        SwapRebalanceBase.reset(self)
        self.cluster_helper = Cluster()
        # Initialize test params
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 100000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param(
            "swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)

        # Make sure the test is setup correctly
        min_servers = int(self.num_initial_servers) + int(self.num_swap)
        msg = "minimum {0} nodes required for running swap rebalance"
        self.assertTrue(len(self.servers) >= min_servers,
                        msg=msg.format(min_servers))

        self.log.info('picking server : {0} as the master'.format(serverInfo))
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved *
                                                      node_ram_ratio))
        self.log.info(
            "==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
            .format(self.case_number, self._testMethodName))
        SwapRebalanceBase._log_start(self)
Example #16
 def _create_default_bucket(self):
     name = "default"
     master = self.master
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
                     msg="unable to create {0} bucket".format(name))
Example #17
    def setup_cluster(self, do_rebalance=True):
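        # Initialize the master's memory quota, clean every node,
        # optionally add-and-rebalance the servers, then create the
        # default bucket.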
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved * node_ram_ratio)
        self.rest.init_cluster(self.master.rest_username, self.master.rest_password)
        self.rest.init_cluster_memoryQuota(self.master.rest_username, self.master.rest_password, memoryQuota=mem_quota)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master], self.testcase)

        if do_rebalance:
            rebalanced = ClusterOperationHelper.add_and_rebalance(self.servers)
            self.testcase.assertTrue(rebalanced, "cluster is not rebalanced")

        self._create_default_bucket()
Example #18
    def test_database_fragmentation(self):
        percent_threshold = self.autocompaction_value
        bucket_name = "default"
        MAX_RUN = 100
        item_size = 1024
        update_item_size = item_size * ((float(97 - percent_threshold)) / 100)
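        # updating every key with a smaller value leaves dead space in the
        # append-only data files, pushing fragmentation past the threshold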
        serverInfo = self.servers[0]
        self.log.info(serverInfo)
        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)

        output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=100)

        if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
            self.assertFalse(output, "it should be impossible to set compaction value = {0}%".format(percent_threshold))
            import json
            self.assertTrue("errors" in json.loads(rq_content), "Error is not present in response")
            self.assertTrue(json.loads(rq_content)["errors"].find("Allowed range is 2 - 100") > -1, \
                            "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(json.loads(rq_content)["errors"]))

            self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))
        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
                     and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()

            available_ram = info.memoryQuota * (node_ram_ratio) / 2
            items = (int(available_ram * 1000) / 2) / item_size
            rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                               saslPassword='******', replicaNumber=1, proxyPort=11211)
            BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)

            self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
            self.insert_key(serverInfo, bucket_name, items, item_size)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            self.log.info("start to update {0}K keys with smaller value {1} bytes/key".format(items,
                                                                             int(update_item_size)))
            self.insert_key(serverInfo, bucket_name, items, int(update_item_size))

            compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, timeout_in_seconds=180)
            if not compact_run:
                self.log.error("auto compaction did not run")
            else:
                self.log.info("auto compaction ran successfully")
        else:
            self.log.error("Unknown error")
Example #19
 def _create_default_bucket(self, replica=1):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica,
                            storageBackend=self.bucket_storage)
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
         msg="unable to create {0} bucket".format(name))
Example #20
 def _create_default_bucket(self, unittest):
     name = "default"
     master = self.master
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         BucketOperationHelper.wait_for_vbuckets_ready_state(master, name)
         unittest.assertTrue(ready, msg="wait_for_memcached failed")
     unittest.assertTrue(helper.bucket_exists(name),
                         msg="unable to create {0} bucket".format(name))
Example #21
 def _create_default_bucket(self):
     rest = RestConnection(self.master)
     helper = RestHelper(RestConnection(self.master))
     if not helper.bucket_exists(self.bucket):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio([self.master])
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         serverInfo = self.master
         rest.init_cluster(username=serverInfo.rest_username,
                           password=serverInfo.rest_password)
         rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
         rest.create_bucket(bucket=self.bucket, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(self.bucket),
                     msg="unable to create {0} bucket".format(self.bucket))
Example #22
 def _create_default_bucket(self):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = rest.get_nodes_self()
         available_ram = info.mcdMemoryReserved * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
                     msg="unable to create {0} bucket".format(name))
     self.load_thread = None
     self.shutdown_load_data = False
Example #23
 def _create_default_bucket(self):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
             self.servers)
         info = rest.get_nodes_self()
         available_ram = info.mcdMemoryReserved * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
                     msg="unable to create {0} bucket".format(name))
     self.load_thread = None
     self.shutdown_load_data = False
Example #24
 def _create_default_bucket(self, replica=1):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         if available_ram < 256:
             available_ram = 256
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica)
         msg = 'This not_my_vbucket error is part of wait_for_memcached method, not an issue'
         ready = BucketOperationHelper.wait_for_memcached(master, name, log_msg=msg)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
                     msg="unable to create {0} bucket".format(name))
Example #25
    def _database_fragmentation(self, percent_threshold):
        bucket_name = "default"
        MAX_RUN = 99
        item_size = 1024
        update_item_size = item_size * (float(97 - percent_threshold) / 100)
        serverInfo = self.servers[0]
        self.log.info(serverInfo)
        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)

        rest.reset_auto_compaction()
        parallelDBAndView = "false"
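        # set_autoCompaction returns a falsy output when ns_server rejects
        # a threshold outside the allowed 2 - 100 range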
        output = rest.set_autoCompaction(parallelDBAndView, percent_threshold, 100)
        if not output and percent_threshold < MIN_COMPACTION_THRESHOLD:
            self.log.error("Need to set minimum threshold above {0}%".format(MIN_COMPACTION_THRESHOLD))
        elif not output and percent_threshold > MAX_COMPACTION_THRESHOLD:
            self.log.error("Need to set maximum threshold under {0}".format(MAX_COMPACTION_THRESHOLD))
        elif output and percent_threshold == MAX_COMPACTION_THRESHOLD:
            self.log.info("Auto compaction will not run at {0}% setting".format(MAX_COMPACTION_THRESHOLD))
        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
                     and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()

            available_ram = info.memoryQuota * node_ram_ratio / 2
            items = int(available_ram * 1000) / 2 / item_size
            rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                               saslPassword='******', replicaNumber=1, proxyPort=11211)
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)

            self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
            self.insert_key(serverInfo, bucket_name, items, item_size)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            self.log.info("start to update {0}K keys with smaller value {1} bytes/key".format(items,
                                                                             int(update_item_size)))
            self.insert_key(serverInfo, bucket_name, items, int(update_item_size))
            compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, timeout_in_seconds=180)
            if not compact_run:
                self.log.error("auto compaction did not run")
            else:
                self.log.info("auto compaction ran successfully")
        else:
            self.log.error("Unknown error")
Example #26
 def _create_default_bucket(self):
     helper = RestHelper(self.rest)
     if not helper.bucket_exists(self.bucket):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
             self.servers)
         info = self.rest.get_nodes_self()
         available_ram = int(info.memoryQuota * node_ram_ratio)
         if available_ram < 256:
             available_ram = 256
         self.rest.create_bucket(bucket=self.bucket,
                                 ramQuotaMB=available_ram)
         ready = BucketOperationHelper.wait_for_memcached(self.master,
                                                          self.bucket)
         self.testcase.assertTrue(ready, "wait_for_memcached failed")
     self.testcase.assertTrue(
         helper.bucket_exists(self.bucket),
         "unable to create {0} bucket".format(self.bucket))
Example #27
    def setup_cluster(self):
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)
        self.rest.init_cluster(self.master.rest_username,
                               self.master.rest_password)
        self.rest.init_cluster_memoryQuota(self.master.rest_username,
                                           self.master.rest_password,
                                           memoryQuota=mem_quota)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master],
                                                             self.testcase)

        rebalanced = ClusterOperationHelper.add_and_rebalance(self.servers)
        self.testcase.assertTrue(rebalanced, "cluster is not rebalanced")

        self._create_default_bucket()
Example #28
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        self.case_number = self.input.param("case_number", 0)

        # Clear the state from a previous invalid run
        rest.stop_rebalance()
        self.load_started = False
        self.loaders = []
        self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        SwapRebalanceBase.reset(self)
        self.cluster_helper = Cluster()
        # Initialize test params
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 100000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.skip_cleanup = self.input.param("skip-cleanup", False)
        self.do_access = self.input.param("do-access", True)

        # Make sure the test is setup correctly
        min_servers = int(self.num_initial_servers) + int(self.num_swap)
        msg = "minimum {0} nodes required for running swap rebalance"
        self.assertTrue(len(self.servers) >= min_servers,
            msg=msg.format(min_servers))

        self.log.info('picking server : {0} as the master'.format(serverInfo))
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        self.log.info("==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
        SwapRebalanceBase._log_start(self)
Example #29
    def _create_bucket(self, lww=True, drift=False, name=None):
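        # Create a bucket with last-write-wins (lww) conflict resolution;
        # the drift argument is accepted but unused here.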

        if lww:
            self.lww = lww

        if name:
            self.bucket = name

        helper = RestHelper(self.rest)
        if not helper.bucket_exists(self.bucket):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                self.servers)
            info = self.rest.get_nodes_self()
            self.rest.create_bucket(bucket=self.bucket,
                ramQuotaMB=512, authType='sasl', lww=self.lww)
            try:
                ready = BucketOperationHelper.wait_for_memcached(self.master,
                    self.bucket)
            except Exception as e:
                self.fail('unable to create bucket: {0}'.format(e))
Example #30
    def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
        log = logger.Logger.get_logger()
        servers = input.servers
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
        serverInfo = servers[0]

        log.info('picking server : {0} as the master'.format(serverInfo))
        # if all nodes are on the same machine, scale bucket_ram_ratio by 1/len(servers)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved *
                                                      node_ram_ratio))
        if "ascii" in TestInputSingleton.input.test_params\
        and TestInputSingleton.input.test_params["ascii"].lower() == "true":
            BucketOperationHelper.create_multiple_buckets(serverInfo,
                                                          replica,
                                                          node_ram_ratio *
                                                          bucket_ram_ratio,
                                                          howmany=1,
                                                          sasl=False)
        else:
            BucketOperationHelper.create_multiple_buckets(serverInfo,
                                                          replica,
                                                          node_ram_ratio *
                                                          bucket_ram_ratio,
                                                          howmany=1,
                                                          sasl=True)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(
                serverInfo, bucket.name)
            testcase.assertTrue(ready, "wait_for_memcached failed")
Example #31
    def setUp(self):
        super(BucketConfig, self).setUp()
        self.testcase = '2'
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        #self.time_synchronization = self.input.param("time_sync", "enabledWithoutDrift")
        self.lww = self.input.param("lww", True)
        self.drift = self.input.param("drift", False)
        self.bucket = 'bucket-1'
        self.master = self.servers[0]
        self.rest = RestConnection(self.master)
        self.cluster = Cluster()
        self.skip_rebalance = self.input.param("skip_rebalance", False)

        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)

        if not self.skip_rebalance:
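            # provision the cluster: init master credentials and quota,
            # clean each server, then add and rebalance the full list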
            self.rest.init_cluster(self.master.rest_username,
                self.master.rest_password)
            self.rest.init_cluster_memoryQuota(self.master.rest_username,
                self.master.rest_password,
                memoryQuota=mem_quota)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    [self.master], self.testcase)
            try:
                rebalanced = ClusterOperationHelper.add_and_rebalance(
                    self.servers)

            except Exception as e:
                self.fail('cluster is not rebalanced: {0}'.format(e))

        self._create_bucket(self.lww, self.drift)
Example #32
    def common_setup(self):
        self.cluster_helper = Cluster()
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param(
            "swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from a previous invalid run
            if rest._rebalance_progress_status() == 'running':
                self.log.warning(
                    "rebalancing is still running, previous test should be verified"
                )
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase.reset(self)

            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers,
                            msg=msg.format(min_servers))

            self.log.info(
                'picking server : {0} as the master'.format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username,
                              password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            SwapRebalanceBase.enable_diag_eval_on_non_local_hosts(
                self, serverInfo)
            # Add built-in user
            testuser = [{
                'id': 'cbadminbucket',
                'name': 'cbadminbucket',
                'password': '******'
            }]
            RbacBase().create_user_source(testuser, 'builtin', self.servers[0])

            # Assign user to role
            role_list = [{
                'id': 'cbadminbucket',
                'name': 'cbadminbucket',
                'roles': 'admin'
            }]
            RbacBase().add_user_role(role_list,
                                     RestConnection(self.servers[0]),
                                     'builtin')

            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info(
                "==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                .format(self.case_number, self._testMethodName))
            SwapRebalanceBase._log_start(self)
        except Exception as e:
            self.cluster_helper.shutdown()
            self.fail(e)
Example #33
    def test_database_fragmentation(self):
        self.log.info('start test_database_fragmentation')

        self.err = None
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        percent_threshold = self.autocompaction_value
        bucket_name = "default"
        MAX_RUN = 100
        item_size = 1024
        update_item_size = item_size * ((float(100 - percent_threshold)) / 100)
        serverInfo = self.servers[0]
        self.log.info(serverInfo)

        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)
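        # "false" disables parallel DB/view compaction; only the database
        # fragmentation threshold is being set here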
        output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=None)

        if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
            self.assertFalse(output, "it should be impossible to set compaction value = {0}%".format(percent_threshold))
            import json
            self.assertTrue("errors" in json.loads(rq_content), "Error is not present in response")
            self.assertTrue(str(json.loads(rq_content)["errors"]).find("Allowed range is 2 - 100") > -1, \
                            "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(str(json.loads(rq_content)["errors"])))
            self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))

        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
                     and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()
            available_ram = info.memoryQuota * (node_ram_ratio) / 2
            items = (int(available_ram * 1000) / 2) / item_size
            print "ITEMS =============%s" % items

            rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                               saslPassword='******', replicaNumber=1, proxyPort=11211)
            BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)

            self.log.info("******start to load {0}K keys with {1} bytes/key".format(items, item_size))
            #self.insert_key(serverInfo, bucket_name, items, item_size)
            generator = BlobGenerator('compact', 'compact-', int(item_size), start=0, end=(items * 1000))
            self._load_all_buckets(self.master, generator, "create", 0, 1, batch_size=1000)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            self.log.info("********start to update {0}K keys with smaller value {1} bytes/key".format(items,
                                                                             int(update_item_size)))
            generator_update = BlobGenerator('compact', 'compact-', int(update_item_size), start=0, end=(items * 1000))
            if self.during_ops:
                if self.during_ops == "change_port":
                    self.change_port(new_port=self.input.param("new_port", "9090"))
                    self.master.port = self.input.param("new_port", "9090")
                elif self.during_ops == "change_password":
                    old_pass = self.master.rest_password
                    self.change_password(new_password=self.input.param("new_password", "new_pass"))
                    self.master.rest_password = self.input.param("new_password", "new_pass")
                rest = RestConnection(self.master)
            insert_thread = Thread(target=self.load,
                                   name="insert",
                                   args=(self.master, self.autocompaction_value,
                                         self.default_bucket_name, generator_update))
            try:
                self.log.info('starting the load thread')
                insert_thread.start()

                compact_run = remote_client.wait_till_compaction_end(rest, bucket_name,
                                                                     timeout_in_seconds=(self.wait_timeout * 10))

                if not compact_run:
                    self.fail("auto compaction did not run")
                else:
                    self.log.info("auto compaction ran successfully")
            except Exception as ex:
                self.log.info("exception in auto compaction")
                if self.during_ops:
                    if self.during_ops == "change_password":
                        self.change_password(new_password=old_pass)
                    elif self.during_ops == "change_port":
                        self.change_port(new_port='8091',
                                         current_port=self.input.param("new_port", "9090"))
                if str(ex).find("enospc") != -1:
                    self.is_crashed.set()
                    self.log.error("Disk is out of space, unable to load more data")
                    insert_thread._Thread__stop()
                else:
                    insert_thread._Thread__stop()
                    raise ex
            else:
                insert_thread.join()
                if self.err is not None:
                    self.fail(self.err)
Example #34
    def test_database_fragmentation(self):

        self.log.info('start test_database_fragmentation')

        self.err = None
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        percent_threshold = self.autocompaction_value
        bucket_name = "default"
        MAX_RUN = 100
        item_size = 1024
        update_item_size = item_size * ((float(100 - percent_threshold)) / 100)
        serverInfo = self.servers[0]
        self.log.info(serverInfo)

        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)
        output, rq_content, header = rest.set_auto_compaction(
            "false",
            dbFragmentThresholdPercentage=percent_threshold,
            viewFragmntThresholdPercentage=None)

        if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD
                           or percent_threshold >= MAX_COMPACTION_THRESHOLD):
            self.assertFalse(
                output,
                "it should be impossible to set compaction value = {0}%".
                format(percent_threshold))
            import json
            self.assertTrue(
                "errors" in json.loads(rq_content),
                "Error is not present in response")
            self.assertTrue(str(json.loads(rq_content)["errors"]).find("Allowed range is 2 - 100") > -1, \
                            "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(str(json.loads(rq_content)["errors"])))
            self.log.info(
                "Response contains error = '%(errors)s' as expected" %
                json.loads(rq_content))

        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
              and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                TestInputSingleton.input.servers)
            info = rest.get_nodes_self()
            available_ram = info.memoryQuota * (node_ram_ratio) / 2
            items = (int(available_ram * 1000) / 2) / item_size
            print "ITEMS =============%s" % items

            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=int(available_ram),
                               authType='sasl',
                               saslPassword='******',
                               replicaNumber=1,
                               proxyPort=11211)
            BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(
                serverInfo, bucket_name)

            self.log.info(
                "******start to load {0}K keys with {1} bytes/key".format(
                    items, item_size))
            #self.insert_key(serverInfo, bucket_name, items, item_size)
            generator = BlobGenerator('compact',
                                      'compact-',
                                      int(item_size),
                                      start=0,
                                      end=(items * 1000))
            self._load_all_buckets(self.master,
                                   generator,
                                   "create",
                                   0,
                                   1,
                                   batch_size=1000)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            self.log.info(
                "********start to update {0}K keys with smaller value {1} bytes/key"
                .format(items, int(update_item_size)))
            generator_update = BlobGenerator('compact',
                                             'compact-',
                                             int(update_item_size),
                                             start=0,
                                             end=(items * 1000))
            if self.during_ops:
                if self.during_ops == "change_port":
                    self.change_port(
                        new_port=self.input.param("new_port", "9090"))
                    self.master.port = self.input.param("new_port", "9090")
                elif self.during_ops == "change_password":
                    old_pass = self.master.rest_password
                    self.change_password(new_password=self.input.param(
                        "new_password", "new_pass"))
                    self.master.rest_password = self.input.param(
                        "new_password", "new_pass")
                rest = RestConnection(self.master)
            insert_thread = Thread(
                target=self.load,
                name="insert",
                args=(self.master, self.autocompaction_value,
                      self.default_bucket_name, generator_update))
            try:
                self.log.info('starting the load thread')
                insert_thread.start()

                compact_run = remote_client.wait_till_compaction_end(
                    rest,
                    bucket_name,
                    timeout_in_seconds=(self.wait_timeout * 10))

                if not compact_run:
                    self.fail("auto compaction did not run")
                else:
                    self.log.info("auto compaction ran successfully")
            except Exception as ex:
                self.log.info("exception in auto compaction")
                if self.during_ops:
                    if self.during_ops == "change_password":
                        self.change_password(new_password=old_pass)
                    elif self.during_ops == "change_port":
                        self.change_port(new_port='8091',
                                         current_port=self.input.param(
                                             "new_port", "9090"))
                if str(ex).find("enospc") != -1:
                    self.is_crashed.set()
                    self.log.error(
                        "Disk is out of space, unable to load more data")
                    insert_thread._Thread__stop()
                else:
                    insert_thread._Thread__stop()
                    raise ex
            else:
                insert_thread.join()
                if self.err is not None:
                    self.fail(self.err)