Example #1
    def test_database_fragmentation(self):
        percent_threshold = self.autocompaction_value
        bucket_name = "default"
        MAX_RUN = 100
        item_size = 1024
        update_item_size = item_size * ((float(97 - percent_threshold)) / 100)
        serverInfo = self.servers[0]
        self.log.info(serverInfo)
        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)

        output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=100)

        if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
            self.assertFalse(output, "it should be impossible to set compaction value = {0}%".format(percent_threshold))
            import json
            self.assertTrue("errors" in json.loads(rq_content), "Error is not present in response")
            self.assertTrue(json.loads(rq_content)["errors"].find("Allowed range is 2 - 100") > -1, \
                            "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(json.loads(rq_content)["errors"]))

            self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))
        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
                     and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()

            available_ram = info.memoryQuota * node_ram_ratio / 2
            items = (int(available_ram * 1000) / 2) / item_size
            rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                               saslPassword='******', replicaNumber=1, proxyPort=11211)
            BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)

            self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
            self.insert_key(serverInfo, bucket_name, items, item_size)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            self.log.info("start to update {0}K keys with smaller value {1} bytes/key".format(items,
                                                                             int(update_item_size)))
            self.insert_key(serverInfo, bucket_name, items, int(update_item_size))

            compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, timeout_in_seconds=180)
            if not compact_run:
                self.log.error("auto compaction did not run")
            else:
                self.log.info("auto compaction ran successfully")
        else:
            self.log.error("Unknown error")
Example #2
    def _create_default_bucket(self, unittest):
        name = "default"
        master = self.master
        rest = RestConnection(master)
        helper = RestHelper(RestConnection(master))
        if not helper.bucket_exists(name):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()
            available_ram = info.memoryQuota * node_ram_ratio
            rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
            ready = BucketOperationHelper.wait_for_memcached(master, name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(master, name)
            unittest.assertTrue(ready, msg="wait_for_memcached failed")
        unittest.assertTrue(helper.bucket_exists(name),
                            msg="unable to create {0} bucket".format(name))
Example #3
    def _database_fragmentation(self, percent_threshold):
        bucket_name = "default"
        MAX_RUN = 99
        item_size = 1024
        update_item_size = item_size * (float(97 - percent_threshold) / 100)
        serverInfo = self.servers[0]
        self.log.info(serverInfo)
        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)

        rest.reset_auto_compaction()
        parallelDBAndView = "false"
        output = rest.set_autoCompaction(parallelDBAndView, percent_threshold, 100)
        if not output and percent_threshold < MIN_COMPACTION_THRESHOLD:
            self.log.error("Need to set minimum threshold above {0}%".format(MIN_COMPACTION_THRESHOLD))
        elif not output and percent_threshold > MAX_COMPACTION_THRESHOLD:
            self.log.error("Need to set maximum threshold under {0}".format(MAX_COMPACTION_THRESHOLD))
        elif output and percent_threshold == MAX_COMPACTION_THRESHOLD:
            self.log.info("Auto compaction will not run at {0}% setting".format(MAX_COMPACTION_THRESHOLD))
        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
                     and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()

            available_ram = info.memoryQuota * node_ram_ratio / 2
            items = (int(available_ram * 1000) / 2) / item_size
            rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                               saslPassword='******', replicaNumber=1, proxyPort=11211)
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)

            self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
            self.insert_key(serverInfo, bucket_name, items, item_size)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            self.log.info("start to update {0}K keys with smaller value {1} bytes/key".format(items,
                                                                             int(update_item_size)))
            self.insert_key(serverInfo, bucket_name, items, int(update_item_size))
            compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, timeout_in_seconds=180)
            if not compact_run:
                self.log.error("auto compaction did not run")
            else:
                self.log.info("auto compaction ran successfully")
        else:
            self.log.error("Unknown error")
Example #4
    def test_database_fragmentation(self):
        self.log.info('start test_database_fragmentation')

        self.err = None
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        percent_threshold = self.autocompaction_value
        bucket_name = "default"
        MAX_RUN = 100
        item_size = 1024
        update_item_size = item_size * ((float(100 - percent_threshold)) / 100)
        serverInfo = self.servers[0]
        self.log.info(serverInfo)

        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)
        output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=None)

        if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
            self.assertFalse(output, "it should be impossible to set compaction value = {0}%".format(percent_threshold))
            import json
            self.assertTrue("errors" in json.loads(rq_content), "Error is not present in response")
            self.assertTrue(str(json.loads(rq_content)["errors"]).find("Allowed range is 2 - 100") > -1, \
                            "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(str(json.loads(rq_content)["errors"])))
            self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))

        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
                     and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()
            available_ram = info.memoryQuota * node_ram_ratio / 2
            items = (int(available_ram * 1000) / 2) / item_size
            print "ITEMS =============%s" % items

            rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                               saslPassword='******', replicaNumber=1, proxyPort=11211)
            BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)

            self.log.info("******start to load {0}K keys with {1} bytes/key".format(items, item_size))
            #self.insert_key(serverInfo, bucket_name, items, item_size)
            generator = BlobGenerator('compact', 'compact-', int(item_size), start=0, end=(items * 1000))
            self._load_all_buckets(self.master, generator, "create", 0, 1, batch_size=1000)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            self.log.info("********start to update {0}K keys with smaller value {1} bytes/key".format(items,
                                                                             int(update_item_size)))
            generator_update = BlobGenerator('compact', 'compact-', int(update_item_size), start=0, end=(items * 1000))
            if self.during_ops:
                if self.during_ops == "change_port":
                    self.change_port(new_port=self.input.param("new_port", "9090"))
                    self.master.port = self.input.param("new_port", "9090")
                elif self.during_ops == "change_password":
                    old_pass = self.master.rest_password
                    self.change_password(new_password=self.input.param("new_password", "new_pass"))
                    self.master.rest_password = self.input.param("new_password", "new_pass")
                rest = RestConnection(self.master)
            insert_thread = Thread(target=self.load,
                                   name="insert",
                                   args=(self.master, self.autocompaction_value,
                                         self.default_bucket_name, generator_update))
            try:
                self.log.info('starting the load thread')
                insert_thread.start()

                compact_run = remote_client.wait_till_compaction_end(rest, bucket_name,
                                                                     timeout_in_seconds=(self.wait_timeout * 10))

                if not compact_run:
                    self.fail("auto compaction did not run")
                else:
                    self.log.info("auto compaction ran successfully")
            except Exception, ex:
                self.log.info("exception in auto compaction")
                if self.during_ops:
                     if self.during_ops == "change_password":
                         self.change_password(new_password=old_pass)
                     elif self.during_ops == "change_port":
                         self.change_port(new_port='8091',
                                          current_port=self.input.param("new_port", "9090"))
                if str(ex).find("enospc") != -1:
                    self.is_crashed.set()
                    self.log.error("Disk is out of space, unable to load more data")
                    insert_thread._Thread__stop()
                else:
                    insert_thread._Thread__stop()
                    raise ex
            else:
                insert_thread.join()
                if self.err is not None:
                    self.fail(self.err)
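
Note that insert_thread._Thread__stop() pokes a private CPython 2 attribute that was removed in Python 3, and even on 2.x it only marked the thread as stopped rather than terminating it. Below is a portable sketch of the usual alternative, a cooperative stop flag; all names here are illustrative, not from the source:

# Cooperative-stop sketch (illustrative): the load loop checks an Event
# between batches instead of being "stopped" from outside.
import threading

stop_loading = threading.Event()

def load_until_stopped(bucket, generator):
    while not stop_loading.is_set():
        pass  # load one batch here, then re-check the flag

insert_thread = threading.Thread(target=load_until_stopped,
                                 args=("default", None))
insert_thread.start()
stop_loading.set()    # replaces insert_thread._Thread__stop()
insert_thread.join()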