Example #1
    def _warmup_check(self, timeout=1800):
        warmed_up = {}
        stats_all_buckets = {}
        for bucket in self.buckets:
            stats_all_buckets[bucket.name] = StatsCommon()
            warmed_up[bucket.name] = {}
            for server in self.servers:
                warmed_up[bucket.name][server] = False

        for bucket in self.buckets:
            for server in self.servers:
                start = time.time()
                end_time = start + timeout
                warmup_complete = False

                while not warmup_complete and time.time() < end_time:
                    try:
                        # Fetch the warmup state once per iteration rather
                        # than once per comparison.
                        warmup_state = stats_all_buckets[bucket.name].get_stats([server], bucket, 'warmup', 'ep_warmup_thread')[server]
                        if warmup_state == "complete":
                            self.log.info("warmup completed for %s in bucket %s" % (server.ip, bucket.name))
                            warmup_complete = True
                        elif warmup_state == "running":
                            self.log.info("warming up is still running for %s in bucket %s....curr_items_tot : %s" %
                                          (server.ip, bucket.name, stats_all_buckets[bucket.name].get_stats([server], bucket, '', 'curr_items_tot')[server]))

                        # Check for presence before the int() conversion;
                        # a missing stat would otherwise raise here.
                        warmup_time = stats_all_buckets[bucket.name].get_stats([server], bucket, 'warmup', 'ep_warmup_time')[server]
                        if warmup_time is not None:
                            self.log.info("ep_warmup_time is %s for %s in bucket %s" % (int(warmup_time), server.ip, bucket.name))
                        self.sleep(5, "waiting for warmup...")
                    except Exception as e:
                        self.log.error("Could not get warmup stats from server %s:%s, exception %s" % (server.ip, server.port, e))

                self.assertTrue(warmup_complete, "Warm up wasn't complete in %s sec" % timeout)

                start = time.time()
                while time.time() - start < self.timeout and not warmed_up[bucket.name][server]:
                    stats = stats_all_buckets[bucket.name]
                    pre_stats = self.pre_warmup_stats[bucket.name]["%s:%s" % (server.ip, server.port)]
                    curr_items_tot = stats.get_stats([server], bucket, '', 'curr_items_tot')[server]
                    if curr_items_tot == pre_stats["curr_items_tot"]:
                        curr_items = stats.get_stats([server], bucket, '', 'curr_items')[server]
                        if curr_items == pre_stats["curr_items"]:
                            if self._warmup_check_without_access_log():
                                warmed_up[bucket.name][server] = True
                                self._stats_report(server, bucket, stats)
                        else:
                            self.log.info("curr_items is %s not equal to %s" % (curr_items, pre_stats["curr_items"]))
                    else:
                        self.log.info("curr_items_tot is %s not equal to %s" % (curr_items_tot, pre_stats["curr_items_tot"]))

                    self.sleep(10)

        for bucket in self.buckets:
            for server in self.servers:
                if not warmed_up[bucket.name][server]:
                    return False
        return True

    def _warmup_check_without_access_log(self):
        # Only applicable when the test runs without an access log.
        if not self.without_access_log:
            return True

        warmed_up = {}
        stats_all_buckets = {}
        for bucket in self.buckets:
            stats_all_buckets[bucket.name] = StatsCommon()
            warmed_up[bucket.name] = {}
            for server in self.servers:
                warmed_up[bucket.name][server] = False

        for bucket in self.buckets:
            for server in self.servers:
                stats = stats_all_buckets[bucket.name]
                start = time.time()
                while time.time() - start < self.timeout and not warmed_up[bucket.name][server]:
                    # Cast the stats to int so the thresholds compare
                    # numerically rather than lexicographically.
                    key_count = int(stats.get_stats([server], bucket, 'warmup', 'ep_warmup_key_count')[server])
                    min_item_threshold = int(stats.get_stats([server], bucket, 'warmup', 'ep_warmup_min_item_threshold')[server])
                    mem_used = int(stats.get_stats([server], bucket, '', 'mem_used')[server])
                    min_memory_threshold = int(stats.get_stats([server], bucket, 'warmup', 'ep_warmup_min_memory_threshold')[server])
                    mem_low_wat = int(stats.get_stats([server], bucket, '', 'ep_mem_low_wat')[server])

                    if key_count >= min_item_threshold or \
                       mem_used >= min_memory_threshold or \
                       mem_used >= mem_low_wat:
                        warmed_up[bucket.name][server] = True
                    else:
                        curr_items = stats.get_stats([server], bucket, '', 'curr_items')[server]
                        resident_perc = stats.get_stats([server], bucket, '', 'vb_active_perc_mem_resident')[server]
                        self.log.info("curr_items is %s and ep_warmup_min_item_threshold is %s"
                                      % (curr_items, min_item_threshold))
                        self.log.info("vb_active_perc_mem_resident is %s and ep_warmup_min_memory_threshold is %s"
                                      % (resident_perc, min_memory_threshold))
                        self.log.info("mem_used is %s and ep_mem_low_wat is %s"
                                      % (mem_used, mem_low_wat))

                    self.sleep(5)

        for bucket in self.buckets:
            for server in self.servers:
                if not warmed_up[bucket.name][server]:
                    return False
        return True
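
Both warmup checks above lean on the same StatsCommon call shape: get_stats(servers, bucket, stat_group, stat_name) returns a per-server dict of raw stat values. A minimal sketch of that polling pattern, assuming only a StatsCommon-compatible stats object (the helper name is illustrative, not from the suite):

    import time

    def wait_for_warmup(stats, server, bucket, timeout=1800, poll_secs=5):
        # Poll the 'warmup' stat group until ep_warmup_thread reports
        # "complete"; return False if the timeout elapses first.
        deadline = time.time() + timeout
        while time.time() < deadline:
            state = stats.get_stats([server], bucket, 'warmup', 'ep_warmup_thread')[server]
            if state == "complete":
                return True
            time.sleep(poll_secs)
        return False
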
Example #3
    def _create_access_log(self):
        stats_all_buckets = {}
        for bucket in self.buckets:
            stats_all_buckets[bucket.name] = StatsCommon()

        for bucket in self.buckets:
            for server in self.servers:
                scanner_runs = stats_all_buckets[bucket.name].get_stats([server], bucket, '', 'ep_num_access_scanner_runs')[server]
                self.log.info("current access scanner run for %s in bucket %s is %s times" % (server.ip, bucket.name, scanner_runs))
                self.log.info("setting access scanner time %s minutes for %s in bucket %s" % (self.access_log_time, server.ip, bucket.name))
                ClusterOperationHelper.flushctl_set(server, "alog_sleep_time", self.access_log_time, bucket.name)
                if not self._wait_for_access_run(self.access_log_time, scanner_runs, server, bucket, stats_all_buckets[bucket.name]):
                    self.fail("Not able to create access log within %s minutes" % self.access_log_time)
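
_wait_for_access_run is referenced above but not shown. A plausible sketch, assuming it simply polls the same ep_num_access_scanner_runs counter until it exceeds the previously captured value (a hypothetical stand-in, not the suite's actual implementation):

    import time

    def wait_for_access_run(stats, server, bucket, old_num_runs, timeout_mins, poll_secs=5):
        # Wait until the access scanner has completed at least one more run
        # than it had before alog_sleep_time was lowered.
        deadline = time.time() + timeout_mins * 60
        while time.time() < deadline:
            runs = int(stats.get_stats([server], bucket, '', 'ep_num_access_scanner_runs')[server])
            if runs > int(old_num_runs):
                return True
            time.sleep(poll_secs)
        return False
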
Example #4
    def load_document_until_ram_percentage(self):
        self.start = 0
        self.num_items = 30000
        self.end = self.num_items
        while True:
            self.log.info("Add documents to bucket")
            self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create",
                                                   self.start, self.end)

            self.log.info("Calculate available free memory")
            stats_all_buckets = {self.cb_bucket_name: StatsCommon()}
            # Index the result with the same server object passed to
            # get_stats, since the returned dict is keyed by it.
            memory_used = int(stats_all_buckets[self.cb_bucket_name].get_stats(
                [self.master], self.cb_bucket_name, '',
                'mem_used')[self.master])

            if memory_used < (self.document_ram_percentage * self.bucket_ram *
                              1000000):
                self.log.info("Continue loading we have more free memory")
                self.start = self.end
                self.end = self.end + self.num_items
            else:
                break
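
The exit condition above compares mem_used (bytes) with a fraction of the bucket quota, scaling the quota from MB with a factor of 1000000. A worked sketch of just that check (the helper name and sample numbers are illustrative):

    def ram_threshold_reached(memory_used_bytes, bucket_ram_mb, ram_percentage):
        # Same arithmetic as the loop's stopping condition.
        return memory_used_bytes >= ram_percentage * bucket_ram_mb * 1000000

    # With a 1024 MB quota and a 0.85 target, loading stops once mem_used
    # reaches 0.85 * 1024 * 1000000 = 870400000 bytes.
    assert ram_threshold_reached(900 * 10 ** 6, 1024, 0.85)
    assert not ram_threshold_reached(800 * 10 ** 6, 1024, 0.85)
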
Example #5
    def test_items_append(self):
        self.desired_item_size = self.input.param("desired_item_size", 2048)
        self.append_size = self.input.param("append_size", 1024)
        self.fixed_append_size = self.input.param("fixed_append_size", True)
        self.append_ratio = self.input.param("append_ratio", 0.5)
        self._load_all_buckets(self.master,
                               self.gen_create,
                               "create",
                               0,
                               batch_size=1000,
                               pause_secs=5,
                               timeout_secs=100)

        for bucket in self.buckets:
            self.value_size = self.input.param("value_size", 512)
            verify_dict = {}
            vkeys, dkeys = bucket.kvs[1].key_set()

            key_count = len(vkeys)
            app_ratio = self.append_ratio * key_count
            # Select the first append_ratio fraction of the keys.
            selected_keys = []
            for i, key in enumerate(vkeys, start=1):
                if i > app_ratio:
                    break
                selected_keys.append(key)

            awareness = VBucketAwareMemcached(RestConnection(self.master),
                                              bucket.name)
            if self.kv_verify:
                for key in selected_keys:
                    value = awareness.memcached(key).get(key)[2]
                    verify_dict[key] = value

            self.log.info("Bucket: {0}".format(bucket.name))
            self.log.info("Appending to have items whose initial size was " +
                          "{0} to equal or cross a size of {1}".format(
                              self.value_size, self.desired_item_size))
            self.log.info("Item-appending of {0} items starting ..".format(
                len(selected_keys) + 1))

            # With fixed_append_size the append length stays constant at
            # append_size; otherwise it doubles each pass, starting at 2 ** 3.
            index = 3
            while self.value_size < self.desired_item_size:
                str_len = self.append_size
                if not self.fixed_append_size:
                    str_len = int(math.pow(2, index))

                for key in selected_keys:
                    random_string = self.random_str_generator(str_len)
                    awareness.memcached(key).append(key, random_string)
                    if self.kv_verify:
                        verify_dict[key] = verify_dict[key] + random_string
                self.value_size += str_len
                self.log.info(
                    "for {0} items size was increased to {1} Bytes".format(
                        len(selected_keys), self.value_size))
                index += 1

            self.log.info("The appending of {0} items ended".format(
                len(selected_keys) + 1))

        for bucket in self.buckets:
            msg = "Bucket:{0}".format(bucket.name)
            self.log.info("VERIFICATION <" + msg +
                          ">: Phase 0 - Check the gap between " +
                          "mem_used by the bucket and total_allocated_bytes")
            stats = StatsCommon()
            mem_used_stats = stats.get_stats(self.servers, bucket, 'memory',
                                             'mem_used')
            total_allocated_bytes_stats = stats.get_stats(
                self.servers, bucket, 'memory', 'total_allocated_bytes')
            total_fragmentation_bytes_stats = stats.get_stats(
                self.servers, bucket, 'memory', 'total_fragmentation_bytes')

            for server in self.servers:
                self.log.info(
                    "In {0} bucket {1}, total_fragmentation_bytes + total_allocated_bytes = {2}"
                    .format(server.ip, bucket.name,
                            (int(total_fragmentation_bytes_stats[server]) +
                             int(total_allocated_bytes_stats[server]))))
                self.log.info("In {0} bucket {1}, mem_used = {2}".format(
                    server.ip, bucket.name, mem_used_stats[server]))
                self.log.info(
                    "In {0} bucket {1}, the ratio of memory actually allocated by memcached to mem_used is {2}"
                    .format(
                        server.ip, bucket.name,
                        float(
                            int(total_fragmentation_bytes_stats[server]) +
                            int(total_allocated_bytes_stats[server])) /
                        float(mem_used_stats[server])))

            self.log.info(
                "VERIFICATION <" + msg + ">: Phase 1 - Check if any of the " +
                "selected keys have a value smaller than the desired value size")
            # selected_keys and awareness carry over from the append loop
            # above, so these checks cover the most recently loaded bucket.
            for key in selected_keys:
                value = awareness.memcached(key).get(key)[2]
                if len(value) < self.desired_item_size:
                    self.fail(
                        "Failed to append enough to make value size surpass the "
                        + "size {0}, key {1} has size {2}".format(
                            self.desired_item_size, key, len(value)))

            if self.kv_verify:
                self.log.info("VERIFICATION <" + msg +
                              ">: Phase2 - Check if the content " +
                              "after the appends match what's expected")
                for k in verify_dict:
                    if awareness.memcached(k).get(k)[2] != verify_dict[k]:
                        self.fail(
                            "Content at key {0}: not what's expected.".format(
                                k))
                self.log.info("VERIFICATION <" + msg + ">: Successful")

        shell = RemoteMachineShellConnection(self.master)
        shell.execute_cbstats("", "raw", keyname="allocator", vbid="")
        shell.disconnect()
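
The kv_verify path mirrors every append into a local dict and compares the stored values at the end (Phase 2). A stripped-down sketch of that shadow-copy pattern, assuming a memcached-style client exposing get(key) -> (flags, cas, value) and append(key, data), as the VBucketAwareMemcached wrapper does above:

    def append_and_verify(client, keys, chunk, rounds=1):
        # Keep a local mirror of each value while appending, then confirm
        # the server-side content matches the mirror.
        shadow = {key: client.get(key)[2] for key in keys}
        for _ in range(rounds):
            for key in keys:
                client.append(key, chunk)
                shadow[key] += chunk
        for key, expected in shadow.items():
            assert client.get(key)[2] == expected, "mismatch at key %s" % key
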
Example #6
    def test_gsi_on_ephemeral_with_eviction_policy(self):
        num_of_docs = self.num_of_docs_per_collection
        self.prepare_collection_for_indexing(num_of_docs_per_collection=self.num_of_docs_per_collection)
        collection_namespace = self.namespaces[0]
        _, keyspace = collection_namespace.split(':')
        # Use a distinct name so the bucket objects iterated below are not
        # shadowed by this keyspace string.
        bucket_name, scope, collection = keyspace.split('.')
        index_gen = QueryDefinition(index_name='idx', index_fields=['age', 'country', 'city'])
        meta_index_gen = QueryDefinition(index_name='meta_idx', index_fields=['meta().id'])

        query = index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=self.defer_build)
        self.run_cbq_query(query)
        if self.defer_build:
            build_query = index_gen.generate_build_query(namespace=collection_namespace)
            self.run_cbq_query(build_query)
        self.wait_until_indexes_online()
        query = meta_index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=self.defer_build)
        self.run_cbq_query(query)
        if self.defer_build:
            build_query = meta_index_gen.generate_build_query(namespace=collection_namespace)
            self.run_cbq_query(build_query)
        self.wait_until_indexes_online()

        select_query = f'Select * from {collection_namespace} where age >10 and country like "A%";'
        select_meta_id_query = f'Select meta().id from {collection} where meta().id like "doc_%";'
        count_query = f'Select count(*) from {collection_namespace} where age >= 0;'
        named_collection_query_context = f'default:{bucket_name}.{scope}'

        select_result = self.run_cbq_query(query=select_query)['results']
        meta_result = self.run_cbq_query(query=select_meta_id_query,
                                         query_context=named_collection_query_context)['results']
        count_result = self.run_cbq_query(query=count_query)['results'][0]['$1']
        self.assertTrue(len(select_result) > 0)
        self.assertEqual(len(meta_result), self.num_of_docs_per_collection)
        self.assertEqual(count_result, self.num_of_docs_per_collection)

        new_inserts = 10 ** 4
        is_memory_full = False
        stats_all_buckets = {}
        for bucket in self.buckets:
            stats_all_buckets[bucket.name] = StatsCommon()
        # Note: `bucket` now refers to the last (typically the only) bucket
        # from the loop above; the data loads below target it.

        threshold = 0.93
        last_memory_used_val = 0
        while not is_memory_full:
            gen_create = SDKDataLoader(num_ops=new_inserts, percent_create=100,
                                       percent_update=0, percent_delete=0, scope=scope,
                                       collection=collection, output=True, start_seq_num=num_of_docs+1)
            task = self.cluster.async_load_gen_docs(self.master, bucket, gen_create)
            task.result()
            # Updating the doc counts
            num_of_docs = num_of_docs + new_inserts
            self.sleep(30)
            memory_used = int(stats_all_buckets[bucket.name].get_stats([self.master], bucket, '',
                                                                       'mem_used')[self.master])
            self.log.info(f"Current memory usage: {memory_used}")
            if self.eviction_policy == 'noEviction':
                # memory is considered full once mem_used crosses the
                # threshold (93%) of the bucket quota
                if memory_used > threshold * self.bucket_size * 1000000:
                    # Just filling the leftover memory to be double sure
                    gen_create = SDKDataLoader(num_ops=new_inserts, percent_create=100,
                                               percent_update=0, percent_delete=0, scope=scope,
                                               collection=collection, output=True, start_seq_num=num_of_docs + 1)
                    task = self.cluster.async_load_gen_docs(self.master, bucket, gen_create)
                    task.result()
                    num_of_docs = num_of_docs + new_inserts
                    memory_used = int(stats_all_buckets[bucket.name].get_stats([self.master], bucket, '',
                                                                               'mem_used')[self.master])
                    self.log.info(f"Current memory usage: {memory_used}")
                    is_memory_full = True
            else:
                # With an eviction policy active, mem_used eventually drops
                # between loads once eviction kicks in; treat that as full.
                if memory_used < last_memory_used_val:
                    break
                last_memory_used_val = memory_used

        meta_ids = self.run_cbq_query(query=select_meta_id_query,
                                      query_context=named_collection_query_context)['results']

        ids_at_threshold = sorted([item['id'] for item in meta_ids])

        # Pushing new docs to check the eviction policy
        new_inserts = 10 ** 4
        gen_create = SDKDataLoader(num_ops=new_inserts, percent_create=100, json_template="Employee",
                                   percent_update=0, percent_delete=0, scope=scope,
                                   collection=collection, output=True, start_seq_num=num_of_docs+1)
        tasks = self.data_ops_javasdk_loader_in_batches(sdk_data_loader=gen_create, batch_size=10000)
        for task in tasks:
            out = task.result()
            self.log.info(out)

        meta_ids_with_eviction_enforced = self.run_cbq_query(query=select_meta_id_query,
                                                             query_context=named_collection_query_context)['results']
        ids_after_threshold = sorted([item['id'] for item in meta_ids_with_eviction_enforced])

        if self.eviction_policy == 'noEviction':
            self.assertEqual(len(meta_ids_with_eviction_enforced), len(meta_ids))
            self.assertEqual(ids_at_threshold, ids_after_threshold)
        else:
            self.assertNotEqual(len(meta_ids_with_eviction_enforced), len(meta_ids))
            self.assertNotEqual(ids_after_threshold, ids_at_threshold)
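
The while loop above detects a "full" bucket in two different ways depending on the eviction policy. A compact sketch of that decision, under the same assumptions as the test (quota in MB, mem_used in bytes, helper name illustrative):

    def memory_full(eviction_policy, memory_used, last_memory_used,
                    bucket_size_mb, threshold=0.93):
        # noEviction: full once mem_used crosses ~93% of the quota.
        # Otherwise (e.g. nruEviction): full once mem_used stops growing
        # between loads, i.e. eviction has started reclaiming memory.
        if eviction_policy == 'noEviction':
            return memory_used > threshold * bucket_size_mb * 1000000
        return memory_used < last_memory_used
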