Example #1
 def verify_bucket_count_with_index_count(self, indexMap, totalCount,
                                          field):
     """
     :param indexMap: bucket -> scope -> collection -> list of GSI index names
     :param totalCount: expected item count for every index
     :param field: indexed field used in the count-query predicate
     :return: True if every index count matches totalCount, else False
     """
     count = 0
     indexer_rest = GsiHelper(self.cluster.index_nodes[0], self.log)
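     # Walk every bucket -> scope -> collection -> index and compare its count(*) against the expected total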
     for bucket, bucket_data in indexMap.items():
         indexer_rest.wait_for_indexing_to_complete(bucket)
         for scope, collection_data in bucket_data.items():
             for collection, gsi_index_names in collection_data.items():
                 for gsi_index_name in gsi_index_names:
                     count_query = "select count(*) from `%s`.`%s`.`%s` use index(`%s`) where %s is not missing" \
                                   % (bucket,
                                      scope, collection, gsi_index_name, field)
                     self.log.debug("Count query is {}".format(count_query))
                     status, content, header = indexer_rest.execute_query(
                         server=self.cluster.query_nodes[0],
                         query=count_query)
                     index_count = int(
                         json.loads(content)['results'][0]['$1'])
                     if index_count != int(totalCount):
                         self.log.info(
                             "Expected count is {} and actual count is {}".
                             format(totalCount, index_count))
                         return False
     self.log.info("Items Indexed Verified with bucket count...")
     return True
Example #2
 def _verify_items_count(self):
     """
     Verifies that indexing has caught up by checking the
     pending and queued doc counts for every index.
     """
     indexer_rest = GsiHelper(self.cluster.index_nodes[0], self.log)
     index_map = indexer_rest.get_index_stats()
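     # get_index_stats() returns per-bucket, per-index stats; pending/queued docs mean indexing is still in progress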
     for bucket_name in index_map.keys():
         self.log.info("Bucket: {0}".format(bucket_name))
         for index_name, index_val in index_map[bucket_name].items():
             self.log.info("Index: {0}".format(index_name))
             self.log.info("number of docs pending: {0}".format(
                 index_val["num_docs_pending"]))
             self.log.info("number of docs queued: {0}".format(
                 index_val["num_docs_queued"]))
             if index_val["num_docs_pending"] and index_val[
                     "num_docs_queued"]:
                 return False
     return True
Example #3
 def wait_for_indexes_online(self, logger, indexes, timeout=86400):
     self.rest = GsiHelper(self.cluster.master, logger)
     status = False
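     # Poll each index's create status until it reports online or the timeout expires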
     for index_name, details in indexes.items():
         stop_time = time.time() + timeout
         while time.time() < stop_time:
             bucket = [
                 bucket for bucket in self.cluster.buckets
                 if bucket.name == details[1]
             ]
             status = self.rest.polling_create_index_status(
                 bucket[0], index_name)
             print("index: {}, status: {}".format(index_name, status))
             if status is True:
                 break
             time.sleep(5)
         if status is False:
             return status
     return status
Example #4
class DoctorN1QL():
    def __init__(self,
                 cluster,
                 bucket_util,
                 num_idx=10,
                 server_port=8095,
                 querycount=100,
                 batch_size=50):
        self.port = server_port
        self.failed_count = 0
        self.success_count = 0
        self.rejected_count = 0
        self.error_count = 0
        self.cancel_count = 0
        self.timeout_count = 0
        self.total_query_count = 0
        self.concurrent_batch_size = batch_size
        self.total_count = querycount
        self.num_indexes = num_idx
        self.bucket_util = bucket_util
        self.cluster = cluster

        self.sdkClient = SDKClient(cluster.query_nodes, None)
        self.cluster_conn = self.sdkClient.cluster
        self.stop_run = False
        self.queries = list()
        self.indexes = dict()
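        # Round-robin the module-level `indexes`/`queries` templates across every
        # bucket, scope and collection until num_indexes definitions exist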
        i = 0
        while i < self.num_indexes:
            for b in self.cluster.buckets:
                for s in self.bucket_util.get_active_scopes(b,
                                                            only_names=True):
                    for c in sorted(
                            self.bucket_util.get_active_collections(
                                b, s, only_names=True)):
                        self.idx_q = indexes[i % len(indexes)].format(
                            "idx", i, b.name, s, c)
                        self.indexes.update(
                            {"idx" + str(i): (self.idx_q, b.name, s, c)})
                        self.queries.append(queries[i % len(indexes)].format(
                            b.name, s, c))
                        i += 1
                        if i >= self.num_indexes:
                            break
                    if i >= self.num_indexes:
                        break
                if i >= self.num_indexes:
                    break

    def discharge_N1QL(self):
        self.stop_run = True

    def create_indexes(self):
        for index in self.indexes.values():
            time.sleep(1)
            self.execute_statement_on_n1ql(index[0])

    def wait_for_indexes_online(self, logger, indexes, timeout=86400):
        self.rest = GsiHelper(self.cluster.master, logger)
        status = False
        for index_name, details in indexes.items():
            stop_time = time.time() + timeout
            while time.time() < stop_time:
                bucket = [
                    bucket for bucket in self.cluster.buckets
                    if bucket.name == details[1]
                ]
                status = self.rest.polling_create_index_status(
                    bucket[0], index_name)
                print("index: {}, status: {}".format(index_name, status))
                if status is True:
                    break
                time.sleep(5)
            if status is False:
                return status
        return status

    def build_indexes(self):
        for index, b_s_c in self.indexes.items():
            build_query = "BUILD INDEX on `%s`.`%s`.`%s`(%s) USING GSI" % (
                b_s_c[1], b_s_c[2], b_s_c[3], index)
            time.sleep(1)
            try:
                self.execute_statement_on_n1ql(build_query)
            except Exception as e:
                print(e)
                print("Failed %s" % build_query)

    def drop_indexes(self):
        for index, b_s_c in self.indexes.items():
            build_query = "DROP INDEX %s on `%s`.`%s`.`%s`" % (
                index, b_s_c[1], b_s_c[2], b_s_c[3])
            self.execute_statement_on_n1ql(build_query)

    def start_query_load(self):
        th = threading.Thread(target=self._run_concurrent_queries,
                              kwargs=dict(num_queries=self.num_indexes))
        th.start()

        monitor = threading.Thread(target=self.monitor_query_status,
                                   kwargs=dict(duration=0, print_duration=60))
        monitor.start()

    def _run_concurrent_queries(self, num_queries):
        threads = []
        self.total_query_count = 0
        query_count = 0
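        # Initial burst: build one thread per query, starting them in batches of concurrent_batch_size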
        for i in range(0, num_queries):
            self.total_query_count += 1
            threads.append(
                Thread(target=self._run_query,
                       name="query_thread_{0}".format(self.total_query_count),
                       args=(random.choice(self.queries), False, 0)))

        i = 0
        for thread in threads:
            i += 1
            if i % self.concurrent_batch_size == 0:
                time.sleep(5)
            thread.start()
            query_count += 1

        i = 0
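        # Steady state: keep topping up new queries until stop_run is set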
        while not self.stop_run:
            threads = []
            new_queries_to_run = num_queries - self.total_count
            for i in range(0, new_queries_to_run):
                self.total_query_count += 1
                threads.append(
                    Thread(target=self._run_query,
                           name="query_thread_{0}".format(
                               self.total_query_count),
                           args=(random.choice(self.queries), False, 0)))
            i = 0
            self.total_count += new_queries_to_run
            for thread in threads:
                i += 1
                thread.start()

            time.sleep(2)
        if self.failed_count + self.error_count != 0:
            raise Exception("Queries Failed:%s , Queries Error Out:%s" %
                            (self.failed_count, self.error_count))

    def _run_query(self, query, validate_item_count=False, expected_count=0):
        name = threading.currentThread().getName()
        client_context_id = name
        try:
            status, _, _, results, _ = self.execute_statement_on_n1ql(
                query, client_context_id=client_context_id)
            if status == QueryStatus.SUCCESS:
                if validate_item_count:
                    if results[0]['$1'] != expected_count:
                        self.failed_count += 1
                        self.total_count -= 1
                    else:
                        self.success_count += 1
                        self.total_count -= 1
                else:
                    self.success_count += 1
                    self.total_count -= 1
            else:
                self.failed_count += 1
                self.total_count -= 1
        except Exception as e:
            # Classify the failure by exception type
            if isinstance(e, (TimeoutException, AmbiguousTimeoutException)):
                self.timeout_count += 1
                self.total_count -= 1
            elif isinstance(e, RequestCanceledException):
                self.cancel_count += 1
                self.total_count -= 1
            elif isinstance(e, CouchbaseException):
                self.rejected_count += 1
                self.total_count -= 1
            else:
                self.error_count += 1
                self.total_count -= 1

    def execute_statement_on_n1ql(self, statement, client_context_id=None):
        """
        Executes an N1QL statement via the SDK and unpacks the response
        """
        try:
            response = self.execute_via_sdk(statement, False,
                                            client_context_id)

            if isinstance(response, str):
                response = json.loads(response)

            # Pull each field out of the response, defaulting to None if absent
            errors = response.get("errors")
            results = response.get("results")
            handle = response.get("handle")
            metrics = response.get("metrics")
            status = response.get("status")
            return status, metrics, errors, results, handle

        except Exception as e:
            raise Exception(str(e))

    def execute_via_sdk(self,
                        statement,
                        readonly=False,
                        client_context_id=None):
        options = QueryOptions.queryOptions()
        options.scanConsistency(QueryScanConsistency.NOT_BOUNDED)
        options.readonly(readonly)
        if client_context_id:
            options.clientContextId(client_context_id)

        output = {}
        try:
            result = self.cluster_conn.query(statement, options)

            output["status"] = result.metaData().status()
            output["metrics"] = result.metaData().metrics()

            try:
                output["results"] = result.rowsAsObject()
            except Exception:
                output["results"] = None

            if str(output['status']) == QueryStatus.FATAL:
                msg = output['errors'][0]['msg']
                if "Job requirement" in msg and "exceeds capacity" in msg:
                    raise Exception("Capacity cannot meet job requirement")
            elif output['status'] == QueryStatus.SUCCESS:
                output["errors"] = None
            else:
                raise Exception("N1QL query failed")

        except InternalServerFailureException as e:
            print(e)
            traceback.print_exc()
            raise Exception(e)
        except (TimeoutException, AmbiguousTimeoutException) as e:
            raise Exception(e)
        except RequestCanceledException as e:
            raise Exception(e)
        except CouchbaseException as e:
            raise Exception(e)
        except Exception as e:
            print(e)
            traceback.print_exc()
        return output

    def monitor_query_status(self, duration=0, print_duration=600):
        st_time = time.time()
        update_time = time.time()
        if duration == 0:
            while not self.stop_run:
                if st_time + print_duration < time.time():
                    print("%s N1QL queries submitted, %s failed, \
                        %s passed, %s rejected, \
                        %s cancelled, %s timeout, %s errored" %
                          (self.total_query_count, self.failed_count,
                           self.success_count, self.rejected_count,
                           self.cancel_count, self.timeout_count,
                           self.error_count))
                    st_time = time.time()
        else:
            while st_time + duration > time.time():
                if update_time + print_duration < time.time():
                    print("%s N1QL queries submitted, %s failed, \
                        %s passed, %s rejected, \
                        %s cancelled, %s timeout, %s errored" %
                          (self.total_query_count, self.failed_count,
                           self.success_count, self.rejected_count,
                           self.cancel_count, self.timeout_count,
                           self.error_count))
                    update_time = time.time()

    def crash_index_plasma(self, nodes=None):
        self.crash_count = 0
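        # self.crashes (max kill iterations) and self.sleep() are assumed to be provided by the owning test class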
        if not nodes:
            nodes = self.cluster.index_nodes
        shells = list()
        for node in nodes:
            shells.append(RemoteMachineShellConnection(node))
        while not self.stop_run:
            sleep = random.randint(120, 240)
            self.sleep(
                sleep,
                "Iteration:{} waiting to kill indexer on nodes: {}".format(
                    self.crash_count, nodes))
            for shell in shells:
                shell.kill_indexer()
            self.crash_count += 1
            if self.crash_count > self.crashes:
                break
        for shell in shells:
            shell.disconnect()
        self.sleep(300)
Example #5
    def test_upgrade(self):
        self.log.info("Upgrading cluster nodes to target version")
        self.index_replicas = self.input.param("index_replicas", 0)
        self.index_count = self.input.param("index_count", 1)
        major_version = float(self.initial_version[:3])
        self.log.info("major version is {}".format(major_version))
        rest = RestConnection(self.cluster.master)
        # Update RAM quota allocated to buckets created before upgrade
        cluster_info = rest.get_nodes_self()
        kv_quota = \
            cluster_info.__getattribute__(CbServer.Settings.KV_MEM_QUOTA)
        bucket_size = kv_quota // (self.input.param("num_buckets", 1) + 1)
        for bucket in self.cluster.buckets:
            self.bucket_util.update_bucket_property(
                self.cluster.master, bucket, bucket_size)
        self.log.info("Creating new buckets with scopes and collections")
        for i in range(1, self.input.param("num_buckets", 1) + 1):
            self.bucket_util.create_default_bucket(
                self.cluster,
                replica=self.num_replicas,
                compression_mode=self.compression_mode,
                ram_quota=bucket_size,
                bucket_type=self.bucket_type,
                storage=self.bucket_storage,
                eviction_policy=self.bucket_eviction_policy,
                bucket_durability=self.bucket_durability_level,
                bucket_name="bucket_{0}".format(i))
        if major_version >= 7.0:
            self.over_ride_spec_params = self.input.param(
                "override_spec_params", "").split(";")
            self.doc_spec_name = self.input.param("doc_spec", "initial_load")
            self.load_data_into_buckets()
        else:
            for bucket in self.cluster.buckets[1:]:
                gen_load = doc_generator(
                    self.key, 0, self.num_items,
                    randomize_doc_size=True, randomize_value=True,
                    randomize=True)
                async_load_task = self.task.async_load_gen_docs(
                    self.cluster, bucket, gen_load,
                    DocLoading.Bucket.DocOps.CREATE,
                    active_resident_threshold=self.active_resident_threshold,
                    timeout_secs=self.sdk_timeout,
                    process_concurrency=8,
                    batch_size=500,
                    sdk_client_pool=self.sdk_client_pool)
                self.task_manager.get_task_result(async_load_task)
                # Update num_items in case of DGM run
                if self.active_resident_threshold != 100:
                    self.num_items = async_load_task.doc_index
                bucket.scopes[CbServer.default_scope].collections[
                    CbServer.default_collection].num_items = self.num_items
                # Verify doc load count
                self.bucket_util._wait_for_stats_all_buckets(
                    self.cluster, self.cluster.buckets)
                self.sleep(30, "Wait for num_items to get reflected")
                current_items = self.bucket_util.get_bucket_current_item_count(
                    self.cluster, bucket)
        field = 'body'
        self.timer = self.input.param("timer", 600)
        self.indexUtil = IndexUtils(server_task=self.task)
        rest.set_indexer_storage_mode(storageMode="plasma")
        self.cluster.index_nodes = self.cluster_util.get_nodes_from_services_map(
            self.cluster, service_type="index", get_all_nodes=True)
        self.cluster.query_nodes = self.cluster_util.get_nodes_from_services_map(
            self.cluster, service_type="n1ql", get_all_nodes=True)
        indexMap, createIndexTasklist = self.indexUtil.create_gsi_on_each_collection(
            self.cluster, gsi_base_name="Emp_id_index",
            replica=self.index_replicas, defer=False,
            number_of_indexes_per_coll=self.index_count,
            field='emp_id', sync=False, timeout=self.wait_timeout)

        for taskInstance in createIndexTasklist:
            self.task.jython_task_manager.get_task_result(taskInstance)

        indexMap, createIndexTasklist = self.indexUtil.create_gsi_on_each_collection(
            self.cluster, gsi_base_name="Name_index",
            replica=self.index_replicas, defer=False,
            number_of_indexes_per_coll=self.index_count,
            field='name', sync=False, timeout=self.wait_timeout)

        for taskInstance in createIndexTasklist:
            self.task.jython_task_manager.get_task_result(taskInstance)
        node_to_upgrade = self.fetch_node_to_upgrade()
        while node_to_upgrade is not None:
            self.log.info("Selected node for upgrade: %s"
                          % node_to_upgrade.ip)
            self.upgrade_function[self.upgrade_type](node_to_upgrade,
                                                     self.upgrade_version)
            self.cluster_util.print_cluster_stats(self.cluster)
            self.log.info("Changing master")
            try:
                self.cluster.update_master_using_diag_eval(
                    self.cluster.servers[0])
            except Exception:
                self.cluster.update_master_using_diag_eval(
                    self.cluster.servers[self.nodes_init - 1])
            node_to_upgrade = self.fetch_node_to_upgrade()

        self.cluster.index_nodes = self.cluster_util.get_nodes_from_services_map(
            self.cluster, service_type="index", get_all_nodes=True)
        self.cluster.query_nodes = self.cluster_util.get_nodes_from_services_map(
            self.cluster, service_type="n1ql", get_all_nodes=True)
        self.sweep_interval = self.input.param("sweep_interval", 120)
        rest = GsiHelper(self.cluster.index_nodes[0], self.log)
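        # Tune the indexer snapshot interval and plasma sweep/compression settings, then wait for compression to kick in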
        self.moi_snapshot_interval = self.input.param("moi_snapshot_interval", 120)
        rest.set_index_settings({"indexer.settings.persisted_snapshot.moi.interval": self.moi_snapshot_interval})
        rest.set_index_settings({"indexer.plasma.mainIndex.evictSweepInterval": self.sweep_interval})
        rest.set_index_settings({"indexer.plasma.backIndex.evictSweepInterval": self.sweep_interval})
        rest.set_index_settings({"indexer.plasma.backIndex.enableInMemoryCompression": True})
        rest.set_index_settings({"indexer.plasma.mainIndex.enableInMemoryCompression": True})
        rest.set_index_settings({"indexer.plasma.backIndex.enableCompressDuringBurst": True})
        rest.set_index_settings({"indexer.plasma.mainIndex.enableCompressDuringBurst": True})
        self.sleep(2 * self.sweep_interval, "Waiting for items to compress")
        self.check_compression_stat(self.cluster.index_nodes)
Example #6
    def test_memory_usage_without_index_nodes(self):
        indexDict = dict()

        self.cluster_util.print_cluster_stats()
        self.num_replicas = self.input.param("num_replicas", 1)
        self.log.info("Starting upsert test")
        i = 0
        query_nodes_list = self.cluster_util.get_nodes_from_services_map(
            service_type="n1ql", get_all_nodes=True)
        indexer_nodes_list = self.cluster_util.get_nodes_from_services_map(
            service_type="index", get_all_nodes=True)
        indexer_rest = GsiHelper(self.cluster.master, self.log)

        rest = RestConnection(self.cluster.master)
        cluster_stat = rest.get_cluster_stats()

        index_Mem_Map = dict()
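        # Snapshot free memory on every index node so it can be compared after those nodes are rebalanced out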
        for indexNode in indexer_nodes_list:
            index_Mem_Map[indexNode.ip +
                          ":8091"] = cluster_stat[indexNode.ip +
                                                  ":8091"]['mem_free']

        self.cluster_util.print_cluster_stats()

        self.index_count = self.input.param("index_count", 2)
        index_list = list()
        listCounter = 0
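        # Create deferred indexes on every collection, then build them per collection below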
        for bucket in self.cluster.buckets:
            for scope_name in bucket.scopes.keys():
                for collection in bucket.scopes[scope_name].collections.keys():
                    index_list_instance = list()
                    for i in range(self.index_count):
                        indexName = "Index" + str(listCounter) + str(i)
                        index_query = "CREATE INDEX `%s` ON `%s`.`%s`.`%s`(`body`) Using GSI with {\"defer_build\":True}" % (
                            indexName, bucket.name, scope_name, collection)
                        self.query_client = RestConnection(query_nodes_list[0])
                        indexDict[indexName] = index_query
                        self.query_client.query_tool(index_query)
                        index_list_instance.append(indexName)
                    index_list.append(index_list_instance)
                    listCounter += 1

        i = 0

        for bucket in self.cluster.buckets:
            for scope_name in bucket.scopes.keys():
                for collection in bucket.scopes[scope_name].collections.keys():
                    index_query = "Build INDEX  ON `%s`.`%s`.`%s` (%s) USING GSI" % (
                        bucket.name, scope_name, collection, index_list[i])
                    self.query_client = RestConnection(query_nodes_list[0])
                    self.query_client.query_tool(index_query)
                    i = i + 1

        for index_list_instance in index_list:
            for indexName in index_list_instance:
                indexer_rest.polling_create_index_status(
                    bucket=self.buckets[0], index=indexName)
        self.bucket_util.delete_all_buckets(self.cluster)
        nodes_out = indexer_nodes_list[1:]
        result = self.task.rebalance([self.cluster.master],
                                     to_add=[],
                                     to_remove=nodes_out)

        self.cluster_util.print_cluster_stats()
        self.time_counter = self.input.param("time_counter", 300)
        timer = 0
        flag = False
        for item, itemValue in index_Mem_Map.items():
            status = self.isServerListContainsNode(serverList=nodes_out,
                                                   ip=item)
            while not status:
                flag = False
                cluster_stat = rest.get_cluster_stats()
                expectedValue = int(cluster_stat[item]['mem_free'])
                inputValue = int(itemValue)
                if 0.9 * inputValue <= expectedValue:
                    flag = True
                    break
                timer += 1
                self.sleep(1)
                # self.log.info("Initial memory is {0} recent memory is {1}".format(str(inputValue), str(expectedValue)))
                if timer > self.time_counter:
                    break

        self.log.info("Timer value is:" + str(timer))
        self.assertTrue(flag, "Memory not recovered")
        print("stop here")
Example #7
    def test_plasma_stats(self):
        indexDict = dict()

        self.cluster_util.print_cluster_stats()
        self.num_replicas = self.input.param("num_replicas", 1)
        self.log.info("Starting upsert test")
        i = 0
        query_nodes_list = self.cluster_util.get_nodes_from_services_map(
            service_type="n1ql", get_all_nodes=True)
        indexer_nodes_list = self.cluster_util.get_nodes_from_services_map(
            service_type="index", get_all_nodes=True)
        indexer_rest = GsiHelper(self.cluster.master, self.log)

        rest = RestConnection(self.cluster.master)
        cluster_stat = rest.get_cluster_stats()

        index_Mem_Map = dict()
        for indexNode in indexer_nodes_list:
            index_Mem_Map[indexNode.ip +
                          ":8091"] = cluster_stat[indexNode.ip +
                                                  ":8091"]['mem_free']

        # Baseline plasma stats for the pre-created "initial_idx" before more indexes are added
        initialPlasmaStats = indexer_rest.get_plasma_stats(
            nodes_list=indexer_nodes_list)

        bucket_Index_key = self.buckets[0].name + ":" + "initial_idx"
        self.assertEqual(initialPlasmaStats[bucket_Index_key + '_items_count'],
                         self.num_items,
                         "Item count does not match the expected value")
        self.assertEqual(initialPlasmaStats[bucket_Index_key + '_inserts'],
                         self.num_items,
                         "Insert count does not match the expected value")

        self.print_plasma_stats(plasmaDict=initialPlasmaStats,
                                bucket=self.buckets[0],
                                indexname="initial_idx")

        self.index_count = self.input.param("index_count", 5)

        for i in range(self.index_count):
            indexName = "Index" + str(i)
            index_query = "CREATE INDEX `%s` ON `%s`(`body`) with {\"num_replica\":%s}" % (
                indexName, self.buckets[0].name, self.num_replicas)
            self.query_client = RestConnection(query_nodes_list[0])
            indexDict[indexName] = index_query
            self.query_client.query_tool(index_query)
            result = indexer_rest.polling_create_index_status(
                bucket=self.buckets[0], index=indexName)
            self.log.info("Status is:" + str(result))

        interMediatePlasmaStats = indexer_rest.get_plasma_stats(
            nodes_list=indexer_nodes_list)

        # indexDict.items() yields (name, query) pairs; indexName[0] is the index name
        for indexName in indexDict.items():
            bucket_Index_key = self.buckets[0].name + ":" + indexName[0]
            self.assertEqual(
                interMediatePlasmaStats[bucket_Index_key + '_items_count'],
                self.num_items, "Item count does not match the expected value")
            self.assertEqual(
                interMediatePlasmaStats[bucket_Index_key + '_inserts'],
                self.num_items, "Insert count does not match the expected value")
            self.print_plasma_stats(plasmaDict=interMediatePlasmaStats,
                                    bucket=self.buckets[0],
                                    indexname=indexName[0])

        self.items_add = self.input.param("items_add", 1000000)
        start = self.num_items
        end = self.num_items + self.items_add
        initial_load = doc_generator(self.key,
                                     start,
                                     end,
                                     doc_size=self.doc_size)
        insertTask = self.task.async_load_gen_docs(
            self.cluster,
            self.cluster.buckets[0],
            initial_load,
            DocLoading.Bucket.DocOps.CREATE,
            0,
            batch_size=100,
            process_concurrency=8,
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            sdk_client_pool=self.sdk_client_pool)

        for indexName in indexDict.items():
            drop_query = "Drop INDEX `%s`.`%s` Using GSI" % (
                self.buckets[0].name, indexName[0])
            self.query_client = RestConnection(query_nodes_list[0])
            self.query_client.query_tool(drop_query)
            indexer_rest.polling_delete_index(bucket=self.buckets[0],
                                              index=indexName[0])

        p_stats_with_Five_index = indexer_rest.get_plasma_stats(
            nodes_list=indexer_nodes_list)

        for indexName in indexDict.items():
            bucket_Index_key = self.buckets[0].name + ":" + indexName[0]
            self.assertTrue(
                p_stats_with_Five_index.get(bucket_Index_key + "_memory_size")
                is None, "Dropped index are still present in the plams stats")

        indexName = "initial_idx"
        self.print_plasma_stats(plasmaDict=p_stats_with_Five_index,
                                bucket=self.buckets[0],
                                indexname=indexName)
        bucket_Index_key = self.buckets[0].name + ":" + indexName
        self.log.info(
            "item count is {0} insert count is {1} expected item count {2}".
            format(
                str(p_stats_with_Five_index[bucket_Index_key +
                                            '_items_count']),
                str(p_stats_with_Five_index[bucket_Index_key + '_inserts']),
                str(self.num_items)))
        self.assertTrue(
            int(p_stats_with_Five_index[bucket_Index_key + '_items_count']) >=
            int(self.num_items),
            "Expected item count is {0} but actual value is {1}".format(
                str(p_stats_with_Five_index[bucket_Index_key +
                                            '_items_count']),
                str(self.num_items)))
        self.assertTrue(
            int(p_stats_with_Five_index[bucket_Index_key + '_inserts']) >= int(
                self.num_items),
            "Expected insert count is {0} but actual count is {1}".format(
                str(p_stats_with_Five_index[bucket_Index_key + '_inserts']),
                str(self.num_items)))
        self.cluster_util.print_cluster_stats()
        self.task_manager.stop_task(insertTask)
        self.time_counter = self.input.param("time_counter", 300)

        timer = 0
        flag = False
        for item, itemValue in index_Mem_Map.items():
            flag = False
            while True:
                cluster_stat = rest.get_cluster_stats()
                expectedValue = int(cluster_stat[item]['mem_free'])
                inputValue = int(itemValue)
                if 0.9 * inputValue <= expectedValue:
                    flag = True
                    break
                timer += 1
                self.sleep(1)
                # self.log.info("Initial memory is {0} recent memory is {1}".format(str(inputValue), str(expectedValue)))
                if timer > self.time_counter:
                    break

        self.log.info("Timer value is:" + str(timer))
        self.assertTrue(flag, "Memory not recovered")
Example #8
    def test_item_count_stats(self):
        query_nodes_list = self.cluster_util.get_nodes_from_services_map(
            service_type="n1ql", get_all_nodes=True)
        self.log.info("Starting test")
        i = 0
        indexDict = dict()
        self.index_count = self.input.param("index_count", 11)
        for i in range(self.index_count):
            indexName = "Index0" + str(i)
            index_query = "CREATE INDEX `%s` ON `%s`(`body`)" % (
                indexName, self.buckets[0].name)
            self.query_client = RestConnection(query_nodes_list[0])
            indexDict[indexName] = index_query
            result = self.query_client.query_tool(index_query)
            self.assertTrue(result["status"] == "success",
                            "Index query failed!")

        indexer_rest = GsiHelper(self.cluster.master, self.log)
        indexStatMap = dict()

        counter = 0
        for server in self.cluster.servers:
            server.services = self.services[counter]
            counter = counter + 1
        total_count = 0
        ipIndexDict = dict()
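        # Query each index node's stats endpoint (port 9102) directly and validate per-index doc counts and key size distribution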
        for server in self.cluster.servers:
            if server.services.find("index") != -1:
                generic_url = "http://%s:%s/"
                ip = server.ip
                port = "9102"
                baseURL = generic_url % (ip, port)
                self.log.info(
                    "Try to get index from URL as {}".format(baseURL))
                indexStatMap = indexer_rest.get_index_stats(URL=baseURL)

                self.log.info(
                    "Memory quota is {} and memory used is {} for {}".
                    format(str(indexStatMap['memory_quota']),
                           str(indexStatMap['memory_used']), str(baseURL)))
                total_count = 0
                if self.buckets[0].name in indexStatMap:
                    for key in indexStatMap[self.buckets[0].name]:
                        ipIndexDict[key] = ip + ":" + self.query_client.port
                        self.assertEqual(
                            indexStatMap[self.buckets[0].name][key]
                            ['num_docs_indexed'], self.num_items,
                            "Expected no of items are not coming")
                        self.log.info("Data size for index {} is:{}".format(
                            key,
                            str(indexStatMap[self.buckets[0].name][key]
                                ['data_size'])))
                        self.log.info("Disk size for index:{} is:{}".format(
                            key,
                            str(indexStatMap[self.buckets[0].name][key]
                                ['disk_size'])))

                        self.log.info(
                            "Fragmentation for index {} is {}".format(
                                key,
                                str(indexStatMap[self.buckets[0].name][key]
                                    ['frag_percent'])))

                        for dist in dict(
                                indexStatMap[self.buckets[0].name][key]
                            ['key_size_distribution']).items():
                            total_count = total_count + dist[1]

                        self.assertEqual(
                            total_count, self.num_items,
                            "Expected total no of items are {} in key size distribution list But actual is {}"
                            .format(str(total_count), str(self.num_items)))
                        total_count = 0
        for x in range(5):
            result = indexer_rest.index_status()
            if list(indexDict.keys())[0] in result[self.buckets[0].name]:
                break
            self.sleep(1)

        # Validating the Definition associated with the indexes
        self.log.info("Comparing the actual index list with expected one")
        for key in indexDict:
            self.assertEqual(
                result[self.buckets[0].name][key]['definition'],
                indexDict[key],
                "Index queries is:{} and definition is :{}".format(
                    result[self.buckets[0].name][key]['definition'],
                    indexDict[key]))
            self.assertEqual(
                result[self.buckets[0].name][key]['hosts'], (ipIndexDict[key]),
                "Actual hosts values is: {} and expected host value is: {}".
                format(result[self.buckets[0].name][key]['hosts'],
                       ipIndexDict[key]))
Example #9
    def test_index_upsert_ops(self):
        indexDict = dict()
        indexTask_list = list()
        count = 0

        self.num_replicas = self.input.param("num_replicas", 1)
        self.log.info("Starting upsert test")
        i = 0
        query_nodes_list = self.cluster_util.get_nodes_from_services_map(
            service_type="n1ql", get_all_nodes=True)

        indexer_rest = GsiHelper(self.cluster.master, self.log)
        self.index_count = self.input.param("index_count", 1)
        query_len = len(query_nodes_list)

        for i in range(self.index_count):
            indexName = "Index" + str(i)
            index_query = "CREATE INDEX `%s` ON `%s`(`body`) with {\"num_replica\":%s}" % (
                indexName, self.buckets[0].name, self.num_replicas)
            instance = i % query_len
            self.query_client = RestConnection(query_nodes_list[instance])
            indexDict[indexName] = index_query
            self.query_client.query_tool(index_query)
            result = indexer_rest.polling_create_index_status(
                bucket=self.buckets[0], index=indexName)
            # self.assertTrue(result, "Index query failed!")
            self.log.info("Status is:" + str(result))

        # Run 20 queries
        tasks_info = list()
        queryList = list()
        contentType = 'application/x-www-form-urlencoded'
        connection = 'keep-alive'
        count = self.index_count
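        # Each query is pinned to one of the created indexes (round-robin) via USE INDEX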
        for x in range(20):
            index_instance = x % count
            queryString = self.randStr(Num=4)
            query = "select * from `%s` data USE INDEX (%s USING GSI) where body like '%%%s%%' limit 10" % (
                self.buckets[0].name, indexDict.keys()[index_instance],
                queryString)
            task = self.task.aysnc_execute_query(query_nodes_list[0], query,
                                                 contentType, connection)
            tasks_info.append(task)

        for taskInstance in tasks_info:
            self.task.jython_task_manager.get_task_result(taskInstance)

        self.items_add = self.input.param("items_add", 1000000)
        start = self.num_items
        end = self.num_items + self.items_add
        initial_load = doc_generator(self.key,
                                     start,
                                     end,
                                     doc_size=self.doc_size)
        insertTask = self.task.async_load_gen_docs(
            self.cluster,
            self.cluster.buckets[0],
            initial_load,
            DocLoading.Bucket.DocOps.CREATE,
            0,
            batch_size=100,
            process_concurrency=8,
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            sdk_client_pool=self.sdk_client_pool)

        # Add 2 more index nodes to the cluster
        self.nodes_in = self.input.param("nodes_in", 2)
        count = len(self.dcp_services) + self.nodes_init
        nodes_in = self.cluster.servers[count:count + self.nodes_in]
        services = ["index", "n1ql"]
        result = self.task.rebalance([self.cluster.master],
                                     nodes_in, [],
                                     services=services)

        # Create 5 index in parallel
        count = count + 5
        counter = self.index_count
        self.iterations = self.input.param("iterations", 20)
        for counter in range(self.index_count, count):
            indexName = "Index" + str(counter)
            query = "CREATE INDEX `%s` ON `%s`(`body`) with {\"num_replica\":%s}" % (
                indexName, self.buckets[0].name, self.num_replicas)
            task = self.task.aysnc_execute_query(server=query_nodes_list[0],
                                                 query=query,
                                                 bucket=self.buckets[0],
                                                 indexName=indexName,
                                                 isIndexerQuery=True)
            indexTask_list.append(task)
            indexDict[indexName] = query
            counter += 1
        for taskInstance in indexTask_list:
            self.task.jython_task_manager.get_task_result(taskInstance)

        # Run 5  queries
        tasks_info = list()
        for x in range(5):
            query_node_index = x % len(query_nodes_list)
            index_instance = x % count
            self.log.info("Index for query node is:" + str(query_node_index))
            queryString = self.randStr(Num=8)
            query = "select * from `%s` data USE INDEX (%s USING GSI) where body like '%%%s%%' limit 10" % (
                self.buckets[0].name, indexDict.keys()[index_instance],
                queryString)
            task = self.task.aysnc_execute_query(
                query_nodes_list[query_node_index], query, contentType,
                connection)
            tasks_info.append(task)

        for taskInstance in tasks_info:
            self.task.jython_task_manager.get_task_result(taskInstance)

        new_task_info = list()
        self.log.info("Starting executing 10 queries")
        # Run 20 queries
        for x in range(20):
            query_node_index = x % len(query_nodes_list)
            index_instance = x % count
            queryString = self.randStr(Num=8)
            query = "select * from `%s` data USE INDEX (%s USING GSI) where body like '%%%s%%' limit 10" % (
                self.buckets[0].name, indexDict.keys()[index_instance],
                queryString)
            task = self.task.aysnc_execute_query(
                query_nodes_list[query_node_index], query, contentType,
                connection)
            new_task_info.append(task)

        for taskInstance in new_task_info:
            self.task.jython_task_manager.get_task_result(taskInstance)
        task_list = list()
        # Run 40 queries
        for x in range(40):
            query_node_index = x % len(query_nodes_list)
            queryString = self.randStr(Num=8)
            index_instance = x % count
            query = "select * from `%s` data USE INDEX (%s USING GSI) where body like '%%%s%%' limit 10" % (
                self.buckets[0].name, indexDict.keys()[index_instance],
                queryString)
            task = self.task.aysnc_execute_query(
                query_nodes_list[query_node_index], query, contentType,
                connection)
            task_list.append(task)

        for taskInstance in task_list:
            self.task.jython_task_manager.get_task_result(taskInstance)
        self.task_manager.stop_task(insertTask)
        self.cluster_util.print_cluster_stats()