Example #1
    def upgrade_all_nodes_post_463(self):
        servers_in = self.servers[1:]
        self._install(self.servers)
        self.cluster.rebalance(self.servers, servers_in, [])

        for server in self.servers:
            self.secretmgmt_base_obj.setup_pass_node(server, self.password)
            self.secretmgmt_base_obj.restart_server_with_env(
                self.master, self.password)
            temp_result = self.secretmgmt_base_obj.check_log_files(
                self.master, "/babysitter.log", "Booted")
            self.assertTrue(
                temp_result,
                "Babysitter.log does not contain node initialization code")

        upgrade_threads = self._async_update(
            upgrade_version=self.upgrade_version, servers=self.servers)
        for threads in upgrade_threads:
            threads.join()

        for server in self.servers:
            rest = RestConnection(server)
            temp = rest.cluster_status()
            self.log.info("Initial status of {0} cluster is {1}".format(
                server.ip, temp['nodes'][0]['status']))
            while temp['nodes'][0]['status'] == 'warmup':
                self.log.info("Waiting for cluster to become healthy")
                self.sleep(5)
                temp = rest.cluster_status()
            self.log.info("current status of {0} is {1}".format(
                server.ip, temp['nodes'][0]['status']))
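
The warm-up loop above polls cluster_status() with no upper bound, so a node that never leaves 'warmup' hangs the test. A minimal, timeout-bounded sketch of the same idea (only the RestConnection.cluster_status() payload shape used throughout these examples is assumed; the helper name and defaults are illustrative):

import time

def wait_until_healthy(server, timeout=300, poll_interval=5):
    # Sketch: poll pools/default until the node leaves 'warmup',
    # failing instead of looping forever.
    rest = RestConnection(server)
    time_max_end = time.time() + timeout
    while time.time() < time_max_end:
        status = rest.cluster_status()['nodes'][0]['status']
        if status != 'warmup':
            return status
        time.sleep(poll_interval)
    raise AssertionError("node {0} still in warmup after {1} seconds"
                         .format(server.ip, timeout))
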
Example #3
 def setUp(self):
     super(XDCRTests, self).setUp()
     self.bucket = Bucket()
     self._initialize_nodes()
     self.master = self.servers[0]
     for server in self.servers:
         rest = RestConnection(server)
         cluster_status = rest.cluster_status()
         self.log.info("Initial status of {0} cluster is {1}".format(
             server.ip, cluster_status['nodes'][0]['status']))
         while cluster_status['nodes'][0]['status'] == 'warmup':
             self.log.info("Waiting for cluster to become healthy")
             self.sleep(5)
             cluster_status = rest.cluster_status()
         self.log.info("current status of {0}  is {1}".format(
             server.ip, cluster_status['nodes'][0]['status']))
     # Delete all buckets before creating new buckets
     self.log.info("Deleting all existing buckets")
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     self.log.info("Creating new buckets")
     src_bucket = self.input.param('src_bucket', self.bucket)
     dest_bucket = self.input.param('dest_bucket', self.bucket)
     if src_bucket:
         RestConnection(self.servers[0]).create_bucket(bucket='default',
                                                       ramQuotaMB=500)
     if dest_bucket:
         RestConnection(self.servers[1]).create_bucket(bucket='default',
                                                       ramQuotaMB=500)
     helper = BaseHelper(self)
     helper.login()
Example #4
    def wait_for_warmup_or_assert(master, warmup_count, timeout, testcase):
        time_start = time.time()
        time_max_end = time_start + timeout
        bucket_name = testcase.rest.get_buckets()[0].name
        num_nodes_with_warmup = 0
        while time.time() < time_max_end:
            num_nodes_with_warmup = 0
            for node in testcase.rest.get_bucket(bucket_name).nodes:
                if node.status == 'warmup':
                    num_nodes_with_warmup += 1
            if num_nodes_with_warmup == warmup_count:
                testcase.log.info("{0} nodes warmup as expected".format(
                    num_nodes_with_warmup))
                testcase.log.info("expected warmup in {0} seconds, actual time {1} seconds".format \
                                      (timeout - AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                       time.time() - time_start))
                return
            time.sleep(2)

        rest = RestConnection(master)
        rest.print_UI_logs()
        testcase.log.warn("pools/default from {0} : {1}".format(
            master.ip, rest.cluster_status()))
        testcase.fail("{0} nodes warmup, expected {1} in {2} seconds".format(
            num_nodes_with_warmup, warmup_count,
            time.time() - time_start))
Example #5
    def wait_for_failover_or_assert(master, autofailover_count, timeout,
                                    testcase):
        time_start = time.time()
        time_max_end = time_start + timeout
        failover_count = 0
        while time.time() < time_max_end:
            failover_count = AutoFailoverBaseTest.get_failover_count(master)
            if failover_count == autofailover_count:
                testcase.log.info(
                    "{0} nodes failed over as expected".format(failover_count))
                testcase.log.info("expected failover in {0} seconds, actual time {1} seconds".format\
                              (timeout - AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, time.time() - time_start))
                return
            time.sleep(2)

        rest = RestConnection(master)
        testcase.log.info("Latest logs from UI:")
        for i in rest.get_logs():
            testcase.log.error(i)
        testcase.log.warn("pools/default from {0} : {1}".format(
            master.ip, rest.cluster_status()))
        testcase.fail(
            "{0} nodes failed over, expected {1} in {2} seconds".format(
                failover_count, autofailover_count,
                time.time() - time_start))
Example #6
 def get_indexer_mem_quota(self, indexer_node):
     """
     Get Indexer memory Quota
     :param indexer_node:
     """
     rest = RestConnection(indexer_node)
     content = rest.cluster_status()
     return int(content['indexMemoryQuota'])

 def get_failover_count(self):
     rest = RestConnection(self.master)
     cluster_status = rest.cluster_status()
     failover_count = 0
     # check for inactiveFailed
     for node in cluster_status['nodes']:
         if node['clusterMembership'] == "inactiveFailed":
             failover_count += 1
     return failover_count
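
Both helpers above read fields out of the same pools/default payload returned by cluster_status(). A hedged sketch of a generic tally over that payload (the 'nodes' and 'clusterMembership' keys come from the examples on this page; the helper itself is illustrative):

from collections import Counter

def count_nodes_by_membership(master):
    # Sketch: tally nodes by their clusterMembership value; assumes
    # cluster_status() returns the pools/default JSON with a 'nodes'
    # list, as in the snippets above.
    rest = RestConnection(master)
    return Counter(node['clusterMembership']
                   for node in rest.cluster_status()['nodes'])

Usage note: count_nodes_by_membership(master).get('inactiveFailed', 0) reproduces the failover count computed by get_failover_count() above.
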
Example #8
    def upgrade_half_nodes(self):
        serv_upgrade = self.servers[2:4]
        servers_in = self.servers[1:]
        self._install(self.servers)
        self.cluster.rebalance(self.servers, servers_in, [])

        upgrade_threads = self._async_update(
            upgrade_version=self.upgrade_version, servers=serv_upgrade)
        for threads in upgrade_threads:
            threads.join()

        for server in serv_upgrade:
            rest = RestConnection(server)
            temp = rest.cluster_status()
            self.log.info("Initial status of {0} cluster is {1}".format(server.ip, temp['nodes'][0]['status']))
            while (temp['nodes'][0]['status'] == 'warmup'):
                self.log.info("Waiting for cluster to become healthy")
                self.sleep(5)
                temp = rest.cluster_status()
            self.log.info("current status of {0}  is {1}".format(server.ip, temp['nodes'][0]['status']))
Example #10
 def get_indexer_mem_quota(self):
     """
     Sets Indexer memory Quota
     :param memQuota:
     :return:
     int indexer memory quota
     """
     rest = RestConnection(self.oomServer)
     content = rest.cluster_status()
     return int(content['indexMemoryQuota'])
Example #11
    def get_failover_count(master):
        rest = RestConnection(master)
        cluster_status = rest.cluster_status()

        failover_count = 0
        # check for inactiveFailed
        for node in cluster_status['nodes']:
            if node['clusterMembership'] == "inactiveFailed":
                failover_count += 1

        return failover_count
Example #12
    def get_failover_count(master):
        rest = RestConnection(master)
        cluster_status = rest.cluster_status()

        failover_count = 0
        # check for unhealthy nodes
        for node in cluster_status['nodes']:
            log.info("'status' for node {0} is {1}".format(
                node["otpNode"], node['status']))
            if node['status'] == "unhealthy":
                failover_count += 1

        return failover_count

    def get_failover_count(self, master):
        rest = RestConnection(master)
        cluster_status = rest.cluster_status()

        failover_count = 0
        # check for inactiveFailed
        for node in cluster_status['nodes']:
            self.log.info("'clusterMembership' for node {0} is {1}".format(node["otpNode"], node['clusterMembership']))
            if node['clusterMembership'] == "inactiveFailed":
                failover_count += 1

        return failover_count
Example #14
    def get_failover_count(master):
        rest = RestConnection(master)
        cluster_status = rest.cluster_status()
        log = logger.Logger.get_logger()

        failover_count = 0
        # check for inactiveFailed
        for node in cluster_status["nodes"]:
            log.info("'clusterMembership' for node {0} is {1}".format(node["otpNode"], node["clusterMembership"]))
            if node["clusterMembership"] == "inactiveFailed":
                failover_count += 1

        return failover_count
Example #15
    def wait_for_failover_or_assert(master, autofailover_count, age, testcase):
        testcase.log.info(
            "waiting for {0} seconds for autofailover".format(age + 30))
        time.sleep(age + 30)

        rest = RestConnection(master)
        cluster_status = rest.cluster_status()

        failover_count = 0
        # check for inactiveFailed
        for node in cluster_status['nodes']:
            testcase.log.info("{0} is in state {1} and {2}".format(node['hostname'],node['status'],node['clusterMembership']))
            if node['clusterMembership'] == "inactiveFailed":
                failover_count += 1

        testcase.assertTrue(
            failover_count == autofailover_count,
            "{0} nodes failed over, expected {1}".format(
                failover_count, autofailover_count))
Example #17
    def wait_for_failover_or_assert(self, master, autofailover_count, timeout):
        time_start = time.time()
        time_max_end = time_start + timeout
        failover_count = 0
        while time.time() < time_max_end:
            failover_count = self.get_failover_count(master)
            if failover_count == autofailover_count:
                break
            self.sleep(30)

        if failover_count != autofailover_count:
            rest = RestConnection(master)
            self.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
            self.fail("{0} node(s) failed over, expected {1} in {2} seconds".
                            format(failover_count, autofailover_count, time.time() - time_start))
        else:
            self.log.info("{0} node(s) failed over as expected".format(failover_count))
Example #18
File: task.py  Project: jchris/testrunner
    def _get_current_auto_compaction_percentage(self):
        """ check at bucket level and cluster level for compaction percentage """

        auto_compact_percentage = None
        rest = RestConnection(self.server)

        content = rest.get_bucket_json(self.bucket)
        if content["autoCompactionSettings"] != False:
            auto_compact_percentage =\
                content["autoCompactionSettings"]["viewFragmentationThreshold"]["percentage"]
        else:
            # try to read cluster level compaction settings
            content = rest.cluster_status()
            auto_compact_percentage =\
                content["autoCompactionSettings"]["viewFragmentationThreshold"]["percentage"]

        return auto_compact_percentage
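
The nested lookups in the helper above raise KeyError when either level omits the setting. A defensive variant using dict.get() (a sketch; the key names are taken from the snippet above, and returning None when nothing is configured is an assumption):

def get_compaction_percentage_or_none(rest, bucket):
    # Sketch: read bucket-level settings first, then fall back to the
    # cluster level; return None when neither defines the threshold.
    settings = rest.get_bucket_json(bucket).get("autoCompactionSettings")
    if not settings:  # False or missing -> cluster-level settings
        settings = rest.cluster_status().get("autoCompactionSettings", {})
    threshold = settings.get("viewFragmentationThreshold") or {}
    return threshold.get("percentage")
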
Example #19
    def wait_for_failover_or_assert(master, autofailover_count, timeout, testcase):
        time_start = time.time()
        time_max_end = time_start + timeout
        failover_count = 0
        while time.time() < time_max_end:
            failover_count = AutoFailoverBaseTest.get_failover_count(master)
            if failover_count == autofailover_count:
                testcase.log.info("{0} nodes failed over as expected".format(failover_count))
                testcase.log.info("expected failover in {0} seconds, actual time {1} seconds".format\
                              (timeout - AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, time.time() - time_start))
                return
            time.sleep(2)

        rest = RestConnection(master)
        rest.print_UI_logs()
        testcase.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
        testcase.fail("{0} nodes failed over, expected {1} in {2} seconds".
                         format(failover_count, autofailover_count, time.time() - time_start))
Example #20
File: cluster.py  Project: umang-cb/TAF
    def scenario_failover_node_delta_recovery(self, kwargs):
        services = kwargs.get("services")
        nodes_in_failed_state = list()

        rest = RestConnection(self.cluster.master)
        nodes_status = rest.cluster_status()["nodes"]
        for node in nodes_status:
            if node["clusterMembership"] == "inactiveFailed":
                nodes_in_failed_state.append(node["otpNode"].split("@")[1])

        self.log.info("Nodes in failover state: %s" % nodes_in_failed_state)
        if len(services) > len(nodes_in_failed_state):
            self.log.warning(
                "Failed-over nodes (%s) fewer than the %s services expected "
                "to recover" % (len(nodes_in_failed_state), len(services)))
        for index, service in enumerate(services):
            rest.set_recovery_type(
                otpNode="ns_1@" + nodes_in_failed_state[index],
                recoveryType=CbServer.Failover.RecoveryType.DELTA)

    def wait_for_failover_or_assert(master, autofailover_count, timeout,
                                    testcase):
        time_start = time.time()
        time_max_end = time_start + timeout + 60
        failover_count = 0
        while time.time() < time_max_end:
            failover_count = AutoFailoverBaseTest.get_failover_count(master)
            if failover_count == autofailover_count:
                break
            time.sleep(2)

        if failover_count != autofailover_count:
            rest = RestConnection(master)
            testcase.log.info("Latest logs from UI:")
            for i in rest.get_logs():
                testcase.log.error(i)
            testcase.log.warn("pools/default from {0} : {1}".format(
                master.ip, rest.cluster_status()))
            testcase.fail("{0} nodes failed over, expected {1} in {2} seconds"
                          .format(failover_count, autofailover_count,
                                  time.time() - time_start))
        else:
            testcase.log.info(
                "{0} nodes failed over as expected".format(failover_count))
Example #23
    def testSettingCompacttion(self):
        '''setting-compacttion OPTIONS:
        --compaction-db-percentage=PERCENTAGE     at which point database compaction is triggered
        --compaction-db-size=SIZE[MB]             at which point database compaction is triggered
        --compaction-view-percentage=PERCENTAGE   at which point view compaction is triggered
        --compaction-view-size=SIZE[MB]           at which point view compaction is triggered
        --compaction-period-from=HH:MM            allow compaction time period from
        --compaction-period-to=HH:MM              allow compaction time period to
        --enable-compaction-abort=[0|1]           allow compaction abort when time expires
        --enable-compaction-parallel=[0|1]        allow parallel compaction for database and view'''
        compaction_db_percentage = self.input.param("compaction-db-percentage", None)
        compaction_db_size = self.input.param("compaction-db-size", None)
        compaction_view_percentage = self.input.param("compaction-view-percentage", None)
        compaction_view_size = self.input.param("compaction-view-size", None)
        compaction_period_from = self.input.param("compaction-period-from", None)
        compaction_period_to = self.input.param("compaction-period-to", None)
        enable_compaction_abort = self.input.param("enable-compaction-abort", None)
        enable_compaction_parallel = self.input.param("enable-compaction-parallel", None)
        bucket = self.input.param("bucket", "default")
        output = self.input.param("output", '')
        rest = RestConnection(self.master)
        remote_client = RemoteMachineShellConnection(self.master)
        self.testBucketCreation()
        cli_command = "setting-compacttion"
        options = "--bucket={0}".format(bucket)
        options += (" --compaction-db-percentage={0}".format(compaction_db_percentage), "")[compaction_db_percentage is None]
        options += (" --compaction-db-size={0}".format(compaction_db_size), "")[compaction_db_size is None]
        options += (" --compaction-view-percentage={0}".format(compaction_view_percentage), "")[compaction_view_percentage is None]
        options += (" --compaction-view-size={0}".format(compaction_view_size), "")[compaction_view_size is None]
        options += (" --compaction-period-from={0}".format(compaction_period_from), "")[compaction_period_from is None]
        options += (" --compaction-period-to={0}".format(compaction_period_to), "")[compaction_period_to is None]
        options += (" --enable-compaction-abort={0}".format(enable_compaction_abort), "")[enable_compaction_abort is None]
        options += (" --enable-compaction-parallel={0}".format(enable_compaction_parallel), "")[enable_compaction_parallel is None]

        output, error = remote_client.execute_couchbase_cli(
            cli_command=cli_command, options=options, cluster_host="localhost",
            user="******", password="******")
        self.assertEqual(output, ['SUCCESS: bucket-edit'])
        cluster_status = rest.cluster_status()
        remote_client.disconnect()
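
The (value, "")[flag is None] tuple-indexing idiom above works but is easy to misread. An equivalent, more readable way to assemble the same options string (a sketch; the helper is hypothetical, and the flag names mirror the test above):

def build_cli_options(bucket, **flags):
    # Sketch: emit --name=value for every flag that is not None,
    # mapping snake_case keyword names to hyphenated CLI flags.
    parts = ["--bucket={0}".format(bucket)]
    for name, value in sorted(flags.items()):
        if value is not None:
            parts.append("--{0}={1}".format(name.replace("_", "-"), value))
    return " ".join(parts)

# e.g. build_cli_options(bucket,
#                        compaction_db_percentage=compaction_db_percentage,
#                        enable_compaction_abort=enable_compaction_abort)
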
Example #24
    def offline(self, node_to_upgrade, version, rebalance_required=False):
        rest = RestConnection(node_to_upgrade)
        shell = RemoteMachineShellConnection(node_to_upgrade)
        appropriate_build = self.__get_build(version, shell)
        self.assertTrue(appropriate_build.url,
                        msg="Unable to find build %s" % version)
        self.assertTrue(shell.download_build(appropriate_build),
                        "Failed while downloading the build!")

        self.log.info("Starting node upgrade")
        upgrade_success = shell.couchbase_upgrade(
            appropriate_build, save_upgrade_config=False,
            forcefully=self.is_downgrade)
        shell.disconnect()
        if not upgrade_success:
            self.log_failure("Upgrade failed")
            return

        self.log.info("Wait for ns_server to accept connections")
        if not rest.is_ns_server_running(timeout_in_seconds=120):
            self.log_failure("Server not started post upgrade")
            return

        self.log.info("Validate the cluster rebalance status")
        if not rest.cluster_status()["balanced"]:
            if rebalance_required:
                otp_nodes = [node.id for node in rest.node_statuses()]
                rest.rebalance(otpNodes=otp_nodes, ejectedNodes=[])
                rebalance_passed = rest.monitorRebalance()
                if not rebalance_passed:
                    self.log_failure(
                        "Rebalance failed post node upgrade of {0}"
                        .format(node_to_upgrade))
                    return
            else:
                self.log_failure("Cluster reported (/pools/default) balanced=false")
                return
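
offline() above treats the 'balanced' field of cluster_status() as the cluster-level rebalance indicator. A small polling wrapper around the same check (a sketch; only the boolean 'balanced' key is taken from the example, and the timeout default is illustrative):

import time

def wait_until_balanced(rest, timeout=120, poll_interval=5):
    # Sketch: poll pools/default until it reports balanced=true;
    # return False if the timeout expires first.
    time_max_end = time.time() + timeout
    while time.time() < time_max_end:
        if rest.cluster_status().get("balanced", False):
            return True
        time.sleep(poll_interval)
    return False
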
Example #25
    def setUp(self):
        super(QueryTests, self).setUp()
        self.expiry = self.input.param("expiry", 0)
        self.batch_size = self.input.param("batch_size", 1)
        self.scan_consistency = self.input.param("scan_consistency",
                                                 "request_plus")
        self.skip_cleanup = self.input.param("skip_cleanup", False)
        self.run_async = self.input.param("run_async", True)
        self.version = self.input.param("cbq_version", "git_repo")
        for server in self.servers:
            rest = RestConnection(server)
            temp = rest.cluster_status()
            self.log.info("Initial status of {0} cluster is {1}".format(
                server.ip, temp['nodes'][0]['status']))
            while temp['nodes'][0]['status'] == 'warmup':
                self.log.info("Waiting for cluster to become healthy")
                self.sleep(5)
                temp = rest.cluster_status()
            self.log.info("current status of {0} is {1}".format(
                server.ip, temp['nodes'][0]['status']))

        indexer_node = self.get_nodes_from_services_map(service_type="index",
                                                        get_all_nodes=True)
        # Set indexer storage mode
        indexer_rest = RestConnection(indexer_node[0])
        doc = {"indexer.settings.storage_mode": self.gsi_type}
        indexer_rest.set_index_settings_internal(doc)
        doc = {"indexer.api.enableTestServer": True}
        indexer_rest.set_index_settings_internal(doc)
        self.indexer_scanTimeout = self.input.param("indexer_scanTimeout",
                                                    None)
        if self.indexer_scanTimeout is not None:
            for server in indexer_node:
                rest = RestConnection(server)
                rest.set_index_settings({
                    "indexer.settings.scan_timeout":
                    self.indexer_scanTimeout
                })
        if self.input.tuq_client and "client" in self.input.tuq_client:
            self.shell = RemoteMachineShellConnection(
                self.input.tuq_client["client"])
        else:
            self.shell = RemoteMachineShellConnection(self.master)
        self.use_gsi_for_primary = self.input.param("use_gsi_for_primary",
                                                    True)
        self.use_gsi_for_secondary = self.input.param("use_gsi_for_secondary",
                                                      True)
        self.create_primary_index = self.input.param("create_primary_index",
                                                     True)
        self.use_rest = self.input.param("use_rest", True)
        self.max_verify = self.input.param("max_verify", None)
        self.buckets = RestConnection(self.master).get_buckets()
        self.docs_per_day = self.input.param("doc-per-day", 49)
        self.item_flag = self.input.param("item_flag", 4042322160)
        self.n1ql_port = self.input.param("n1ql_port", 8093)
        self.dataset = self.input.param("dataset", "default")
        self.value_size = self.input.param("value_size", 1024)
        self.doc_ops = self.input.param("doc_ops", False)
        self.create_ops_per = self.input.param("create_ops_per", 0)
        self.expiry_ops_per = self.input.param("expiry_ops_per", 0)
        self.delete_ops_per = self.input.param("delete_ops_per", 0)
        self.update_ops_per = self.input.param("update_ops_per", 0)
        self.gens_load = self.generate_docs(self.docs_per_day)
        if self.input.param("gomaxprocs", None):
            self.n1ql_helper.configure_gomaxprocs()
        self.full_docs_list = self.generate_full_docs_list(self.gens_load)
        self.gen_results = TuqGenerators(self.log, self.full_docs_list)
        verify_data = False
        if self.scan_consistency != "request_plus":
            verify_data = True
        self.load(self.gens_load,
                  flag=self.item_flag,
                  verify_data=verify_data,
                  batch_size=self.batch_size)
        if self.doc_ops:
            self.ops_dist_map = self.calculate_data_change_distribution(
                create_per=self.create_ops_per,
                update_per=self.update_ops_per,
                delete_per=self.delete_ops_per,
                expiry_per=self.expiry_ops_per,
                start=0,
                end=self.docs_per_day)
            self.log.info(self.ops_dist_map)
            self.docs_gen_map = self.generate_ops_docs(self.docs_per_day, 0)
            self.full_docs_list_after_ops = self.generate_full_docs_list_after_ops(
                self.docs_gen_map)
        # Helper used to run N1QL queries and to create/drop indexes
        self.n1ql_helper = N1QLHelper(version=self.version,
                                      shell=self.shell,
                                      use_rest=self.use_rest,
                                      max_verify=self.max_verify,
                                      buckets=self.buckets,
                                      item_flag=self.item_flag,
                                      n1ql_port=self.n1ql_port,
                                      full_docs_list=self.full_docs_list,
                                      log=self.log,
                                      input=self.input,
                                      master=self.master)
        self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        self.log.info(self.n1ql_node)
        #self.n1ql_helper._start_command_line_query(self.n1ql_node)
        # sleep to avoid race condition during bootstrap
        if self.create_primary_index:
            try:
                self.n1ql_helper.create_primary_index(
                    using_gsi=self.use_gsi_for_primary, server=self.n1ql_node)
            except Exception as ex:
                self.log.info(ex)
                raise ex