Example #1
0
    def remove_node(self, otpnode=None, wait_for_rebalance=True):
        """Rebalance the given nodes out of the cluster.

        :param otpnode: list of otp nodes to eject. No-op when None or
            empty (the original crashed with ``TypeError: len(None)`` on
            the default value).
        :param wait_for_rebalance: when True, block until the rebalance
            finishes and assert that the removal succeeded.
        """
        # Guard against the default/empty argument: nothing to remove.
        if not otpnode:
            return
        nodes = self.rest.node_statuses()
        '''This is the case when master node is running cbas service as well'''
        # Never eject every node in the cluster.
        if len(nodes) <= len(otpnode):
            return

        helper = RestHelper(self.rest)
        # The node set does not change between the two attempts, so build
        # the id lists once instead of recomputing them on retry.
        known_ids = [node.id for node in nodes]
        ejected_ids = [node.id for node in otpnode]
        try:
            removed = helper.remove_nodes(
                knownNodes=known_ids,
                ejectedNodes=ejected_ids,
                wait_for_rebalance=wait_for_rebalance)
        except Exception:
            # Known flakiness: the first removal rebalance sometimes fails.
            # Wait briefly and retry once before giving up.
            self.sleep(
                5,
                "First time rebalance failed on Removal. Wait and try again. THIS IS A BUG."
            )
            removed = helper.remove_nodes(
                knownNodes=known_ids,
                ejectedNodes=ejected_ids,
                wait_for_rebalance=wait_for_rebalance)
        if wait_for_rebalance:
            self.assertTrue(
                removed,
                "Rebalance operation failed while removing %s," % otpnode)
Example #2
0
    def setUp(self):
        """Prepare FTS server-group fixtures.

        Establishes REST handles against the cluster master, records the
        built-in default group name and a canonical FTS query used by the
        tests, then resets the server-group layout to a known state.
        """
        super(FTSServerGroups, self).setUp()
        # REST connection and helper bound to the master node.
        master_node = self._cb_cluster.get_master_node()
        self.rest = RestConnection(master_node)
        self.helper = RestHelper(self.rest)
        # Couchbase's built-in default server group.
        self.default_group_name = "Group 1"
        # Query reused across the server-group test cases.
        self.fts_query = {"match": "emp", "field": "type"}
        # Start each test from a clean server-group configuration.
        self._cleanup_server_groups()
Example #3
0
 def test_ns_server_with_rebalance_failover_with_redaction_enabled(self):
     """Verify cbcollect log redaction across rebalance and failover.

     Flow: load docs, enable redaction, start log collection, rebalance a
     kv node in, gracefully fail it over and rebalance it out, then check
     that both redacted and unredacted archives exist and that
     ns_server.debug.log was actually redacted.
     """
     # NOTE(review): kv_node is never used below -- confirm it is needed.
     kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
     rest = RestConnection(self.master)
     # load bucket and do some ops
     gen_create = BlobGenerator('logredac', 'logredac-', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, gen_create, "create", 0)
     gen_delete = BlobGenerator('logredac', 'logredac-', self.value_size, start=self.num_items / 2,
                                end=self.num_items)
     gen_update = BlobGenerator('logredac', 'logredac-', self.value_size, start=self.num_items + 1,
                                end=self.num_items * 3 / 2)
     # NOTE(review): both generators are applied with op "create" despite
     # the _delete/_update names -- confirm this is intentional.
     self._load_all_buckets(self.master, gen_delete, "create", 0)
     self._load_all_buckets(self.master, gen_update, "create", 0)
     # set log redaction level, collect logs, verify log files exist and verify them for redaction
     self.set_redaction_level()
     self.start_logs_collection()
     services_in = ["kv"]
     to_add_nodes = [self.servers[self.nodes_init]]
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
     # Wait for rebalance progress via REST, then surface any task failure.
     reached = RestHelper(rest).rebalance_reached()
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
     # failover a node
     server_failed_over = self.servers[self.nodes_init]
     fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[server_failed_over], graceful=True)
     fail_over_task.result()
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [server_failed_over])
     reached = RestHelper(rest).rebalance_reached()
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
     result = self.monitor_logs_collection()
     log.info(result)
     # Per-node result key is normally "ns_1@<ip>"; fall back to the
     # alternate key when the master's entry is not under that name.
     # NOTE(review): the fallback key looks like a masking artifact --
     # confirm the intended literal node name.
     try:
         logs_path = result["perNode"]["ns_1@" + str(self.master.ip)]["path"]
     except KeyError:
         logs_path = result["perNode"]["*****@*****.**"]["path"]
     # The collected archive carries a "-redacted" suffix; derive the
     # unredacted name and the remote directory from the reported path.
     redactFileName = logs_path.split('/')[-1]
     nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
     remotepath = logs_path[0:logs_path.rfind('/') + 1]
     self.verify_log_files_exist(remotepath=remotepath,
                                 redactFileName=redactFileName,
                                 nonredactFileName=nonredactFileName)
     self.verify_log_redaction(remotepath=remotepath,
                               redactFileName=redactFileName,
                               nonredactFileName=nonredactFileName,
                               logFileName="ns_server.debug.log")
Example #4
0
 def test_opposite_address_family_is_blocked(self):
     """Verify only the configured IP address family is served.

     Starts a rebalance-in, optionally flips the cluster ip-family back
     and forth mid-rebalance via couchbase-cli (with autofailover
     disabled around each switch), and validates address-family
     enforcement before, during, and after the rebalance, and once more
     after rebooting the master node.
     """
     services_in = []
     # self.services_in is "-"-separated entries like "kv:..."; keep only
     # the service name before each ":".
     for service in self.services_in.split("-"):
         services_in.append(service.split(":")[0])
     # Validate before the test starts
     self._validate_ip_addrress_family()
     nodes_in = self.servers[self.nodes_init:]
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], nodes_in, [],
                                              services=services_in)
     self.sleep(2)
     rest = RestConnection(self.master)
     # Only wait until ~30% progress so the ip-family switching below
     # happens while the rebalance is still running.
     reached = RestHelper(rest).rebalance_reached(percentage=30)
     if self.change_addr_family:
         if self.ipv4_only:
             cli = CouchbaseCLI(self.master, self.master.rest_username, self.master.rest_password)
             # Disable autofailover so the temporary connectivity change
             # during the family switch does not trigger a failover.
             cli.setting_autofailover(0, 60)
             _, _, success = cli.set_ip_family("ipv6only")
             if not success:
                 self.fail("Unable to change ip-family to ipv6only")
             self.check_ip_family_enforcement(ip_family="ipv6_only")
             self.sleep(2)
             # Restore the original family and re-enable autofailover.
             _, _, success = cli.set_ip_family("ipv4only")
             if not success:
                 self.fail("Unable to change ip-family to ipv4only")
             cli.setting_autofailover(1, 60)
             self.check_ip_family_enforcement(ip_family="ipv4_only")
         if self.ipv6_only:
             # Mirror of the ipv4_only branch: temporarily switch to
             # ipv4only, then restore ipv6only.
             cli = CouchbaseCLI(self.master, self.master.rest_username, self.master.rest_password)
             cli.setting_autofailover(0, 60)
             _, _, success = cli.set_ip_family("ipv4only")
             if not success:
                 self.fail("Unable to change ip-family to ipv4only")
             self.check_ip_family_enforcement(ip_family="ipv4_only")
             self.sleep(2)
             _, _, success = cli.set_ip_family("ipv6only")
             if not success:
                 self.fail("Unable to change ip-family to ipv6only")
             cli.setting_autofailover(1, 60)
             self.check_ip_family_enforcement(ip_family="ipv6_only")
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     # Validate during rebalance
     self._validate_ip_addrress_family()
     rebalance.result()
     self.sleep(20)
     # Validate post rebalance
     self._validate_ip_addrress_family()
     # Reboot the master node
     shell = RemoteMachineShellConnection(self.master)
     shell.reboot_node()
     # Give the node time to come back up before the final validation.
     self.sleep(180)
     # Validate post reboot
     self._validate_ip_addrress_family()
    def test_drop_scope_collection_rebalance_kv(self):
        """Drop a collection and a scope while a KV rebalance-out runs.

        Creates bucket1 with scope1 and a collection under _default,
        starts a rebalance that removes servers[1], and verifies both
        drop operations succeed mid-rebalance.  The finally block always
        waits for the rebalance to settle so later tests start from a
        quiet cluster.
        """
        bucket_name = "bucket1"
        scope_name = "scope1"
        collection_name = "collection1"

        self.cluster.create_standard_bucket(bucket_name, 11222,
                                            self.bucket_params)
        scope_created = self.collections_helper.create_scope(
            bucket_name=bucket_name, scope_name=scope_name)
        self.assertTrue(scope_created, "Cannot create scope")
        # NOTE(review): the collection is created under "_default", not
        # under the scope created above -- confirm this is intentional.
        collection_created = self.collections_helper.create_collection(
            bucket_name=bucket_name,
            scope_name="_default",
            collection_name=collection_name)
        self.assertTrue(collection_created, "Cannot create collection")

        # NOTE(review): the task handle is never awaited via .result();
        # completion is only polled over REST in the finally block.
        rebalance_result = self.cluster.async_rebalance(
            self.servers, [], [self.servers[1]])
        try:
            # Both drops run while the rebalance is still in flight.
            collection_dropped = self.collections_helper.delete_collection(
                bucket_name=bucket_name,
                scope_name="_default",
                collection_name=collection_name)
            self.assertTrue(collection_dropped,
                            "Cannot drop collection during rebalance.")
            scope_dropped = self.collections_helper.delete_scope(
                bucket_name=bucket_name, scope_name=scope_name)
            self.assertTrue(scope_dropped,
                            "Cannot drop scope during rebalance")
        finally:
            #wait until rebalance is done
            RestHelper(self.rest).rebalance_reached(retry_count=150)
            # Poll up to 100 x 10s for the cluster to report "rebalanced".
            time_limit = 100
            while time_limit > 0:
                if RestHelper(self.rest).is_cluster_rebalanced():
                    break
                else:
                    time_limit = time_limit - 1
                    self.sleep(10, "Waiting for rebalance finish.")
 def test_eventing_rebalance_in_delete_recreate_collections(self):
     """Eventing rebalance-in while target collections are dropped/recreated.

     Phase 1: rebalance an eventing node in while dst_bucket.scope_1
     collections coll_0..coll_2 are dropped mid-rebalance, then verify
     doc counts.  Phase 2: rebalance a second eventing node in while the
     same collections are recreated and the source docs are deleted, then
     verify the handlers processed the deletions.
     """
     self.create_save_handlers()
     self.deploy_all_handlers()
     # load data
     self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",wait_for_loading=False)
     self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.scope_1.coll_1",wait_for_loading=False)
     # rebalance in a eventing node when eventing is processing mutations
     services_in = ["eventing"]
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
                                              services=services_in)
     # Drop destination collections while the rebalance is in flight.
     self.collection_rest.delete_collection("dst_bucket","scope_1","coll_0")
     self.collection_rest.delete_collection("dst_bucket","scope_1","coll_1")
     self.collection_rest.delete_collection("dst_bucket","scope_1","coll_2")
     reached = RestHelper(self.rest).rebalance_reached(retry_count=150)
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
     # Wait for eventing to catch up with all the update mutations and verify results after rebalance
     # self.verify_all_handler(self.docs_per_day * self.num_docs)
     self.verify_doc_count_collections("dst_bucket.scope_1.coll_3", self.docs_per_day * self.num_docs)
     self.verify_doc_count_collections("dst_bucket.scope_1.coll_4", self.docs_per_day * self.num_docs)
     # NOTE(review): coll_1 is asserted at twice the loaded count,
     # presumably because a handler also writes into it -- confirm
     # against the deployed handler code.
     self.verify_doc_count_collections("src_bucket.scope_1.coll_1", self.docs_per_day * self.num_docs*2)
     # rebalance in a eventing node when eventing is processing mutations
     services_in = ["eventing"]
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init+1], [self.servers[self.nodes_init+1]], [],
                                              services=services_in)
     # Recreate the collections dropped in phase 1 while rebalancing.
     self.collection_rest.create_collection("dst_bucket", "scope_1", "coll_0")
     self.collection_rest.create_collection("dst_bucket", "scope_1", "coll_1")
     self.collection_rest.create_collection("dst_bucket", "scope_1", "coll_2")
     # delete json documents
     self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default", is_delete=True)
     self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.scope_1.coll_1", is_delete=True)
     reached = RestHelper(self.rest).rebalance_reached(retry_count=150)
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
     # After deletions the destination collections should drain to zero.
     self.verify_doc_count_collections("dst_bucket.scope_1.coll_3", 0)
     self.verify_doc_count_collections("dst_bucket.scope_1.coll_4", 0)
     self.verify_doc_count_collections("src_bucket.scope_1.coll_1", self.docs_per_day * self.num_docs)
     self.undeploy_delete_all_functions()
 def test_eventing_rebalance_swap_delete_recreate_collections(self):
     """Eventing swap-rebalance while collections are dropped/recreated.

     Phase 1: swap-rebalance the eventing node(s) while dropping
     dst_bucket.scope_1.coll_0..coll_2 mid-rebalance, then verify counts.
     Phase 2: swap again while recreating those collections and deleting
     the source docs, then verify every handler drained to zero.
     """
     self.create_save_handlers()
     self.deploy_all_handlers()
     # load data
     self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",wait_for_loading=False)
     self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.scope_1.coll_1",wait_for_loading=False)
     # swap rebalance an eventing node when eventing is processing mutations
     services_in = ["eventing"]
     nodes_out_ev = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]],
                                              nodes_out_ev, services=services_in)
     # Drop destination collections while the swap rebalance is in flight.
     self.collection_rest.delete_collection("dst_bucket", "scope_1", "coll_0")
     self.collection_rest.delete_collection("dst_bucket", "scope_1", "coll_1")
     self.collection_rest.delete_collection("dst_bucket", "scope_1", "coll_2")
     reached = RestHelper(self.rest).rebalance_reached(retry_count=150)
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
     self.verify_doc_count_collections("dst_bucket.scope_1.coll_3", self.docs_per_day * self.num_docs)
     self.verify_doc_count_collections("dst_bucket.scope_1.coll_4", self.docs_per_day * self.num_docs)
     # NOTE(review): coll_1 is asserted at twice the loaded count,
     # presumably because a handler also writes into it -- confirm.
     self.verify_doc_count_collections("src_bucket.scope_1.coll_1", self.docs_per_day * self.num_docs * 2)
     # rebalance out a eventing node when eventing is processing mutations
     services_in = ["eventing"]
     nodes_out_ev = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init+1], [self.servers[self.nodes_init+1]],
                                              nodes_out_ev, services=services_in)
     # Recreate the collections dropped in phase 1 while rebalancing.
     self.collection_rest.create_collection("dst_bucket", "scope_1", "coll_0")
     self.collection_rest.create_collection("dst_bucket", "scope_1", "coll_1")
     self.collection_rest.create_collection("dst_bucket", "scope_1", "coll_2")
     # delete json documents
     self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default", is_delete=True)
     self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.scope_1.coll_1", is_delete=True)
     reached = RestHelper(self.rest).rebalance_reached(retry_count=150)
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
     self.verify_all_handler(0)
     self.verify_doc_count_collections("src_bucket.scope_1.coll_1", self.docs_per_day * self.num_docs)
     self.undeploy_delete_all_functions()
Example #8
0
 def test_n1ql_gc_rebalance(self):
     """Rebalance an eventing node in while a N1QL-using handler runs.

     Builds a primary GSI index, loads travel-sample, deploys a handler
     with a configurable worker count, loads mutations, rebalances an
     eventing node in (optionally with the handler paused), verifies the
     results, then deletes the documents and verifies the handler drains
     to zero.
     """
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     self.load_sample_buckets(self.server, "travel-sample")
     # Worker count is tunable from the test input; defaults to 12.
     worker_count = self.input.param('worker_count', 12)
     body = self.create_save_function_body(self.function_name,
                                           self.handler_code,
                                           worker_count=worker_count)
     self.deploy_function(body)
     # load data
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size)
     # Optionally pause the handler so the rebalance happens while the
     # function is paused rather than actively processing.
     if self.pause_resume:
         self.pause_function(body)
     # rebalance in a eventing node when eventing is processing mutations
     services_in = ["eventing"]
     rebalance = self.cluster.async_rebalance(
         self.servers[:self.nodes_init], [self.servers[self.nodes_init]],
         [],
         services=services_in)
     reached = RestHelper(self.rest).rebalance_reached(retry_count=150)
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
     if self.pause_resume:
         self.resume_function(body)
     # Wait for eventing to catch up with all the update mutations and verify results after rebalance
     self.verify_eventing_results(self.function_name,
                                  self.docs_per_day * 2016,
                                  skip_stats_validation=True)
     # delete json documents
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size,
               op_type='delete')
     # Pause/resume once more around the delete phase when configured.
     if self.pause_resume:
         self.pause_function(body)
         self.sleep(30)
         self.resume_function(body)
     # Wait for eventing to catch up with all the delete mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  0,
                                  skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
    def test_volume(self):
        """Volume test: build out the cluster, bulk-load documents through
        the Couchbase Java SDK, mutate a slice, and rebalance a node in
        while a load runs concurrently.

        NOTE(review): uses Python 2 print statements, xrange, and Java
        classes (Executors, TimeUnit, DefaultCouchbaseEnvironment), so
        this appears to run under Jython with the Java SDK -- confirm.
        """
        nodes_in_cluster = [self.servers[0]]
        print "Start Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))

        ########################################################################################################################
        self.log.info("Add a N1QL/Index nodes")
        self.query_node = self.servers[1]
        rest = RestConnection(self.query_node)
        # Configure disk paths on the node before adding it to the cluster.
        rest.set_data_path(data_path=self.query_node.data_path,
                           index_path=self.query_node.index_path,
                           cbas_path=self.query_node.cbas_path)
        result = self.add_node(self.query_node, rebalance=False)
        self.assertTrue(result, msg="Failed to add N1QL/Index node.")

        self.log.info("Add a KV nodes")
        # rebalance=True here also rebalances in the node added above.
        result = self.add_node(self.servers[2],
                               services=["kv"],
                               rebalance=True)
        self.assertTrue(result, msg="Failed to add KV node.")

        nodes_in_cluster = nodes_in_cluster + [
            self.servers[1], self.servers[2]
        ]
        ########################################################################################################################
        self.log.info("Step 2: Create Couchbase buckets.")
        self.create_required_buckets()
        # Warm up each node and wait until its services are back up.
        for node in nodes_in_cluster:
            NodeHelper.do_a_warm_up(node)
            NodeHelper.wait_service_started(node)
        ########################################################################################################################
        self.log.info(
            "Step 3: Create 10M docs average of 1k docs for 8 couchbase buckets."
        )
        # Java SDK environment with generous timeouts for the bulk load.
        env = DefaultCouchbaseEnvironment.builder().mutationTokensEnabled(
            True).computationPoolSize(5).socketConnectTimeout(
                100000).connectTimeout(100000).maxRequestLifetime(
                    TimeUnit.SECONDS.toMillis(300)).build()
        cluster = CouchbaseCluster.create(env, self.master.ip)
        cluster.authenticate("Administrator", "password")
        bucket = cluster.openBucket("GleambookUsers")

        pool = Executors.newFixedThreadPool(5)
        items_start_from = 0
        total_num_items = self.input.param("num_items", 5000)

        # Split the load across 5 loader tasks, each with its own key range.
        executors = []
        num_executors = 5
        doc_executors = 5
        # Python 2 integer division: each loader gets an equal whole share.
        num_items = total_num_items / num_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket,
                                        num_items,
                                        items_start_from + i * num_items,
                                        batch_size=2000))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        # Remember key ranges for the update/delete phase below.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items
        ########################################################################################################################
        self.sleep(120, "Sleeping after 1st cycle.")
        self.log.info("Step 8: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        # NOTE(review): doc_executors is set to 4 here but only two loader
        # tasks (one update, one delete) are submitted -- confirm.
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.sleep(120, "Sleeping after 2nd cycle.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 5
        num_items = total_num_items / doc_executors

        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket,
                                        num_items,
                                        items_start_from + i * num_items,
                                        batch_size=2000))
        # Rebalance servers[3] in while the loaders run concurrently.
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[3]], [])
        futures = pool.invokeAll(executors)

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        # Release SDK resources before finishing.
        bucket.close()
        cluster.disconnect()

        print "End Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))
Example #10
0
    def test_cbcollect_with_redaction_enabled_with_xdcr(self):
        """Verify cbcollect log redaction while an XDCR replication runs.

        Sets up a C1->C2 replication from master to servers[1], loads
        documents, optionally firewalls the source to interrupt the
        replication, collects cluster logs, then checks that both the
        redacted and unredacted archives exist and that
        ns_server.goxdcr.log was redacted.  XDCR state and the firewall
        are always cleaned up in the finally block.
        """
        rest_src = RestConnection(self.master)
        rest_src.remove_all_replications()
        rest_src.remove_all_remote_clusters()

        rest_dest = RestConnection(self.servers[1])
        rest_dest_helper = RestHelper(rest_dest)

        try:
            # Repeats the cleanup from above so the try starts from a
            # known-clean replication state.
            rest_src.remove_all_replications()
            rest_src.remove_all_remote_clusters()
            self.set_redaction_level()
            rest_src.add_remote_cluster(self.servers[1].ip,
                                        self.servers[1].port,
                                        self.servers[1].rest_username,
                                        self.servers[1].rest_password, "C2")
            """ at dest cluster """
            self.add_built_in_server_user(node=self.servers[1])
            rest_dest.create_bucket(bucket='default', ramQuotaMB=512)
            bucket_ready = rest_dest_helper.vbucket_map_ready('default')
            if not bucket_ready:
                self.fail(
                    "Bucket default at dest not created after 120 seconds.")
            repl_id = rest_src.start_replication('continuous', 'default', "C2")
            if repl_id is not None:
                self.log.info("Replication created successfully")
            gen = BlobGenerator("ent-backup",
                                "ent-backup-",
                                self.value_size,
                                end=self.num_items)
            tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
            for task in tasks:
                task.result()
            self.sleep(10)
            """ enable firewall """
            if self.interrupt_replication:
                RemoteUtilHelper.enable_firewall(self.master, xdcr=True)
            """ start collect logs """
            self.start_logs_collection()
            result = self.monitor_logs_collection()
            """ verify logs """
            # Per-node key is normally "ns_1@<ip>"; fall back otherwise.
            # NOTE(review): the fallback key looks like a masking artifact
            # -- confirm the intended literal node name.
            try:
                logs_path = result["perNode"]["ns_1@" +
                                              str(self.master.ip)]["path"]
            except KeyError:
                logs_path = result["perNode"]["[email protected]"]["path"]
            # The redacted archive carries a "-redacted" suffix; derive the
            # unredacted name and remote directory from the reported path.
            redactFileName = logs_path.split('/')[-1]
            nonredactFileName = logs_path.split('/')[-1].replace(
                '-redacted', '')
            remotepath = logs_path[0:logs_path.rfind('/') + 1]
            self.verify_log_files_exist(remotepath=remotepath,
                                        redactFileName=redactFileName,
                                        nonredactFileName=nonredactFileName)
            self.log.info("Verify on log ns_server.goxdcr.log")
            self.verify_log_redaction(remotepath=remotepath,
                                      redactFileName=redactFileName,
                                      nonredactFileName=nonredactFileName,
                                      logFileName="ns_server.goxdcr.log")
        finally:
            """ clean up xdcr """
            rest_dest.delete_bucket()
            rest_src.remove_all_replications()
            rest_src.remove_all_remote_clusters()
            if self.interrupt_replication:
                shell = RemoteMachineShellConnection(self.master)
                shell.disable_firewall()
                shell.disconnect()
Example #11
0
    def test_volume(self):
        nodes_in_cluster = [self.servers[0]]
        print "Start Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))

        #######################################################################
        self.log.info("Step 1: Add a N1QL/Index nodes")
        self.query_node = self.servers[1]
        rest = RestConnection(self.query_node)
        rest.set_data_path(data_path=self.query_node.data_path,
                           index_path=self.query_node.index_path,
                           cbas_path=self.query_node.cbas_path)
        result = self.add_node(self.query_node, rebalance=False)
        self.assertTrue(result, msg="Failed to add N1QL/Index node.")

        self.log.info("Step 2: Add a KV nodes")
        result = self.add_node(self.servers[2],
                               services=["kv"],
                               rebalance=True)
        self.assertTrue(result, msg="Failed to add KV node.")

        nodes_in_cluster = nodes_in_cluster + [
            self.servers[1], self.servers[2]
        ]

        #######################################################################

        self.log.info("Step 3: Create Couchbase buckets.")
        self.create_required_buckets()

        #######################################################################

        env = DefaultCouchbaseEnvironment.builder().mutationTokensEnabled(
            True).computationPoolSize(5).socketConnectTimeout(
                10000000).connectTimeout(10000000).maxRequestLifetime(
                    TimeUnit.SECONDS.toMillis(1200)).build()

        try:
            System.setProperty("com.couchbase.forceIPv4", "false")
            logger = Logger.getLogger("com.couchbase.client")
            logger.setLevel(Level.SEVERE)
            for h in logger.getParent().getHandlers():
                if isinstance(h, ConsoleHandler):
                    h.setLevel(Level.SEVERE)

            cluster = CouchbaseCluster.create(env, self.master.ip)
            cluster.authenticate("Administrator", "password")
            self.bucket = cluster.openBucket("GleambookUsers")
            self.msg_bucket = cluster.openBucket("GleambookMessages")
        except CouchbaseException:
            print "cannot login from user: %s/%s" % (self.username,
                                                     self.password)
            raise

        self.c = cluster
        self.items_start_from = 0
        self.total_num_items = self.input.param("num_items", 5000)
        self.load_data()

        self.sleep(20, "Sleeping after 4th step.")

        self.validate_items_count()

        self.log.info("Step 4: Add node")
        result = self.add_node(self.servers[3], rebalance=False)
        self.assertTrue(result, msg="Failed to add node.")
        self.log.info("Step 5: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 6: Rebalance Cluster")
        rebalance = self.rebalance()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        nodes_in_cluster = nodes_in_cluster + [self.servers[3]]

        self.log.info("Step 7: Start Verification")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 8: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 9: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.log.info("Step 10: Removing node and Rebalance cluster")
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 [self.servers[3]])
        nodes_in_cluster.remove(self.servers[3])

        self.log.info("Step 11: Loading %s items" % self.total_num_items)
        self.load_data()

        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 12: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 13: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 14: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 15: Add node")
        result = self.add_node(self.servers[3], rebalance=False)
        nodes_in_cluster = nodes_in_cluster + [self.servers[3]]

        self.log.info("Step 16: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 17: Rebalancing Cluster")
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 [self.servers[2]])

        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        nodes_in_cluster.remove(self.servers[2])

        self.log.info("Step 18: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 19: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 20: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 21: Add node")
        result = self.add_node(self.servers[2], rebalance=False)

        self.log.info("Step 22: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 23: Rebalancing Cluster")
        rebalance = self.rebalance()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        nodes_in_cluster = nodes_in_cluster + [self.servers[2]]

        self.log.info("Step 24: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 25: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 26: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)

        self.log.info("Step 27: Add node")
        result = self.add_node(self.servers[4], rebalance=False)

        self.log.info("Step 28: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 29: Rebalancing Cluster")
        rebalance = self.rebalance()
        nodes_in_cluster = nodes_in_cluster + [self.servers[4]]
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 30: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 31: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 32: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 33: Removing node, Rebalancing Cluster")
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 [self.servers[3]])
        nodes_in_cluster.remove(self.servers[3])

        self.log.info("Step 34: Loading %s items" % self.total_num_items)
        self.load_data()

        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 35: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)

        self.log.info("Step 36: Adding 3 nodes")
        otp1 = self.add_node(self.servers[5], rebalance=False)
        otp2 = self.add_node(self.servers[6], rebalance=False)
        otp3 = self.add_node(self.servers[7], rebalance=False)

        self.log.info("Step 37: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 38: Rebalancing Cluster")
        rebalance = self.rebalance()
        nodes_in_cluster = nodes_in_cluster + [
            self.servers[5], self.servers[6], self.servers[7]
        ]
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 39: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.log.info("Step 40: Graceful failover node")
        self.rest.fail_over(otp3.id, graceful=True)
        self.log.info("Step 41: Loading %s items" % self.total_num_items)
        self.load_data()
        self.sleep(10)
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 42: Rebalancing Cluster")
        rebalance = self.rebalance()
        nodes_in_cluster.remove(self.servers[7])
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        #######################################################################
        self.log.info("Step 43: Adding node and rebalancing")
        otp3 = self.add_node(self.servers[7], rebalance=True)
        nodes_in_cluster = nodes_in_cluster + [self.servers[7]]

        #######################################################################

        self.log.info("Step 44: Graceful failover node")
        self.rest.fail_over(otp3.id, graceful=True)
        self.log.info("Step 41: Loading %s items" % self.total_num_items)
        self.load_data()
        self.sleep(10)
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 45: Delta recover node")
        self.rest.set_recovery_type(otp3.id, "delta")

        self.log.info("Step 46: Add node back to cluster")
        self.rest.add_back_node(otp3.id)

        rebalance = self.rebalance()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 47: Graceful failover node")
        self.rest.fail_over(otp2.id, graceful=True)
        self.log.info("Step 48: Loading %s items" % self.total_num_items)
        self.load_data()
        self.sleep(10)
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 49: Delta recover node")
        self.rest.set_recovery_type(otp2.id, "full")

        self.log.info("Step 50: Add node back to cluster")
        self.rest.add_back_node(otp2.id)

        rebalance = self.rebalance()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.bucket.close()
        self.msg_bucket.close()
        cluster.disconnect()
Example #12
0
 def set_up_rest(self, master):
     """Create a REST connection to *master* and a helper wrapping it.

     Stores the connection on ``self.rest`` and the wrapping
     ``RestHelper`` on ``self.rest_helper`` for later use by tests.
     """
     rest_conn = RestConnection(master)
     self.rest = rest_conn
     self.rest_helper = RestHelper(rest_conn)
Example #13
0
 def test_eventing_rebalance_in_kill_eventing_producer(self):
     """Rebalance-in an eventing node, kill the eventing producer mid-rebalance,
     and verify the rebalance fails; then retry the rebalance, verify mutation
     processing (updates and deletes) still completes, and finally rebalance
     out all eventing nodes.

     Tunable via test input params: sock_batch_size, worker_count,
     cpp_worker_thread_count. Behavior also branches on self.is_curl,
     self.pause_resume and self.is_sbm (source-bucket-mutation) flags.
     """
     eventing_node = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=False)
     sock_batch_size = self.input.param('sock_batch_size', 1)
     worker_count = self.input.param('worker_count', 3)
     cpp_worker_thread_count = self.input.param('cpp_worker_thread_count', 1)
     # Build the eventing function definition (handler code + deployment config).
     body = self.create_save_function_body(self.function_name, self.handler_code,
                                           sock_batch_size=sock_batch_size, worker_count=worker_count,
                                           cpp_worker_thread_count=cpp_worker_thread_count)
     if self.is_curl:
         # Inject curl binding credentials into the deployment config.
         body['depcfg']['curl'] = []
         body['depcfg']['curl'].append({"hostname": self.hostname, "value": "server", "auth_type": self.auth_type,
                                        "username": self.curl_username, "password": self.curl_password,"cookies": self.cookies})
     self.deploy_function(body)
     # Load source documents so eventing has mutations to process.
     self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
               batch_size=self.batch_size)
     if self.pause_resume:
         self.pause_function(body, wait_for_pause=False)
     try:
         # Rebalance in an eventing node while eventing is processing mutations.
         services_in = ["eventing"]
         rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
                                                  services=services_in)
         self.sleep(5)
         # Wait until the rebalance is at least 60% complete before disrupting it.
         reached = RestHelper(self.rest).rebalance_reached(percentage=60)
         self.assertTrue(reached, "rebalance failed, stuck or did not complete")
         # Kill the eventing producer mid-rebalance; this is expected to make
         # the in-flight rebalance fail.
         self.kill_producer(eventing_node)
         if self.pause_resume:
             self.wait_for_handler_state(body['appname'], "paused")
             self.resume_function(body)
         else:
             self.wait_for_handler_state(body['appname'], "deployed")
         rebalance.result()
     except Exception as ex:
         # Expected path: the rebalance must fail after the producer was killed.
         log.info("Rebalance failed as expected after eventing got killed: {0}".format(str(ex)))
     else:
         # try/except/else: reaching here means rebalance.result() succeeded,
         # which is a test failure for this scenario.
         self.fail("Rebalance succeeded even after killing eventing processes")
     if self.pause_resume:
         self.resume_function(body)
     # Wait for eventing to catch up with all the update mutations and verify results after rebalance.
     # NOTE(review): 2016 appears to be the expected docs-per-day multiplier used
     # throughout this suite; doubled when the handler writes back to the source bucket (is_sbm).
     if self.is_sbm:
         self.verify_eventing_results(self.function_name, self.docs_per_day * 2016*2, skip_stats_validation=True)
     else:
         self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
     # Retry the failed rebalance; this time it must succeed.
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], [])
     self.sleep(30)
     reached = RestHelper(self.rest).rebalance_reached()
     self.assertTrue(reached, "retry of the failed rebalance failed, stuck or did not complete")
     rebalance.result()
     # Delete the json documents so eventing processes delete mutations.
     self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
               batch_size=self.batch_size, op_type='delete')
     if self.pause_resume:
         self.pause_function(body)
         self.sleep(30)
         self.resume_function(body)
     # Kill the eventing producer again while it is processing the delete mutations.
     self.kill_producer(eventing_node)
     self.sleep(120)
     if self.pause_resume:
         self.wait_for_handler_state(body['appname'], "paused")
         self.resume_function(body)
     else:
         # The producer is expected to restart and the handler to return to "deployed".
         self.wait_for_handler_state(body['appname'], "deployed")
     # Wait for eventing to catch up with all the delete mutations and verify results.
     # This is required to ensure eventing works after rebalance goes through successfully.
     if self.is_sbm:
         self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
     else:
         self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
     # Get all eventing nodes.
     nodes_out_list = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     # Rebalance out all eventing nodes as cleanup.
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], nodes_out_list)
     reached = RestHelper(self.rest).rebalance_reached()
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
Example #14
0
    def test_ups_volume(self):
        nodes_in_cluster = [self.servers[0]]
        print "Start Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))

        ########################################################################################################################
        self.log.info("Add a KV nodes - 2")
        self.query_node = self.servers[1]
        rest = RestConnection(self.servers[1])
        rest.set_data_path(data_path=self.servers[1].data_path,
                           index_path=self.servers[1].index_path,
                           cbas_path=self.servers[1].cbas_path)
        result = self.add_node(self.servers[1], rebalance=False)
        self.assertTrue(result, msg="Failed to add N1QL/Index node.")

        self.log.info("Add a KV nodes - 3")
        rest = RestConnection(self.servers[2])
        rest.set_data_path(data_path=self.kv_servers[1].data_path,
                           index_path=self.kv_servers[1].index_path,
                           cbas_path=self.kv_servers[1].cbas_path)
        result = self.add_node(self.kv_servers[1],
                               services=["kv"],
                               rebalance=False)
        self.assertTrue(result, msg="Failed to add KV node.")

        self.log.info("Add one more KV node")
        rest = RestConnection(self.servers[3])
        rest.set_data_path(data_path=self.kv_servers[3].data_path,
                           index_path=self.kv_servers[3].index_path,
                           cbas_path=self.kv_servers[3].cbas_path)
        result = self.add_node(self.kv_servers[3],
                               services=["kv"],
                               rebalance=False)
        self.assertTrue(result, msg="Failed to add KV node.")

        self.log.info("Add one more KV node")
        rest = RestConnection(self.servers[4])
        rest.set_data_path(data_path=self.kv_servers[4].data_path,
                           index_path=self.kv_servers[4].index_path,
                           cbas_path=self.kv_servers[4].cbas_path)
        result = self.add_node(self.kv_servers[4],
                               services=["kv"],
                               rebalance=False)
        self.assertTrue(result, msg="Failed to add KV node.")

        nodes_in_cluster = nodes_in_cluster + [
            self.servers[1], self.servers[2], self.servers[3], self.servers[4]
        ]
        ########################################################################################################################
        self.log.info("Step 2: Create Couchbase buckets.")
        self.create_required_buckets()

        ########################################################################################################################
        self.log.info(
            "Step 3: Create 10M docs average of 1k docs for 8 couchbase buckets."
        )
        env = DefaultCouchbaseEnvironment.builder().mutationTokensEnabled(
            True).computationPoolSize(5).socketConnectTimeout(
                100000).connectTimeout(100000).maxRequestLifetime(
                    TimeUnit.SECONDS.toMillis(300)).build()
        cluster = CouchbaseCluster.create(env, self.master.ip)
        cluster.authenticate("Administrator", "password")
        bucket = cluster.openBucket("GleambookUsers")
        msg_bucket = cluster.openBucket("GleambookMessages")

        pool = Executors.newFixedThreadPool(5)
        items_start_from = 0
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 5
        num_items = total_num_items / num_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 6: Verify the items count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 8: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        ########################################################################################################################
        self.log.info("Step 9: Connect cbas buckets.")
        self.connect_cbas_buckets()
        self.sleep(10, "Wait for the ingestion to complete")

        ########################################################################################################################
        self.log.info("Step 10: Verify the items count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info(
            "Step 12: When 11 is in progress do a KV Rebalance in of 1 nodes.")
        rest = RestConnection(self.servers[5])
        rest.set_data_path(data_path=self.servers[5].data_path,
                           index_path=self.servers[5].index_path,
                           cbas_path=self.servers[5].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[5]], [])
        nodes_in_cluster += [self.servers[2]]
        ########################################################################################################################
        self.log.info("Step 11: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 13: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        ########################################################################################################################
        self.log.info("Step 14: Verify the items count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 15: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        ########################################################################################################################
        self.log.info(
            "Step 16: Verify Results that 1M docs gets deleted from analytics datasets."
        )
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 17: Disconnect CBAS buckets.")
        self.disconnect_cbas_buckets()

        ########################################################################################################################
        self.log.info("Step 18: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 20: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        pool = Executors.newFixedThreadPool(5)
        executors = []
        num_executors = 5

        self.log.info(
            "Step 22: When 21 is in progress do a KV Rebalance out of 2 nodes."
        )
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 self.servers[1:3])
        nodes_in_cluster = [
            node for node in nodes_in_cluster if node not in self.servers[1:3]
        ]

        futures = pool.invokeAll(executors)
        self.log.info("Step 23: Wait for rebalance.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 24: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 6
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ##################################################### NEED TO BE UPDATED ##################################################################
        self.log.info(
            "Step 25: When 24 is in progress do a KV Rebalance in of 2 nodes.")
        for node in self.servers[1:3]:
            rest = RestConnection(node)
            rest.set_data_path(data_path=node.data_path,
                               index_path=node.index_path,
                               cbas_path=node.cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 self.servers[1:3], [])
        nodes_in_cluster = nodes_in_cluster + self.servers[1:3]
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        self.log.info("Step 27: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        ########################################################################################################################
        self.log.info("Step 28: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 29: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        ########################################################################################################################
        self.log.info("Step 30: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 31: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ###################################################### NEED TO BE UPDATED ##################################################################
        self.log.info(
            "Step 32: When 31 is in progress do a KV Rebalance out of 2 nodes."
        )
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 self.servers[1:3])
        nodes_in_cluster = [
            node for node in nodes_in_cluster if node not in self.servers[1:3]
        ]
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items
        ########################################################################################################################
        self.log.info("Step 33: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        ########################################################################################################################
        self.log.info("Step 34: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 35: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 36: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 37: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ###################################################### NEED TO BE UPDATED ##################################################################
        self.log.info(
            "Step 38: When 37 is in progress do a CBAS SWAP Rebalance of 2 nodes."
        )
        for node in self.cbas_servers[-1:]:
            rest = RestConnection(node)
            rest.set_data_path(data_path=node.data_path,
                               index_path=node.index_path,
                               cbas_path=node.cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 self.servers[6],
                                                 [self.servers[5]],
                                                 services=["kv"],
                                                 check_vbucket_shuffling=False)
        nodes_in_cluster += self.servers[6]
        nodes_in_cluster.remove(self.servers[5])
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 39: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 40: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 41: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 42: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 43: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ###################################################### NEED TO BE UPDATED ##################################################################
        self.log.info("Step 44: When 43 is in progress do a KV Rebalance IN.")
        rest = RestConnection(self.servers[5])
        rest.set_data_path(data_path=self.servers[5].data_path,
                           index_path=self.servers[5].index_path,
                           cbas_path=self.servers[5].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[5]], [],
                                                 services=["kv"])
        nodes_in_cluster += [self.servers[5]]
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 45: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 46: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 47: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 48: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 49: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ########################################################################################################################
        self.log.info(
            "Step 50: When 49 is in progress do a KV+CBAS Rebalance OUT.")
        rest = RestConnection(self.servers[6])
        rest.set_data_path(data_path=self.servers[6].data_path,
                           index_path=self.servers[6].index_path,
                           cbas_path=self.kv_servers[6].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 [self.servers[6]])
        nodes_in_cluster.remove(self.servers[6])

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 51: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 52: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 53: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 54: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 55: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ########################################################################################################################
        self.log.info(
            "Step 56: When 55 is in progress do a KV+CBAS SWAP Rebalance .")
        rest = RestConnection(self.servers[7])
        rest.set_data_path(data_path=self.servers[7].data_path,
                           index_path=self.servers[7].index_path,
                           cbas_path=self.servers[7].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[7]],
                                                 [self.servers[6]])
        #         rebalance.get_result()
        nodes_in_cluster.remove(self.servers[6])
        nodes_in_cluster += [self.servers[7]]

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 57: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=240)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 58: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 59: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 60: Verify the docs count.")
        self.validate_items_count()

        bucket.close()
        msg_bucket.close()
        cluster.disconnect()

        print "End Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))
Example #15
0
    def test_analytics_volume(self):
        queries = ['SELECT VALUE u FROM `GleambookUsers_ds` u WHERE u.user_since >= "2010-09-13T16-48-15" AND u.user_since < "2010-10-13T16-48-15" AND (SOME e IN u.employment SATISFIES e.end_date IS UNKNOWN) LIMIT 100;',
           'SELECT VALUE u FROM `GleambookUsers_ds` u WHERE u.user_since >= "2010-11-13T16-48-15" AND u.user_since < "2010-12-13T16-48-15" limit 1;',
           ]
        nodes_in_cluster= [self.servers[0],self.cbas_node]
        print "Start Time: %s"%str(time.strftime("%H:%M:%S", time.gmtime(time.time())))
        
        ########################################################################################################################
        self.log.info("Step 1: Start the test with 2 KV and 2 CBAS nodes")

        self.log.info("Add a N1QL/Index nodes")
        self.query_node = self.servers[1]
        rest = RestConnection(self.query_node)
        rest.set_data_path(data_path=self.query_node.data_path,index_path=self.query_node.index_path,cbas_path=self.query_node.cbas_path)
        result = self.add_node(self.query_node, rebalance=False)
        self.assertTrue(result, msg="Failed to add N1QL/Index node.")
        self.log.info("Add a KV nodes")
        rest = RestConnection(self.kv_servers[1])
        rest.set_data_path(data_path=self.kv_servers[1].data_path,index_path=self.kv_servers[1].index_path,cbas_path=self.kv_servers[1].cbas_path)
        result = self.add_node(self.kv_servers[1], services=["kv"], rebalance=False)
        self.assertTrue(result, msg="Failed to add KV node.")

        self.log.info("Add one more KV node")
        rest = RestConnection(self.kv_servers[3])
        rest.set_data_path(data_path=self.kv_servers[3].data_path,index_path=self.kv_servers[3].index_path,cbas_path=self.kv_servers[3].cbas_path)
        result = self.add_node(self.kv_servers[3], services=["kv"], rebalance=False)
        self.assertTrue(result, msg="Failed to add KV node.")

        self.log.info("Add one more KV node")
        rest = RestConnection(self.kv_servers[4])
        rest.set_data_path(data_path=self.kv_servers[4].data_path,index_path=self.kv_servers[4].index_path,cbas_path=self.kv_servers[4].cbas_path)
        result = self.add_node(self.kv_servers[4], services=["kv"], rebalance=False)
        self.assertTrue(result, msg="Failed to add KV node.")
                 
        self.log.info("Add a CBAS nodes")
        result = self.add_node(self.cbas_servers[0], services=["cbas"], rebalance=True)
        self.assertTrue(result, msg="Failed to add CBAS node.")
         
        nodes_in_cluster = nodes_in_cluster + [self.query_node, self.kv_servers[1], self.kv_servers[3], self.kv_servers[4], self.cbas_servers[0]]
        ########################################################################################################################
        self.log.info("Step 2: Create Couchbase buckets.")
        self.create_required_buckets()
        
        ########################################################################################################################
        self.log.info("Step 3: Create 10M docs average of 1k docs for 8 couchbase buckets.")
        
        GleambookUsers = buck(name="GleambookUsers", authType=None, saslPassword=None,
                            num_replicas=self.num_replicas,
                            bucket_size=self.bucket_size,
                            eviction_policy='noEviction', lww=self.lww)
        
        items_start_from = 0
        total_num_items = self.input.param("num_items",1000000)
        num_query = self.input.param("num_query",240)

        self.use_replica_to = False
        self.rate_limit = self.input.param('rate_limit', '100000')
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()
        load_thread.join()
        
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items/10
        items_start_from += total_num_items
        ########################################################################################################################
        self.log.info("Step 4: Create 8 analytics buckets and 8 datasets and connect.")
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()
        
        items_start_from += total_num_items
        self.setup_cbas()
        load_thread.join()
                 
        ########################################################################################################################
        self.log.info("Step 5: Wait for ingestion to complete.")
        self.sleep(10,"Wait for the ingestion to complete")
         
        ########################################################################################################################
        self.log.info("Step 6: Verify the items count.")
        self.validate_items_count()
        
        ########################################################################################################################
        self.log.info("Step 7: Disconnect CBAS bucket and create secondary indexes.")
        self.disconnect_cbas_buckets()
        self.create_cbas_indexes()
         
        ########################################################################################################################
        self.log.info("Step 8: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items",5000)
        executors=[]
        query_executors = 1
        num_executors = query_executors

        upsert_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10,10000,4, updates_from,1, 0))
        delete_thread = Thread(target=self.delete_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10, self.rate_limit, 10000, 2, deletes_from,1))
        delete_thread.start()
        upsert_thread.start()
        
        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        
        delete_thread.join()
        upsert_thread.join()
        
        ########################################################################################################################
        self.log.info("Step 9: Connect cbas buckets.")
        self.connect_cbas_buckets()
        self.sleep(10,"Wait for the ingestion to complete")
         
        ########################################################################################################################
        self.log.info("Step 10: Verify the items count.")
        self.validate_items_count()
         
        ########################################################################################################################
        self.log.info("Step 12: When 11 is in progress do a KV Rebalance in of 1 nodes.")
        rest = RestConnection(self.kv_servers[2])
        rest.set_data_path(data_path=self.kv_servers[2].data_path,index_path=self.kv_servers[2].index_path,cbas_path=self.kv_servers[2].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [self.kv_servers[2]], [])
        nodes_in_cluster += [self.kv_servers[2]]
        ########################################################################################################################
        self.log.info("Step 11: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items",1000000)
        num_query = self.input.param("num_query",240)

        self.use_replica_to = False
        self.rate_limit = self.input.param('rate_limit', '100000')
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        load_thread.join()
        
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items/10
        items_start_from += total_num_items
                 
        ########################################################################################################################
        self.log.info("Step 13: Wait for rebalance to complete.")
        
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)
         
        ########################################################################################################################
        self.log.info("Step 14: Verify the items count.")
        self.validate_items_count()
         
        ########################################################################################################################
        self.log.info("Step 15: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items",5000)
        executors=[]
        query_executors = 1
        num_executors = query_executors

        upsert_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10,10000,4, updates_from,1, 0))
        delete_thread = Thread(target=self.delete_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10, self.rate_limit, 10000, 2, deletes_from,1))
        delete_thread.start()
        upsert_thread.start()
        
        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        
        delete_thread.join()
        upsert_thread.join()
        ########################################################################################################################
        self.log.info("Step 16: Verify Results that 1M docs gets deleted from analytics datasets.")
        self.validate_items_count()
         
        ########################################################################################################################
        self.log.info("Step 17: Disconnect CBAS buckets.")
        self.disconnect_cbas_buckets()
         
        ########################################################################################################################
        self.log.info("Step 18: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items",1000000)
        num_query = self.input.param("num_query",240)

        self.use_replica_to = False
        self.rate_limit = self.input.param('rate_limit', '100000')
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        load_thread.join()
 
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items/10
        items_start_from += total_num_items
         
        ########################################################################################################################
        self.log.info("Step 19: Multiple Connect/Disconnect CBAS buckets during ingestion in step 18.")
        self.connect_cbas_buckets()
        self.sleep(5)
        self.disconnect_cbas_buckets()
        self.connect_cbas_buckets()
        self.sleep(5)
        self.disconnect_cbas_buckets()
        self.connect_cbas_buckets()
        self.sleep(5)
        self.disconnect_cbas_buckets()
        self.connect_cbas_buckets()
        ########################################################################################################################
        self.log.info("Step 20: Verify the docs count.")
        self.validate_items_count()
         
        ########################################################################################################################
        self.log.info("Step 21: Run 500 complex queries concurrently and verify the results.")
        pool = Executors.newFixedThreadPool(5)
        num_query = self.input.param("num_query",500)
        executors=[]
        num_executors = 5
        query_executors = num_executors
        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
         
        self.log.info("Step 22: When 21 is in progress do a KV Rebalance out of 2 nodes.")
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [], self.kv_servers[1:2])
        nodes_in_cluster = [node for node in nodes_in_cluster if node not in self.kv_servers[1:2]]
         
        futures = pool.invokeAll(executors)
        self.log.info("Step 23: Wait for rebalance.")
        
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)
         
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
         
        ########################################################################################################################
        # Step 24: kick off a high-ops KV load (10M docs by default) on a
        # background thread, with query workers queued to run alongside it.
        self.log.info("Step 24: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        executors=[]
        num_executors = 2
        query_executors = num_executors

        total_num_items = self.input.param("num_items",1000000)
        num_query = self.input.param("num_query",240)

        self.use_replica_to = False
        self.rate_limit = self.input.param('rate_limit', '100000')
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()
        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))

        # NOTE(review): step numbering is out of order (Step 26 logged before 25).
        self.log.info("Step 26: Run 500 complex queries concurrently and verify the results.")
        executors.append(QueryRunner(random.choice(queries),500,self.cbas_util))


        ##################################################### NEED TO BE UPDATED ##################################################################
        self.log.info("Step 25: When 24 is in progress do a CBAS Rebalance in of 2 nodes.")
        # NOTE(review): data paths are set on cbas_servers[2:] but the rebalance
        # below adds cbas_servers[1:] -- cbas_servers[1] may be missing its
        # data/index/cbas paths. Confirm the slices agree.
        for node in self.cbas_servers[2:]:
            rest = RestConnection(node)
            rest.set_data_path(data_path=node.data_path,index_path=node.index_path,cbas_path=node.cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, self.cbas_servers[1:],[],services=["cbas","cbas"])
        nodes_in_cluster = nodes_in_cluster + self.cbas_servers[1:]
        futures = pool.invokeAll(executors)

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        load_thread.join()

        # Bookkeeping for the next mutate/delete phase: the first 10% of this
        # batch gets updated and the next 10% deleted.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items/10
        items_start_from += total_num_items

        self.log.info("Step 27: Wait for rebalance to complete.")

        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        ########################################################################################################################
        self.log.info("Step 28: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        # Step 29: run concurrent update (upsert) and delete threads against the
        # bucket while a single CBAS query worker runs in parallel.
        self.log.info("Step 29: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items",5000)
        executors=[]
        query_executors = 1
        num_executors = query_executors

        # NOTE(review): the upsert thread is also named "high_ops_delete" --
        # looks like a copy/paste of the thread name.
        upsert_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10,10000,4, updates_from,1, 0))
        delete_thread = Thread(target=self.delete_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10, self.rate_limit, 10000, 2, deletes_from,1))
        delete_thread.start()
        upsert_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        delete_thread.join()
        upsert_thread.join()
        ########################################################################################################################
        self.log.info("Step 30: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        # Step 31: another 10M-doc load with concurrent queries, this time with a
        # CBAS rebalance-out happening mid-load.
        self.log.info("Step 31: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items",1000000)
        num_query = self.input.param("num_query",240)

        self.use_replica_to = False
        self.rate_limit = self.input.param('rate_limit', '100000')
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()

        # NOTE(review): executors is not reset here, so the worker(s) queued for
        # Step 29 are re-submitted along with the new one -- confirm intended.
        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))

        ###################################################### NEED TO BE UPDATED ##################################################################
        self.log.info("Step 32: When 31 is in progress do a CBAS Rebalance out of 1 nodes.")
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [], self.cbas_servers[-1:])
        nodes_in_cluster = [node for node in nodes_in_cluster if node not in self.cbas_servers[-1:]]
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        load_thread.join()

        # Bookkeeping for the next mutate/delete phase.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items/10
        items_start_from += total_num_items
        #######################################################################################################################
        self.log.info("Step 33: Wait for rebalance to complete.")

        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        ########################################################################################################################
        # Step 35: concurrent upsert + delete threads plus one CBAS query worker,
        # then verify counts -- same pattern as Step 29.
        self.log.info("Step 35: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items",5000)
        executors=[]
        query_executors = 1
        num_executors = query_executors

        upsert_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10,10000,4, updates_from,1, 0))
        delete_thread = Thread(target=self.delete_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10, self.rate_limit, 10000, 2, deletes_from,1))
        delete_thread.start()
        upsert_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        delete_thread.join()
        upsert_thread.join()
        ########################################################################################################################
        self.log.info("Step 36: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        # Step 37: 10M-doc load with concurrent queries while a CBAS CC swap
        # rebalance runs (Step 38).
        self.log.info("Step 37: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items",1000000)
        num_query = self.input.param("num_query",240)

        self.use_replica_to = False
        self.rate_limit = self.input.param('rate_limit', '100000')
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))

        ###################################################### NEED TO BE UPDATED ##################################################################
        # Step 38: swap rebalance -- bring the last cbas server in and eject the
        # current CBAS CC node (self.cbas_node) in a single rebalance.
        self.log.info("Step 38: When 37 is in progress do a CBAS CC SWAP Rebalance of 2 nodes.")

        for node in self.cbas_servers[-1:]:
            rest = RestConnection(node)
            rest.set_data_path(data_path=node.data_path,index_path=node.index_path,cbas_path=node.cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,self.cbas_servers[-1:], [self.cbas_node],services=["cbas"],check_vbucket_shuffling=False)
        nodes_in_cluster += self.cbas_servers[-1:]
        nodes_in_cluster.remove(self.cbas_node)
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        load_thread.join()

        ########################################################################################################################
        self.log.info("Step 39: Wait for rebalance to complete.")

        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        # Bookkeeping for the next mutate/delete phase.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items/10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 40: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        # Step 41: concurrent upsert + delete threads plus one CBAS query worker,
        # then verify counts -- same pattern as Step 29.
        self.log.info("Step 41: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items",5000)
        executors=[]
        query_executors = 1
        num_executors = query_executors

        upsert_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10,10000,4, updates_from,1, 0))
        delete_thread = Thread(target=self.delete_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10, self.rate_limit, 10000, 2, deletes_from,1))
        delete_thread.start()
        upsert_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        delete_thread.join()
        upsert_thread.join()
        ########################################################################################################################
        self.log.info("Step 42: Verify the docs count.")
        self.validate_items_count() 

        ########################################################################################################################
        # Step 43: 10M-doc load with concurrent queries while a KV+CBAS
        # rebalance-in runs (Step 44).
        self.log.info("Step 43: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items",1000000)
        num_query = self.input.param("num_query",240)

        self.use_replica_to = False
        self.rate_limit = self.input.param('rate_limit', '100000')
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))

        ###################################################### NEED TO BE UPDATED ##################################################################
        self.log.info("Step 44: When 43 is in progress do a KV+CBAS Rebalance IN.")
        rest = RestConnection(self.cbas_node)
        rest.set_data_path(data_path=self.cbas_node.data_path,index_path=self.cbas_node.index_path,cbas_path=self.cbas_node.cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [self.cbas_node], [],services=["cbas"])
        nodes_in_cluster += [self.cbas_node]
        # NOTE(review): BUG -- 'reached' is stale here (left over from the Step 39
        # wait); no rebalance_reached() call precedes this assert, so it does not
        # actually wait for the CBAS rebalance above, and the KV rebalance below
        # may be started while the first is still in flight.
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        rest = RestConnection(self.kv_servers[1])
        rest.set_data_path(data_path=self.kv_servers[1].data_path,index_path=self.kv_servers[1].index_path,cbas_path=self.kv_servers[1].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [self.kv_servers[1]], [])
        nodes_in_cluster += [self.kv_servers[1]]
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        load_thread.join()
        ########################################################################################################################
        self.log.info("Step 45: Wait for rebalance to complete.")

        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        # Bookkeeping for the next mutate/delete phase.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items/10
        items_start_from += total_num_items        

        ########################################################################################################################
        self.log.info("Step 46: Verify the docs count.")
        self.validate_items_count() 

        ########################################################################################################################
        # Step 47: concurrent upsert + delete threads plus one CBAS query worker,
        # then verify counts -- same pattern as Step 29.
        self.log.info("Step 47: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items",5000)
        executors=[]
        query_executors = 1
        num_executors = query_executors

        upsert_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10,10000,4, updates_from,1, 0))
        delete_thread = Thread(target=self.delete_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10, self.rate_limit, 10000, 2, deletes_from,1))
        delete_thread.start()
        upsert_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        delete_thread.join()
        upsert_thread.join()
        ########################################################################################################################
        self.log.info("Step 48: Verify the docs count.")
        self.validate_items_count() 

        ########################################################################################################################
        # Step 49: 10M-doc load with concurrent queries while a CBAS
        # rebalance-out runs (Step 50).
        self.log.info("Step 49: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items",1000000)
        num_query = self.input.param("num_query",240)

        self.use_replica_to = False
        self.rate_limit = self.input.param('rate_limit', '100000')
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))

        ########################################################################################################################
        self.log.info("Step 50: When 49 is in progress do a CBAS Rebalance OUT.")
        # NOTE(review): this RestConnection is unused (its set_data_path call is
        # commented out below) -- leftover from an earlier KV-out variant of this
        # step; candidate for removal.
        rest = RestConnection(self.kv_servers[2])
#         rest.set_data_path(data_path=self.kv_servers[2].data_path,index_path=self.kv_servers[2].index_path,cbas_path=self.kv_servers[2].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [], self.cbas_servers[-1:])
#         
        nodes_in_cluster = [node for node in nodes_in_cluster if node not in self.cbas_servers[-1:]]
#         nodes_in_cluster.remove(self.kv_servers[1])

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        load_thread.join()
        ########################################################################################################################
        self.log.info("Step 51: Wait for rebalance to complete.")

        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)
        # Bookkeeping for the next mutate/delete phase.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items/10
        items_start_from += total_num_items  

        ########################################################################################################################
        self.log.info("Step 52: Verify the docs count.")
        self.validate_items_count() 

        ########################################################################################################################
        # Step 53: concurrent upsert + delete threads plus one CBAS query worker,
        # then verify counts -- same pattern as Step 29.
        self.log.info("Step 53: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items",5000)
        executors=[]
        query_executors = 1
        num_executors = query_executors

        upsert_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10,10000,4, updates_from,1, 0))
        delete_thread = Thread(target=self.delete_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10, self.rate_limit, 10000, 2, deletes_from,1))
        delete_thread.start()
        upsert_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        delete_thread.join()
        upsert_thread.join()
        ########################################################################################################################
        self.log.info("Step 54: Verify the docs count.")
        self.validate_items_count() 


        ########################################################################################################################
        # Step 55: 10M-doc load with concurrent queries while a CBAS
        # rebalance-in runs (Step 56).
        self.log.info("Step 55: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items",1000000)
        num_query = self.input.param("num_query",240)

        self.use_replica_to = False
        self.rate_limit = self.input.param('rate_limit', '100000')
        load_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_load",
                                 args=(self.master, GleambookUsers, total_num_items,50,4, items_start_from,2, 0))
        self.log.info('starting the load thread...')
        load_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))

        ########################################################################################################################
        self.log.info("Step 56: When 55 is in progress do a CBAS Rebalance IN.")
        for node in self.cbas_servers[-1:]:
            rest = RestConnection(node)
            rest.set_data_path(data_path=node.data_path,index_path=node.index_path,cbas_path=node.cbas_path)
        # NOTE(review): the two lines below repeat the set_data_path just done by
        # the loop above for the same node -- redundant; candidate for removal.
        rest = RestConnection(self.cbas_servers[-1])
        rest.set_data_path(data_path=self.cbas_servers[-1].data_path,index_path=self.cbas_servers[-1].index_path,cbas_path=self.cbas_servers[-1].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, self.cbas_servers[-1:], [], services=["cbas"])
        nodes_in_cluster += self.cbas_servers[-1:]

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        load_thread.join()
        ########################################################################################################################
        self.log.info("Step 57: Wait for rebalance to complete.")

        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        # Bookkeeping for the next mutate/delete phase.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items/10
        items_start_from += total_num_items  

        ########################################################################################################################
        self.log.info("Step 58: Verify the docs count.")
        self.validate_items_count() 

        ########################################################################################################################
        # Step 59: final concurrent upsert + delete phase plus one CBAS query
        # worker, then a last doc-count verification and end-time stamp.
        self.log.info("Step 59: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items",5000)
        executors=[]
        query_executors = 1
        num_executors = query_executors

        upsert_thread = Thread(target=self.load_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10,10000,4, updates_from,1, 0))
        delete_thread = Thread(target=self.delete_buckets_with_high_ops,
                                 name="high_ops_delete",
                                 args=(self.master, GleambookUsers, num_items/10, self.rate_limit, 10000, 2, deletes_from,1))
        delete_thread.start()
        upsert_thread.start()

        for i in xrange(query_executors):
            executors.append(QueryRunner(random.choice(queries),num_query,self.cbas_util))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        delete_thread.join()
        upsert_thread.join()
        ########################################################################################################################
        self.log.info("Step 60: Verify the docs count.")
        self.validate_items_count() 


        print "End Time: %s"%str(time.strftime("%H:%M:%S", time.gmtime(time.time())))