def test_volume(self):
        """Volume test: build a 3-node cluster, bulk-load the GleambookUsers
        bucket through the Couchbase Java SDK from a fixed thread pool, run
        an update/delete cycle, then reload while rebalancing in a 4th node.

        NOTE: Python 2 / Jython code -- relies on `print` statements,
        `xrange`, integer `/` division, and java.util.concurrent classes.
        """
        nodes_in_cluster = [self.servers[0]]
        print "Start Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))

        ########################################################################################################################
        # Cluster build: add a second node (no services list passed, so
        # add_node's default services apply despite the N1QL/Index label).
        self.log.info("Add a N1QL/Index nodes")
        self.query_node = self.servers[1]
        rest = RestConnection(self.query_node)
        # Configure disk paths on the node before it joins the cluster.
        rest.set_data_path(data_path=self.query_node.data_path,
                           index_path=self.query_node.index_path,
                           cbas_path=self.query_node.cbas_path)
        result = self.add_node(self.query_node, rebalance=False)
        self.assertTrue(result, msg="Failed to add N1QL/Index node.")

        # Add a KV node and rebalance both pending nodes in at once.
        self.log.info("Add a KV nodes")
        result = self.add_node(self.servers[2],
                               services=["kv"],
                               rebalance=True)
        self.assertTrue(result, msg="Failed to add KV node.")

        nodes_in_cluster = nodes_in_cluster + [
            self.servers[1], self.servers[2]
        ]
        ########################################################################################################################
        self.log.info("Step 2: Create Couchbase buckets.")
        self.create_required_buckets()
        # Warm up (restart) every node and wait for the service to come
        # back before loading any data.
        for node in nodes_in_cluster:
            NodeHelper.do_a_warm_up(node)
            NodeHelper.wait_service_started(node)
        ########################################################################################################################
        self.log.info(
            "Step 3: Create 10M docs average of 1k docs for 8 couchbase buckets."
        )
        # Java SDK environment with generous timeouts (values are in ms)
        # so the bulk load tolerates slow nodes.
        env = DefaultCouchbaseEnvironment.builder().mutationTokensEnabled(
            True).computationPoolSize(5).socketConnectTimeout(
                100000).connectTimeout(100000).maxRequestLifetime(
                    TimeUnit.SECONDS.toMillis(300)).build()
        cluster = CouchbaseCluster.create(env, self.master.ip)
        cluster.authenticate("Administrator", "password")
        bucket = cluster.openBucket("GleambookUsers")

        # Fan the initial load out over 5 doc-loader tasks; each loader
        # owns its own contiguous key range.
        pool = Executors.newFixedThreadPool(5)
        items_start_from = 0
        total_num_items = self.input.param("num_items", 5000)

        executors = []
        num_executors = 5
        doc_executors = 5
        num_items = total_num_items / num_executors  # Python 2 integer division
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket,
                                        num_items,
                                        items_start_from + i * num_items,
                                        batch_size=2000))
        futures = pool.invokeAll(executors)
        for future in futures:
            # Future.get(timeout, unit): the timeout here is num_executors
            # (5) seconds -- a very short cap; presumably intentional since
            # invokeAll has already waited for completion.
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        # Key-range bookkeeping for the following update/delete cycle:
        # updates start at the beginning, deletes 10% further in.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items
        ########################################################################################################################
        self.sleep(120, "Sleeping after 1st cycle.")
        self.log.info("Step 8: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        # NOTE(review): doc_executors is set to 4 here but only two loader
        # tasks are appended below -- the variable is unused in this cycle.
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.sleep(120, "Sleeping after 2nd cycle.")
        # Third cycle: load a fresh key range while concurrently
        # rebalancing in servers[3].
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 5
        num_items = total_num_items / doc_executors

        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket,
                                        num_items,
                                        items_start_from + i * num_items,
                                        batch_size=2000))
        # Start the rebalance-in first so it overlaps with the load below.
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[3]], [])
        futures = pool.invokeAll(executors)

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        # NOTE(review): servers[3] is never appended to nodes_in_cluster
        # after the rebalance-in -- harmless here because the list is not
        # used again, but it would matter if more steps were added.

        bucket.close()
        cluster.disconnect()

        print "End Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))
# ---- Example 2 ----
    def test_volume(self):
        """Long-running volume test: repeated load / verify / rebalance
        cycles against a Couchbase cluster through the Java SDK.

        Outline: build a 3-node cluster and buckets (Steps 1-3), then
        alternate data loads (self.load_data) and mutations
        (self.update_data) with rebalance in/out of servers[2..4]
        (Steps 4-35), add three more nodes (Step 36+), and finish with
        graceful failovers plus delta/full recovery of two of the added
        nodes (Steps 40-50). Data is validated after every cycle via
        validate_items_count() and check_snap_start_corruption().

        NOTE: Python 2 / Jython code (print statements, Java SDK classes).
        """
        nodes_in_cluster = [self.servers[0]]
        print "Start Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))

        #######################################################################
        self.log.info("Step 1: Add a N1QL/Index nodes")
        self.query_node = self.servers[1]
        rest = RestConnection(self.query_node)
        # Configure disk paths on the node before it joins the cluster.
        rest.set_data_path(data_path=self.query_node.data_path,
                           index_path=self.query_node.index_path,
                           cbas_path=self.query_node.cbas_path)
        result = self.add_node(self.query_node, rebalance=False)
        self.assertTrue(result, msg="Failed to add N1QL/Index node.")

        self.log.info("Step 2: Add a KV nodes")
        result = self.add_node(self.servers[2],
                               services=["kv"],
                               rebalance=True)
        self.assertTrue(result, msg="Failed to add KV node.")

        nodes_in_cluster = nodes_in_cluster + [
            self.servers[1], self.servers[2]
        ]

        #######################################################################

        self.log.info("Step 3: Create Couchbase buckets.")
        self.create_required_buckets()

        #######################################################################

        # Java SDK environment with very generous timeouts (values in ms)
        # so the long mutation cycles do not trip client-side request
        # lifetimes.
        env = DefaultCouchbaseEnvironment.builder().mutationTokensEnabled(
            True).computationPoolSize(5).socketConnectTimeout(
                10000000).connectTimeout(10000000).maxRequestLifetime(
                    TimeUnit.SECONDS.toMillis(1200)).build()

        try:
            System.setProperty("com.couchbase.forceIPv4", "false")
            # Silence the chatty Java SDK client logger down to SEVERE.
            logger = Logger.getLogger("com.couchbase.client")
            logger.setLevel(Level.SEVERE)
            for h in logger.getParent().getHandlers():
                if isinstance(h, ConsoleHandler):
                    h.setLevel(Level.SEVERE)

            cluster = CouchbaseCluster.create(env, self.master.ip)
            cluster.authenticate("Administrator", "password")
            self.bucket = cluster.openBucket("GleambookUsers")
            self.msg_bucket = cluster.openBucket("GleambookMessages")
        except CouchbaseException:
            # NOTE(review): the message reports self.username/self.password
            # although the hard-coded Administrator/password pair above is
            # what was actually used -- confirm which credentials apply.
            print "cannot login from user: %s/%s" % (self.username,
                                                     self.password)
            raise

        # Shared state consumed by the load_data()/update_data() helpers.
        self.c = cluster
        self.items_start_from = 0
        self.total_num_items = self.input.param("num_items", 5000)
        self.load_data()

        self.sleep(20, "Sleeping after 4th step.")

        self.validate_items_count()

        self.log.info("Step 4: Add node")
        result = self.add_node(self.servers[3], rebalance=False)
        self.assertTrue(result, msg="Failed to add node.")
        self.log.info("Step 5: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 6: Rebalance Cluster")
        rebalance = self.rebalance()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        nodes_in_cluster = nodes_in_cluster + [self.servers[3]]

        self.log.info("Step 7: Start Verification")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 8: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 9: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.log.info("Step 10: Removing node and Rebalance cluster")
        # Rebalance out servers[3] while loading data concurrently below.
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 [self.servers[3]])
        nodes_in_cluster.remove(self.servers[3])

        self.log.info("Step 11: Loading %s items" % self.total_num_items)
        self.load_data()

        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 12: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 13: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 14: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 15: Add node")
        # NOTE(review): unlike Step 4, the add_node result is not asserted
        # here (same for Steps 21 and 27 below).
        result = self.add_node(self.servers[3], rebalance=False)
        nodes_in_cluster = nodes_in_cluster + [self.servers[3]]

        self.log.info("Step 16: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 17: Rebalancing Cluster")
        # Despite the generic label this is a rebalance OUT of servers[2].
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 [self.servers[2]])

        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        nodes_in_cluster.remove(self.servers[2])

        self.log.info("Step 18: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 19: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 20: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 21: Add node")
        result = self.add_node(self.servers[2], rebalance=False)

        self.log.info("Step 22: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 23: Rebalancing Cluster")
        rebalance = self.rebalance()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        nodes_in_cluster = nodes_in_cluster + [self.servers[2]]

        self.log.info("Step 24: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 25: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 26: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)

        self.log.info("Step 27: Add node")
        result = self.add_node(self.servers[4], rebalance=False)

        self.log.info("Step 28: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 29: Rebalancing Cluster")
        rebalance = self.rebalance()
        nodes_in_cluster = nodes_in_cluster + [self.servers[4]]
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 30: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 31: Delete/Update docs.")
        self.update_data()

        self.log.info("Step 32: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)
        self.log.info("Step 33: Removing node, Rebalancing Cluster")
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 [self.servers[3]])
        nodes_in_cluster.remove(self.servers[3])

        self.log.info("Step 34: Loading %s items" % self.total_num_items)
        self.load_data()

        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 35: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.sleep(20)

        self.log.info("Step 36: Adding 3 nodes")
        # NOTE(review): otp1 is captured but never used afterwards; only
        # otp2/otp3 are failed over and recovered below.
        otp1 = self.add_node(self.servers[5], rebalance=False)
        otp2 = self.add_node(self.servers[6], rebalance=False)
        otp3 = self.add_node(self.servers[7], rebalance=False)

        self.log.info("Step 37: Loading %s items" % self.total_num_items)
        self.load_data()

        self.log.info("Step 38: Rebalancing Cluster")
        rebalance = self.rebalance()
        nodes_in_cluster = nodes_in_cluster + [
            self.servers[5], self.servers[6], self.servers[7]
        ]
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 39: Verifying Data")
        self.validate_items_count()
        self.check_snap_start_corruption()

        #######################################################################
        self.log.info("Step 40: Graceful failover node")
        self.rest.fail_over(otp3.id, graceful=True)
        self.log.info("Step 41: Loading %s items" % self.total_num_items)
        self.load_data()
        self.sleep(10)
        # rebalance_reached is used here to wait for the graceful failover
        # (which runs as a rebalance) to finish.
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 42: Rebalancing Cluster")
        rebalance = self.rebalance()
        nodes_in_cluster.remove(self.servers[7])
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        #######################################################################
        self.log.info("Step 43: Adding node and rebalancing")
        otp3 = self.add_node(self.servers[7], rebalance=True)
        nodes_in_cluster = nodes_in_cluster + [self.servers[7]]

        #######################################################################

        self.log.info("Step 44: Graceful failover node")
        self.rest.fail_over(otp3.id, graceful=True)
        # NOTE(review): the "Step 41" label below is a copy-paste of the
        # earlier step number; the surrounding steps are 44/45.
        self.log.info("Step 41: Loading %s items" % self.total_num_items)
        self.load_data()
        self.sleep(10)
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 45: Delta recover node")
        self.rest.set_recovery_type(otp3.id, "delta")

        self.log.info("Step 46: Add node back to cluster")
        self.rest.add_back_node(otp3.id)

        rebalance = self.rebalance()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 47: Graceful failover node")
        self.rest.fail_over(otp2.id, graceful=True)
        self.log.info("Step 48: Loading %s items" % self.total_num_items)
        self.load_data()
        self.sleep(10)
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        self.log.info("Step 49: Delta recover node")
        # NOTE(review): the log message says "Delta recover" but the
        # recovery type requested here is "full" -- confirm which one this
        # step is meant to exercise.
        self.rest.set_recovery_type(otp2.id, "full")

        self.log.info("Step 50: Add node back to cluster")
        self.rest.add_back_node(otp2.id)

        rebalance = self.rebalance()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        # Teardown: close both SDK bucket handles and drop the connection.
        self.bucket.close()
        self.msg_bucket.close()
        cluster.disconnect()
# ---- Example 3 ----
    def test_ups_volume(self):
        nodes_in_cluster = [self.servers[0]]
        print "Start Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))

        ########################################################################################################################
        self.log.info("Add a KV nodes - 2")
        self.query_node = self.servers[1]
        rest = RestConnection(self.servers[1])
        rest.set_data_path(data_path=self.servers[1].data_path,
                           index_path=self.servers[1].index_path,
                           cbas_path=self.servers[1].cbas_path)
        result = self.add_node(self.servers[1], rebalance=False)
        self.assertTrue(result, msg="Failed to add N1QL/Index node.")

        self.log.info("Add a KV nodes - 3")
        rest = RestConnection(self.servers[2])
        rest.set_data_path(data_path=self.kv_servers[1].data_path,
                           index_path=self.kv_servers[1].index_path,
                           cbas_path=self.kv_servers[1].cbas_path)
        result = self.add_node(self.kv_servers[1],
                               services=["kv"],
                               rebalance=False)
        self.assertTrue(result, msg="Failed to add KV node.")

        self.log.info("Add one more KV node")
        rest = RestConnection(self.servers[3])
        rest.set_data_path(data_path=self.kv_servers[3].data_path,
                           index_path=self.kv_servers[3].index_path,
                           cbas_path=self.kv_servers[3].cbas_path)
        result = self.add_node(self.kv_servers[3],
                               services=["kv"],
                               rebalance=False)
        self.assertTrue(result, msg="Failed to add KV node.")

        self.log.info("Add one more KV node")
        rest = RestConnection(self.servers[4])
        rest.set_data_path(data_path=self.kv_servers[4].data_path,
                           index_path=self.kv_servers[4].index_path,
                           cbas_path=self.kv_servers[4].cbas_path)
        result = self.add_node(self.kv_servers[4],
                               services=["kv"],
                               rebalance=False)
        self.assertTrue(result, msg="Failed to add KV node.")

        nodes_in_cluster = nodes_in_cluster + [
            self.servers[1], self.servers[2], self.servers[3], self.servers[4]
        ]
        ########################################################################################################################
        self.log.info("Step 2: Create Couchbase buckets.")
        self.create_required_buckets()

        ########################################################################################################################
        self.log.info(
            "Step 3: Create 10M docs average of 1k docs for 8 couchbase buckets."
        )
        env = DefaultCouchbaseEnvironment.builder().mutationTokensEnabled(
            True).computationPoolSize(5).socketConnectTimeout(
                100000).connectTimeout(100000).maxRequestLifetime(
                    TimeUnit.SECONDS.toMillis(300)).build()
        cluster = CouchbaseCluster.create(env, self.master.ip)
        cluster.authenticate("Administrator", "password")
        bucket = cluster.openBucket("GleambookUsers")
        msg_bucket = cluster.openBucket("GleambookMessages")

        pool = Executors.newFixedThreadPool(5)
        items_start_from = 0
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 5
        num_items = total_num_items / num_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 6: Verify the items count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 8: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        ########################################################################################################################
        self.log.info("Step 9: Connect cbas buckets.")
        self.connect_cbas_buckets()
        self.sleep(10, "Wait for the ingestion to complete")

        ########################################################################################################################
        self.log.info("Step 10: Verify the items count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info(
            "Step 12: When 11 is in progress do a KV Rebalance in of 1 nodes.")
        rest = RestConnection(self.servers[5])
        rest.set_data_path(data_path=self.servers[5].data_path,
                           index_path=self.servers[5].index_path,
                           cbas_path=self.servers[5].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[5]], [])
        nodes_in_cluster += [self.servers[2]]
        ########################################################################################################################
        self.log.info("Step 11: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 13: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        ########################################################################################################################
        self.log.info("Step 14: Verify the items count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 15: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        ########################################################################################################################
        self.log.info(
            "Step 16: Verify Results that 1M docs gets deleted from analytics datasets."
        )
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 17: Disconnect CBAS buckets.")
        self.disconnect_cbas_buckets()

        ########################################################################################################################
        self.log.info("Step 18: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 20: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        pool = Executors.newFixedThreadPool(5)
        executors = []
        num_executors = 5

        self.log.info(
            "Step 22: When 21 is in progress do a KV Rebalance out of 2 nodes."
        )
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 self.servers[1:3])
        nodes_in_cluster = [
            node for node in nodes_in_cluster if node not in self.servers[1:3]
        ]

        futures = pool.invokeAll(executors)
        self.log.info("Step 23: Wait for rebalance.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 24: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        # NOTE(review): num_executors is 6 here although 8 loader tasks are
        # appended below; it is only used as the per-future timeout and pool
        # shutdown timeout -- confirm the intended value.
        num_executors = 6
        doc_executors = 4
        num_items = total_num_items / doc_executors
        # Split the load across 4 user-doc and 4 message-doc loaders, each
        # writing a disjoint key range starting at items_start_from.
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ##################################################### NEED TO BE UPDATED ##################################################################
        self.log.info(
            "Step 25: When 24 is in progress do a KV Rebalance in of 2 nodes.")
        # Re-provision data/index/cbas paths on the two nodes rebalanced out in
        # step 22 before adding them back.
        for node in self.servers[1:3]:
            rest = RestConnection(node)
            rest.set_data_path(data_path=node.data_path,
                               index_path=node.index_path,
                               cbas_path=node.cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 self.servers[1:3], [])
        nodes_in_cluster = nodes_in_cluster + self.servers[1:3]
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        # Bookkeeping for the next update/delete step: mutate the first 10% of
        # the docs just created, then advance the key-range start.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        self.log.info("Step 27: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        ########################################################################################################################
        self.log.info("Step 28: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 29: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        ########################################################################################################################
        self.log.info("Step 30: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 31: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        # 4 user-doc + 4 message-doc loaders over disjoint key ranges.
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ###################################################### NEED TO BE UPDATED ##################################################################
        self.log.info(
            "Step 32: When 31 is in progress do a KV Rebalance out of 2 nodes."
        )
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 self.servers[1:3])
        # Drop the rebalanced-out nodes from our bookkeeping list.
        nodes_in_cluster = [
            node for node in nodes_in_cluster if node not in self.servers[1:3]
        ]
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        # Advance mutation bookkeeping past the docs created in this step.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items
        ########################################################################################################################
        self.log.info("Step 33: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        ########################################################################################################################
        self.log.info("Step 34: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 35: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        # Mutate the first 10% of the docs created in step 31: updates and
        # deletes operate on disjoint key ranges.
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 36: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 37: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ###################################################### NEED TO BE UPDATED ##################################################################
        self.log.info(
            "Step 38: When 37 is in progress do a CBAS SWAP Rebalance of 2 nodes."
        )
        for node in self.cbas_servers[-1:]:
            rest = RestConnection(node)
            rest.set_data_path(data_path=node.data_path,
                               index_path=node.index_path,
                               cbas_path=node.cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 self.servers[6],
                                                 [self.servers[5]],
                                                 services=["kv"],
                                                 check_vbucket_shuffling=False)
        nodes_in_cluster += self.servers[6]
        nodes_in_cluster.remove(self.servers[5])
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 39: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 40: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 41: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        # Mutate the first 10% of the docs created in step 37: updates and
        # deletes operate on disjoint key ranges.
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 42: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 43: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ###################################################### NEED TO BE UPDATED ##################################################################
        self.log.info("Step 44: When 43 is in progress do a KV Rebalance IN.")
        rest = RestConnection(self.servers[5])
        rest.set_data_path(data_path=self.servers[5].data_path,
                           index_path=self.servers[5].index_path,
                           cbas_path=self.servers[5].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[5]], [],
                                                 services=["kv"])
        nodes_in_cluster += [self.servers[5]]
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 45: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 46: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 47: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        # Mutate the first 10% of the docs created in step 43: updates and
        # deletes operate on disjoint key ranges.
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 48: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 49: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ########################################################################################################################
        self.log.info(
            "Step 50: When 49 is in progress do a KV+CBAS Rebalance OUT.")
        rest = RestConnection(self.servers[6])
        rest.set_data_path(data_path=self.servers[6].data_path,
                           index_path=self.servers[6].index_path,
                           cbas_path=self.kv_servers[6].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster, [],
                                                 [self.servers[6]])
        nodes_in_cluster.remove(self.servers[6])

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 51: Wait for rebalance to complete.")
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 52: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 53: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        # Mutate the first 10% of the docs created in step 49: updates and
        # deletes operate on disjoint key ranges.
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 54: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 55: Create 10M docs.")
        pool = Executors.newFixedThreadPool(5)
        total_num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4
        num_items = total_num_items / doc_executors
        # 4 user-doc + 4 message-doc loaders over disjoint key ranges.
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket, num_items,
                                        items_start_from + i * num_items))
            executors.append(
                GleambookMessages_Docloader(msg_bucket, num_items,
                                            items_start_from + i * num_items))

        ########################################################################################################################
        self.log.info(
            "Step 56: When 55 is in progress do a KV+CBAS SWAP Rebalance .")
        # Provision paths on the incoming node, then swap servers[7] in for
        # servers[6] while the doc load runs.
        rest = RestConnection(self.servers[7])
        rest.set_data_path(data_path=self.servers[7].data_path,
                           index_path=self.servers[7].index_path,
                           cbas_path=self.servers[7].cbas_path)
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[7]],
                                                 [self.servers[6]])
        nodes_in_cluster.remove(self.servers[6])
        nodes_in_cluster += [self.servers[7]]

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 57: Wait for rebalance to complete.")
        rebalance.get_result()
        # The swap rebalance gets a longer wait_step (240s) than other steps.
        reached = RestHelper(self.rest).rebalance_reached(wait_step=240)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")
        self.sleep(20)

        # Advance mutation bookkeeping past the docs created in this step.
        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items

        ########################################################################################################################
        self.log.info("Step 58: Verify the docs count.")
        self.validate_items_count()

        ########################################################################################################################
        self.log.info("Step 59: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        # Mutate the first 10% of the docs created in step 55: updates and
        # deletes operate on disjoint key ranges.
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        updates_from, "update"))
        executors.append(
            GleambookMessages_Docloader(msg_bucket, num_items / 10,
                                        deletes_from, "delete"))

        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.log.info("Step 60: Verify the docs count.")
        self.validate_items_count()

        # Final cleanup: close the SDK bucket handles and disconnect the
        # cluster connection opened at the start of the test.
        bucket.close()
        msg_bucket.close()
        cluster.disconnect()

        print "End Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))
Ejemplo n.º 4
0
'''
Java based SDK client interface

@author: riteshagarwal

'''
from com.couchbase.client.java import CouchbaseCluster
from com.couchbase.client.core import CouchbaseException
from java.util.logging import Logger, Level, ConsoleHandler, FileHandler
from com.couchbase.client.java.env import DefaultCouchbaseEnvironment
from com.couchbase.client.core.retry import BestEffortRetryStrategy
from com.couchbase.client.core.env import QueryServiceConfig

# Shared Couchbase SDK environment: 300s max request lifetime, query service
# endpoint pool of (2, 100), mutation tokens enabled, computation pool of 5,
# 100s socket/connect and keep-alive timeouts, 10s autorelease, and a
# best-effort retry strategy.
env = DefaultCouchbaseEnvironment.builder().maxRequestLifetime(
    300000).queryServiceConfig(QueryServiceConfig.create(
        2, 100)).mutationTokensEnabled(True).computationPoolSize(
            5).socketConnectTimeout(100000).keepAliveTimeout(
                100000).keepAliveInterval(100000).connectTimeout(
                    100000).autoreleaseAfter(10000).retryStrategy(
                        BestEffortRetryStrategy.INSTANCE).build()


class SDKClient(object):
    """Java SDK Client Implementation for testrunner - master branch Implementation"""
    def __init__(self, server_ip, username, password):
        """Store connection coordinates; no network connection is made here.

        Cluster handles stay None until connectCluster() is called.
        """
        self.ip = server_ip
        self.username = username
        self.password = password
        self.cluster = None
        self.clusterManager = None

    def connectCluster(self, username=None, password=None):
Ejemplo n.º 5
0
'''
Created on Nov 6, 2017
Java based SDK client interface

@author: riteshagarwal

'''
from com.couchbase.client.java import CouchbaseCluster
from com.couchbase.client.core import CouchbaseException
from java.util.logging import Logger, Level, ConsoleHandler
from com.couchbase.client.java.env import DefaultCouchbaseEnvironment
from java.util.concurrent import TimeUnit
from java.lang import System

# Shared Couchbase SDK environment: mutation tokens enabled, computation pool
# of 5, 300s max request lifetime, and 100s socket/connect timeouts.
env = DefaultCouchbaseEnvironment.builder().mutationTokensEnabled(
    True).computationPoolSize(5).maxRequestLifetime(
        TimeUnit.SECONDS.toMillis(300)).socketConnectTimeout(
            100000).connectTimeout(100000).build()


class SDKClient(object):
    """Java SDK Client Implementation for testrunner - master branch Implementation"""
    def __init__(self, server):
        """Capture the server object and its REST credentials.

        Cluster handles stay None until a connect method is called.
        """
        self.server = server
        self.username = self.server.rest_username
        self.password = self.server.rest_password
        self.cluster = None
        self.clusterManager = None

    def __del__(self):
        # Best-effort cleanup: release the cluster connection when the client
        # object is garbage-collected.
        self.disconnectCluster()