Example 1
    def test_restart_cb(self):
        '''
        Description: This test will restart CB and verify that CBAS is also up and running with CB.
        
        Steps:
        1. Add first cbas node.
        2. Start rebalance, wait for rebalance complete.
        3. Stop the Couchbase service, then start it again. Wait for the service to come up.
        4. Verify that the CBAS service is also up. Create bucket, datasets, connect bucket. Data ingestion should start.
        
        Author: Ritesh Agarwal
        '''
        self.load_sample_buckets(bucketName=self.cb_bucket_name,
                                 total_items=self.travel_sample_docs_count)
        self.add_node(self.cbas_servers[0], services=["cbas"])

        NodeHelper.stop_couchbase(self.cbas_servers[0])
        NodeHelper.start_couchbase(self.cbas_servers[0])

        self.log.info("Wait for cluster to be active")
        self.assertTrue(self.cbas_util.wait_for_cbas_to_recover(),
                        msg="Analytics service unavailable")

        self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name,
                                               self.travel_sample_docs_count)
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.travel_sample_docs_count),
            "Data loss in CBAS.")
Example 2
    def test_restart_of_all_nodes(self):

        self.log.info("Add nodes, create cbas bucket and dataset")
        self.set_up_test()

        self.log.info("Wait for ingestion to complete and verify count")
        self.cbas_util.wait_for_ingestion_complete([self.dataset_name],
                                                   self.num_items)
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                self.dataset_name, self.num_items))

        self.log.info("Restart nodes")
        restart_kv = self.input.param("restart_kv", True)
        restart_cbas = self.input.param("restart_cbas", True)
        self.restart_servers = []

        if restart_kv:
            for kv_server in self.kv_servers:
                self.restart_servers.append(kv_server)
        if restart_cbas:
            self.restart_servers.append(self.cbas_node)
            for cbas_server in self.cbas_servers:
                self.restart_servers.append(cbas_server)

        for restart_node in self.restart_servers:
            NodeHelper.reboot_server_new(restart_node, self)
        self.sleep(15, message="Wait for services to be up and accepting requests")

        self.log.info("Add more documents in the default bucket")
        self.perform_doc_ops_in_all_cb_buckets(self.num_items,
                                               "create",
                                               self.num_items,
                                               self.num_items * 2,
                                               exp=0,
                                               batch_size=self.batch_size)

        self.log.info("Wait for ingestion to complete and verify count")
        self.cbas_util.wait_for_ingestion_complete([self.dataset_name],
                                                   self.num_items * 2)
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                self.dataset_name, self.num_items * 2))

        self.log.info("Delete documents in the default bucket")
        self.perform_doc_ops_in_all_cb_buckets(self.num_items,
                                               "delete",
                                               0,
                                               self.num_items,
                                               exp=0,
                                               batch_size=self.batch_size)

        self.log.info("Wait for ingestion to complete and verify count")
        self.cbas_util.wait_for_ingestion_complete([self.dataset_name],
                                                   self.num_items)
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                self.dataset_name, self.num_items))
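
The wait-for-ingestion/validate-count pair occurs three times in this test. A small helper could factor it out; below is a sketch under the same cbas_util API (the helper name is hypothetical).

    def wait_and_validate_count(self, dataset_name, expected_items):
        # Wait until ingestion catches up, then assert the dataset count.
        self.cbas_util.wait_for_ingestion_complete([dataset_name],
                                                   expected_items)
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                dataset_name, expected_items),
            "Dataset %s does not hold %s items" % (dataset_name,
                                                   expected_items))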
Example 3
    def test_restart_kv_server_impact_on_bucket(self):

        self.log.info(
            'Add documents, create CBAS buckets, dataset and validate count')
        self.setup_for_test()

        self.log.info('Restart couchbase')
        NodeHelper.reboot_server_new(self.master, self)

        self.log.info('Validate document count')
        count_n1ql = self.rest.query_tool(
            'select count(*) from `%s`' %
            (self.cb_bucket_name))['results'][0]['$1']
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql),
            msg="Count mismatch between CBAS dataset and CB bucket")
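
Several tests in this suite compare the N1QL bucket count against the Analytics dataset count. A sketch of that comparison as an asserting helper (hypothetical name, same rest and cbas_util APIs as above):

    def assert_cbas_matches_n1ql(self, bucket_name, dataset_name):
        # Query the KV bucket count via N1QL and compare it with the
        # Analytics dataset count; fail loudly on a mismatch.
        count_n1ql = self.rest.query_tool(
            'select count(*) from `%s`' % bucket_name)['results'][0]['$1']
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                dataset_name, count_n1ql),
            msg="CBAS count does not match N1QL count %s" % count_n1ql)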
Example 4
    def test_reboot_cbas(self):
        '''
        Description: This test will reboot the cbas node after data ingestion
        starts and verify that it can still serve queries with no data loss.

        Steps:
        1. Add first cbas node.
        2. Start rebalance, wait for rebalance complete.
        3. Create bucket, datasets, connect bucket. Data ingestion should start.
        4. Reboot CBAS node added in Step 1.
        5. After reboot the cbas node should be able to serve queries; validate items count.

        Author: Ritesh Agarwal
        '''
        self.load_sample_buckets(bucketName=self.cb_bucket_name,
                                 total_items=self.travel_sample_docs_count)
        self.add_node(self.cbas_node, services=["kv", "cbas"])
        self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name,
                                               self.travel_sample_docs_count)

        NodeHelper.reboot_server_new(self.cbas_node, self)

        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.travel_sample_docs_count),
            "Data loss in CBAS.")
Example 5
    def test_primary_cbas_shutdown(self):
        '''
        Description: This test will shut down the primary (first) cbas node,
        fail it over, and verify that the remaining cbas node keeps serving
        queries.

        Steps:
        1. Add first cbas node.
        2. Start rebalance, wait for rebalance complete.
        3. Create bucket, datasets, connect bucket. Data ingestion should start.
        4. Add another cbas node, rebalance.
        5. Stop Couchbase service for Node1 added in step 1. Failover the node and rebalance.
        6. Second cbas node added in step 4 should be able to serve queries.

        Author: Ritesh Agarwal
        '''
        self.load_sample_buckets(bucketName=self.cb_bucket_name,
                                 total_items=self.travel_sample_docs_count)
        otpNode = self.add_node(self.cbas_servers[0], services=["cbas"])
        self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name,
                                               self.travel_sample_docs_count)
        self.add_node(self.cbas_servers[1], services=["cbas"])
        NodeHelper.stop_couchbase(self.cbas_servers[0])
        self.rest.fail_over(otpNode=otpNode.id)
        self.rebalance()

        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.cbas_util._run_concurrent_queries(
            query,
            "immediate",
            100,
            rest=RestConnection(self.cbas_servers[1]),
            batch_size=self.concurrent_batch_size)
        NodeHelper.start_couchbase(self.cbas_servers[0])
        NodeHelper.wait_service_started(self.cbas_servers[0])
Example 6

    def test_reboot_nodes(self):
        # Test reboot of the CC node, the NC nodes, or all cbas nodes.
        self.setup_for_test(skip_data_loading=True)
        self.ingestion_in_progress()
        self.node_type = self.input.param('node_type', 'CC')

        replica_nodes_before_reboot = self.cbas_util.get_replicas_info(self.shell)
        replicas_before_reboot = len(replica_nodes_before_reboot)
        if self.node_type == "CC":
            NodeHelper.reboot_server(self.cbas_node, self)
        elif self.node_type == "NC":
            for server in self.cbas_servers:
                NodeHelper.reboot_server(server, self)
        else:
            NodeHelper.reboot_server(self.cbas_node, self)
            for server in self.cbas_servers:
                NodeHelper.reboot_server(server, self)

        self.sleep(60)
        replica_nodes_after_reboot = self.cbas_util.get_replicas_info(self.shell)
        replicas_after_reboot = len(replica_nodes_after_reboot)

        self.assertTrue(replica_nodes_after_reboot == replica_nodes_before_reboot,
                        "Replica nodes changed after reboot. Before: %s, After: %s"
                        % (replica_nodes_before_reboot, replica_nodes_after_reboot))
        self.assertTrue(replicas_after_reboot == replicas_before_reboot,
                        "Number of replicas changed after reboot. Before: %s, After: %s"
                        % (replicas_before_reboot, replicas_after_reboot))

        items_in_cbas_bucket = 0
        start_time = time.time()
        while (items_in_cbas_bucket == 0 or items_in_cbas_bucket == -1) \
                and time.time() < start_time + 60:
            try:
                items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(
                    self.cbas_dataset_name)
            except Exception:
                pass
            self.sleep(1)

        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.cbas_util._run_concurrent_queries(query, "immediate", 100)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 2):
            self.fail("No. of items in CBAS dataset does not match that in the CB bucket")

        for replica in replica_nodes_after_reboot:
            self.log.info("Replica state after reboot: %s" % replica['status'])
            self.assertEqual(replica['status'], "IN_SYNC",
                             "Replica state is incorrect: %s" % replica['status'])
        self.ingest_more_data()
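
The timeout loop above (also used in later tests) tolerates get_num_items_in_cbas_dataset raising or returning -1 while the service bootstraps. A sketch of it as a shared helper (hypothetical name; `time` is already imported by these tests):

    def wait_for_nonzero_item_count(self, dataset_name, timeout=60):
        # Poll the dataset item count until it is a real value; the call
        # can raise or return -1 while the service is still starting up.
        items_in_cbas_bucket = 0
        end_time = time.time() + timeout
        while items_in_cbas_bucket in (0, -1) and time.time() < end_time:
            try:
                items_in_cbas_bucket, _ = \
                    self.cbas_util.get_num_items_in_cbas_dataset(dataset_name)
            except Exception:
                pass
            self.sleep(1)
        return items_in_cbas_bucket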
Example 7

    def test_volume(self):
        nodes_in_cluster = [self.servers[0]]
        print "Start Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))

        ########################################################################################################################
        self.log.info("Add a N1QL/Index nodes")
        self.query_node = self.servers[1]
        rest = RestConnection(self.query_node)
        rest.set_data_path(data_path=self.query_node.data_path,
                           index_path=self.query_node.index_path,
                           cbas_path=self.query_node.cbas_path)
        result = self.add_node(self.query_node, rebalance=False)
        self.assertTrue(result, msg="Failed to add N1QL/Index node.")

        self.log.info("Add a KV nodes")
        result = self.add_node(self.servers[2],
                               services=["kv"],
                               rebalance=True)
        self.assertTrue(result, msg="Failed to add KV node.")

        nodes_in_cluster = nodes_in_cluster + [
            self.servers[1], self.servers[2]
        ]
        ########################################################################################################################
        self.log.info("Step 2: Create Couchbase buckets.")
        self.create_required_buckets()
        for node in nodes_in_cluster:
            NodeHelper.do_a_warm_up(node)
            NodeHelper.wait_service_started(node)
        ########################################################################################################################
        self.log.info(
            "Step 3: Create 10M docs, averaging 1k in size, across 8 Couchbase buckets."
        )
        env = DefaultCouchbaseEnvironment.builder().mutationTokensEnabled(
            True).computationPoolSize(5).socketConnectTimeout(
                100000).connectTimeout(100000).maxRequestLifetime(
                    TimeUnit.SECONDS.toMillis(300)).build()
        cluster = CouchbaseCluster.create(env, self.master.ip)
        cluster.authenticate("Administrator", "password")
        bucket = cluster.openBucket("GleambookUsers")

        pool = Executors.newFixedThreadPool(5)
        items_start_from = 0
        total_num_items = self.input.param("num_items", 5000)

        executors = []
        num_executors = 5
        doc_executors = 5
        num_items = total_num_items / num_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket,
                                        num_items,
                                        items_start_from + i * num_items,
                                        batch_size=2000))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items
        ########################################################################################################################
        self.sleep(120, "Sleeping after 1st cycle.")
        self.log.info("Step 8: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        futures = pool.invokeAll(executors)
        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.sleep(120, "Sleeping after 2nd cycle.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 5
        num_items = total_num_items / doc_executors

        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket,
                                        num_items,
                                        items_start_from + i * num_items,
                                        batch_size=2000))
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[3]], [])
        futures = pool.invokeAll(executors)

        for future in futures:
            print future.get(num_executors, TimeUnit.SECONDS)
        print "Executors completed!!"
        shutdown_and_await_termination(pool, num_executors)
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        bucket.close()
        cluster.disconnect()

        print "End Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))
Example 8
    def test_logging_configurations_are_restored_post_service_restarts(self):

        self.log.info("Add a cbas node")
        result = self.add_node(self.cbas_servers[0],
                               services=["cbas"],
                               rebalance=True)
        self.assertTrue(result, msg="Failed to add CBAS node")

        self.log.info("Delete all loggers")
        self.cbas_util.delete_all_loggers_on_cbas()

        self.log.info("Set the logging level using the json object")
        status, content, response = self.cbas_util.set_log_level_on_cbas(
            CbasLogging.DEFAULT_LOGGER_CONFIG_DICT)
        self.assertTrue(status,
                        msg="Response status incorrect for SET request")

        self.log.info("Delete specific logger")
        logger_name = self.input.param("logger_name_to_delete",
                                       "com.couchbase.client.core.node")
        status, content, response = self.cbas_util.delete_specific_cbas_log_level(
            logger_name)
        self.assertTrue(status, msg="Status mismatch for DELETE")
        del CbasLogging.DEFAULT_LOGGER_CONFIG_DICT[logger_name]

        self.log.info("Update specific logger")
        logger_name = self.input.param("logger_name_to_update",
                                       "org.apache.hyracks")
        logger_level_to_update = self.input.param("logger_level_to_update",
                                                  "FATAL")
        status, response, content = self.cbas_util.set_specific_log_level_on_cbas(
            logger_name, logger_level_to_update)
        self.assertTrue(status, msg="Status mismatch for SET")
        CbasLogging.DEFAULT_LOGGER_CONFIG_DICT[
            logger_name] = logger_level_to_update

        self.log.info("Add a new logger")
        logger_name = self.input.param("logger_name_to_add",
                                       "org.apache.hyracks123")
        logger_level_to_add = self.input.param("logger_level_to_add", "ALL")
        status, response, content = self.cbas_util.set_specific_log_level_on_cbas(
            logger_name, logger_level_to_add)
        self.assertTrue(status, msg="Status mismatch for SET")
        CbasLogging.DEFAULT_LOGGER_CONFIG_DICT[
            logger_name] = logger_level_to_add

        self.log.info("Verify logging configuration that we set on cbas Node")
        for name, level in CbasLogging.DEFAULT_LOGGER_CONFIG_DICT.items():
            status, content, response = self.cbas_util.get_specific_cbas_log_level(
                name)
            self.assertTrue(status,
                            msg="Response status incorrect for GET request")
            self.assertEquals(content,
                              level,
                              msg="Logger configuration mismatch for logger " +
                              name)

        self.sleep(
            timeout=10,
            message=
            "Waiting for logger configuration to be copied across cbas nodes")

        self.log.info("Verify logging configuration on other cbas node")
        for name, level in CbasLogging.DEFAULT_LOGGER_CONFIG_DICT.items():
            status, content, response = cbas_utils(
                self.master,
                self.cbas_servers[0]).get_specific_cbas_log_level(name)
            self.assertTrue(status,
                            msg="Response status incorrect for GET request")
            self.assertEquals(content,
                              level,
                              msg="Logger configuration mismatch for logger " +
                              name)

        self.log.info("Read input params")
        process_name = self.input.param('process_name', None)
        service_name = self.input.param('service_name', None)
        restart_couchbase = self.input.param('restart_couchbase', False)
        reboot = self.input.param('reboot', False)
        kill_services = self.input.param('kill_services', False)

        self.log.info("Establish a remote connection")
        shell_cc = RemoteMachineShellConnection(self.cbas_node)
        shell_nc = RemoteMachineShellConnection(self.cbas_servers[0])

        if kill_services:
            self.log.info("Kill the %s service on CC cbas node" % service_name)
            shell_cc.kill_process(process_name, service_name)

            self.log.info("Kill the %s service on other cbas node" %
                          service_name)
            shell_nc.kill_process(process_name, service_name)

        if restart_couchbase:
            self.log.info("Restart couchbase CC node ")
            shell_cc.restart_couchbase()

            self.log.info("Restart couchbase NC node ")
            shell_nc.restart_couchbase()

        if reboot:
            self.log.info("Reboot couchbase CC node")
            NodeHelper.reboot_server(self.cbas_node, self)

            self.log.info("Reboot couchbase NC node")
            NodeHelper.reboot_server(self.cbas_servers[0], self)

        end_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
        self.log.info(
            "Wait for nodes to be bootstrapped, ignoring unreachable-server exceptions"
        )
        while datetime.datetime.now() < end_time:
            try:
                self.log.info("Get the logging configurations")
                status, content, response = self.cbas_util.get_log_level_on_cbas(
                )
                self.assertTrue(
                    status, msg="Response status incorrect for GET request")

                self.log.info("Convert response to a dictionary")
                log_dict = CbasLogging.convert_logger_get_result_to_a_dict(
                    content)
                if len(log_dict) >= len(
                        CbasLogging.DEFAULT_LOGGER_CONFIG_DICT):
                    break
            except Exception:
                pass

        self.log.info("Verify logging configuration post service kill")
        for name, level in CbasLogging.DEFAULT_LOGGER_CONFIG_DICT.items():
            status, content, response = self.cbas_util.get_specific_cbas_log_level(
                name)
            self.assertTrue(status,
                            msg="Response status incorrect for GET request")
            self.assertEquals(content,
                              level,
                              msg="Logger configuration mismatch for logger " +
                              name)

        self.sleep(
            timeout=10,
            message=
            "Waiting for logger configuration to be copied across cbas nodes")

        self.log.info(
            "Verify logging configuration on other cbas node post service kill"
        )
        for name, level in CbasLogging.DEFAULT_LOGGER_CONFIG_DICT.items():
            status, content, response = cbas_utils(
                self.master,
                self.cbas_servers[0]).get_specific_cbas_log_level(name)
            self.assertTrue(status,
                            msg="Response status incorrect for GET request")
            self.assertEquals(content,
                              level,
                              msg="Logger configuration mismatch for logger " +
                              name)
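
The GET-and-compare loop above appears four times in this test. A sketch of a shared verification helper (hypothetical name, parameterized over the cbas_utils instance so it works for both the CC and the NC node):

    def verify_logger_config(self, util, expected_config):
        # Assert that every logger in expected_config reports its
        # expected level on the node behind `util`.
        for name, level in expected_config.items():
            status, content, response = \
                util.get_specific_cbas_log_level(name)
            self.assertTrue(status,
                            msg="Response status incorrect for GET request")
            self.assertEquals(content, level,
                              msg="Logger configuration mismatch for logger "
                              + name)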
Example 9
    def test_logging_configurations_are_restored_post_service_restarts(self):

        self.log.info("Add a cbas node")
        result = self.add_node(self.cbas_servers[0],
                               services=["cbas"],
                               rebalance=True)
        self.assertTrue(result, msg="Failed to add CBAS node")

        self.log.info("Delete all loggers")
        self.cbas_util.delete_all_loggers_on_cbas()

        self.log.info("Set the logging level using the json object")
        status, content, response = self.cbas_util.set_log_level_on_cbas(
            CbasLogging.DEFAULT_LOGGER_CONFIG_DICT)
        self.assertTrue(status,
                        msg="Response status incorrect for SET request")

        self.log.info("Delete specific logger")
        logger_name = self.input.param("logger_name_to_delete",
                                       "com.couchbase.client.core.node")
        status, content, response = self.cbas_util.delete_specific_cbas_log_level(
            logger_name)
        self.assertTrue(status, msg="Status mismatch for DELETE")
        del CbasLogging.DEFAULT_LOGGER_CONFIG_DICT[logger_name]

        self.log.info("Update specific logger")
        logger_name = self.input.param("logger_name_to_update",
                                       "org.apache.hyracks")
        logger_level_to_update = self.input.param("logger_level_to_update",
                                                  "FATAL")
        status, response, content = self.cbas_util.set_specific_log_level_on_cbas(
            logger_name, logger_level_to_update)
        self.assertTrue(status, msg="Status mismatch for SET")
        CbasLogging.DEFAULT_LOGGER_CONFIG_DICT[
            logger_name] = logger_level_to_update

        self.log.info("Add a new logger")
        logger_name = self.input.param("logger_name_to_add",
                                       "org.apache.hyracks123")
        logger_level_to_add = self.input.param("logger_level_to_add", "ALL")
        status, response, content = self.cbas_util.set_specific_log_level_on_cbas(
            logger_name, logger_level_to_add)
        self.assertTrue(status, msg="Status mismatch for SET")
        CbasLogging.DEFAULT_LOGGER_CONFIG_DICT[
            logger_name] = logger_level_to_add

        self.log.info("Verify logging configuration that we set on cbas Node")
        for name, level in CbasLogging.DEFAULT_LOGGER_CONFIG_DICT.items():
            status, content, response = self.cbas_util.get_specific_cbas_log_level(
                name)
            self.assertTrue(status,
                            msg="Response status incorrect for GET request")
            self.assertEquals(content,
                              level,
                              msg="Logger configuration mismatch for logger " +
                              name)

        self.sleep(
            timeout=10,
            message=
            "Waiting for logger configuration to be copied across cbas nodes")

        self.log.info("Verify logging configuration on other cbas node")
        for name, level in CbasLogging.DEFAULT_LOGGER_CONFIG_DICT.items():
            status, content, response = cbas_utils(
                self.master,
                self.cbas_servers[0]).get_specific_cbas_log_level(name)
            self.assertTrue(status,
                            msg="Response status incorrect for GET request")
            self.assertEquals(content,
                              level,
                              msg="Logger configuration mismatch for logger " +
                              name)

        self.log.info("Read input params")
        process_name = self.input.param('process_name', None)
        service_name = self.input.param('service_name', None)
        restart_couchbase = self.input.param('restart_couchbase', False)
        reboot = self.input.param('reboot', False)
        kill_services = self.input.param('kill_services', False)

        self.log.info("Establish a remote connection")
        shell_cc = RemoteMachineShellConnection(self.cbas_node)
        shell_nc = RemoteMachineShellConnection(self.cbas_servers[0])

        if kill_services:
            self.log.info("Kill the %s service on CC cbas node" % service_name)
            shell_cc.kill_process(process_name, service_name)

            self.log.info("Kill the %s service on other cbas node" %
                          service_name)
            shell_nc.kill_process(process_name, service_name)

        if restart_couchbase:
            self.log.info("Restart couchbase CC node ")
            shell_cc.restart_couchbase()

            self.log.info("Restart couchbase NC node ")
            shell_nc.restart_couchbase()

        if reboot:
            self.log.info("Reboot couchbase CC node")
            NodeHelper.reboot_server(self.cbas_node, self)

            self.log.info("Reboot couchbase NC node")
            NodeHelper.reboot_server(self.cbas_servers[0], self)

        self.log.info(
            "Wait for the cluster to become active again, using the private ping() function"
        )
        cluster_recover_start_time = time.time()
        while time.time() < cluster_recover_start_time + 180:
            try:
                status, metrics, _, cbas_result, _ = self.cbas_util.execute_statement_on_cbas_util(
                    "set `import-private-functions` `true`;ping();")
                if status == "success":
                    break
            except Exception:
                self.sleep(2, message="Wait for service to be up again")

        self.log.info("Verify logging configuration post service kill")
        for name, level in CbasLogging.DEFAULT_LOGGER_CONFIG_DICT.items():
            status, content, response = self.cbas_util.get_specific_cbas_log_level(
                name)
            self.assertTrue(status,
                            msg="Response status incorrect for GET request")
            self.assertEquals(content,
                              level,
                              msg="Logger configuration mismatch for logger " +
                              name)

        self.sleep(
            timeout=10,
            message=
            "Waiting for logger configuration to be copied across cbas nodes")

        self.log.info(
            "Verify logging configuration on other cbas node post service kill"
        )
        for name, level in CbasLogging.DEFAULT_LOGGER_CONFIG_DICT.items():
            status, content, response = cbas_utils(
                self.master,
                self.cbas_servers[0]).get_specific_cbas_log_level(name)
            self.assertTrue(status,
                            msg="Response status incorrect for GET request")
            self.assertEquals(content,
                              level,
                              msg="Logger configuration mismatch for logger " +
                              name)
Example 10
    def test_service_restart(self):
        self.setup_for_test()
        self.restart_method = self.input.param('restart_method', None)
        self.cbas_node_type = self.input.param('cbas_node_type', None)

        query = "select sleep(count(*),50000) from {0};".format(
            self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(query, "async", 10)
        self.ingestion_in_progress()

        if self.cbas_node_type == "CC":
            node_in_test = self.cbas_node
        else:
            node_in_test = self.cbas_servers[0]

        items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(
            self.cbas_dataset_name)
        self.log.info("Items before service restart: %s" %
                      items_in_cbas_bucket)

        if self.restart_method == "graceful":
            self.log.info("Gracefully re-starting service on node %s" %
                          node_in_test)
            NodeHelper.do_a_warm_up(node_in_test)
            NodeHelper.wait_service_started(node_in_test)
        else:
            self.log.info("Kill Memcached process on node %s" % node_in_test)
            shell = RemoteMachineShellConnection(node_in_test)
            shell.kill_memcached()

        items_in_cbas_bucket = 0
        start_time = time.time()
        while (items_in_cbas_bucket == 0 or items_in_cbas_bucket
               == -1) and time.time() < start_time + 60:
            try:
                items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(
                    self.cbas_dataset_name)
            except:
                pass

        self.log.info(
            "Docs in CBAS dataset after service restart: %s" %
            items_in_cbas_bucket)
        if items_in_cbas_bucket < self.num_items * 3 and items_in_cbas_bucket > self.num_items:
            self.log.info("Data ingestion was interrupted successfully")
        elif items_in_cbas_bucket < self.num_items:
            self.log.info(
                "Data ingestion was interrupted and restarted from 0.")
        else:
            self.log.info(
                "Data ingestion was not interrupted and completed before the service restart."
            )

        run_count = 0
        fail_count = 0
        success_count = 0
        aborted_count = 0
        shell = RemoteMachineShellConnection(node_in_test)
        for handle in handles:
            status, hand = self.cbas_util.retrieve_request_status_using_handle(
                node_in_test, handle, shell)
            if status == "running":
                run_count += 1
                self.log.info("query with handle %s is running." % handle)
            elif status == "failed":
                fail_count += 1
                self.log.info("query with handle %s is failed." % handle)
            elif status == "success":
                success_count += 1
                self.log.info("query with handle %s is successful." % handle)
            else:
                aborted_count += 1
                self.log.info("Queued job is deleted: %s" % status)

        self.log.info("After service restart %s queued jobs are Running." %
                      run_count)
        self.log.info("After service restart %s queued jobs are Failed." %
                      fail_count)
        self.log.info("After service restart %s queued jobs are Successful." %
                      success_count)
        self.log.info("After service restart %s queued jobs are Aborted." %
                      aborted_count)

        if self.cbas_node_type == "NC":
            self.assertTrue(fail_count + aborted_count == 0,
                            "Some queries failed/aborted")

        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.cbas_util._run_concurrent_queries(query, "immediate", 100)

        count_n1ql = self.rest.query_tool(
            'select count(*) from `%s`' %
            (self.cb_bucket_name))['results'][0]['$1']
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )
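
The handle-classification block above is duplicated in test_stop_start_service_ingest_data below. A sketch of it as a tallying helper (hypothetical name, same cbas_util API):

    def tally_handle_statuses(self, node, handles):
        # Classify each queued query handle by its reported status.
        counts = {"running": 0, "failed": 0, "success": 0, "aborted": 0}
        shell = RemoteMachineShellConnection(node)
        for handle in handles:
            status, _ = self.cbas_util.retrieve_request_status_using_handle(
                node, handle, shell)
            if status in ("running", "failed", "success"):
                counts[status] += 1
            else:
                counts["aborted"] += 1
            self.log.info("query with handle %s is %s" % (handle, status))
        return counts

A caller could then assert counts["failed"] + counts["aborted"] == 0 for the NC case, as the test above does.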
Example 11
    def test_network_hardening(self):
        self.setup_for_test()

        end = self.num_items
        CC = self.cbas_node
        NC = self.cbas_servers
        KV = self.master
        nodes = [CC] + NC

        for node in nodes:
            for i in xrange(2):
                NodeHelper.enable_firewall(node)

                start = end
                end = start + self.num_items
                tasks = self.perform_doc_ops_in_all_cb_buckets(self.num_items,
                                                               "create",
                                                               start,
                                                               end,
                                                               batch_size=1000,
                                                               _async=True)

                self.sleep(
                    30,
                    "Sleep after enabling firewall on node %s, then disable it."
                    % node.ip)
                NodeHelper.disable_firewall(node)

                items_in_cbas_bucket = 0
                start_time = time.time()
                while (items_in_cbas_bucket == 0 or items_in_cbas_bucket
                       == -1) and time.time() < start_time + 60:
                    try:
                        items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(
                            self.cbas_dataset_name)
                    except:
                        pass
                self.log.info("Items after network is up: %s" %
                              items_in_cbas_bucket)

                if items_in_cbas_bucket < end and items_in_cbas_bucket > start:
                    self.log.info("Data ingestion was interrupted successfully")
                elif items_in_cbas_bucket < start:
                    self.log.info(
                        "Data ingestion was interrupted and restarted from 0."
                    )
                else:
                    self.log.info(
                        "Data ingestion was not interrupted and completed before the network was cut."
                    )

                query = "select count(*) from {0};".format(
                    self.cbas_dataset_name)
                self.cbas_util._run_concurrent_queries(query, "immediate", 100)

                for task in tasks:
                    task.get_result()

                if not self.cbas_util.validate_cbas_dataset_items_count(
                        self.cbas_dataset_name, end):
                    self.fail(
                        "No. of items in CBAS dataset do not match that in the CB bucket"
                    )

                NodeHelper.enable_firewall(node, bidirectional=True)

                start = end
                end = start + self.num_items
                tasks = self.perform_doc_ops_in_all_cb_buckets(self.num_items,
                                                               "create",
                                                               start,
                                                               end,
                                                               batch_size=1000,
                                                               _async=True)

                self.sleep(
                    30,
                    "Sleep after enabling firewall on node %s, then disable it."
                    % node.ip)
                NodeHelper.disable_firewall(node)

                items_in_cbas_bucket = 0
                start_time = time.time()
                while (items_in_cbas_bucket == 0 or items_in_cbas_bucket
                       == -1) and time.time() < start_time + 60:
                    try:
                        items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(
                            self.cbas_dataset_name)
                    except:
                        pass
                self.log.info("Items after network is up: %s" %
                              items_in_cbas_bucket)

                if items_in_cbas_bucket < end and items_in_cbas_bucket > start:
                    self.log.info("Data ingestion was interrupted successfully")
                elif items_in_cbas_bucket < start:
                    self.log.info(
                        "Data ingestion was interrupted and restarted from 0."
                    )
                else:
                    self.log.info(
                        "Data ingestion was not interrupted and completed before the network was cut."
                    )

                query = "select count(*) from {0};".format(
                    self.cbas_dataset_name)
                self.cbas_util._run_concurrent_queries(query, "immediate", 100)

                for task in tasks:
                    task.get_result()

                if not self.cbas_util.validate_cbas_dataset_items_count(
                        self.cbas_dataset_name, end):
                    self.fail(
                        "No. of items in CBAS dataset do not match that in the CB bucket"
                    )
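
Each iteration of the loop above runs the same body twice, differing only in the bidirectional flag. A sketch that factors one firewall cycle into a helper (hypothetical name, same helpers as above):

    def firewall_cycle_and_validate(self, node, start, bidirectional=False):
        # One firewall on/off cycle with concurrent loading, then validation.
        NodeHelper.enable_firewall(node, bidirectional=bidirectional)
        end = start + self.num_items
        tasks = self.perform_doc_ops_in_all_cb_buckets(
            self.num_items, "create", start, end,
            batch_size=1000, _async=True)
        self.sleep(30, "Sleep after enabling firewall on node %s" % node.ip)
        NodeHelper.disable_firewall(node)
        for task in tasks:
            task.get_result()
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, end),
            "No. of items in CBAS dataset does not match that in the CB bucket")
        return end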
Example 12
    def test_stop_start_service_ingest_data(self):
        self.setup_for_test()
        self.cbas_node_type = self.input.param('cbas_node_type', None)

        query = "select sleep(count(*),50000) from {0};".format(
            self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(query, "async", 10)
        self.ingestion_in_progress()

        if self.cbas_node_type == "CC":
            node_in_test = self.cbas_node
            self.cbas_util.closeConn()
            self.cbas_util = cbas_utils(self.master, self.cbas_servers[0])
            self.cbas_util.createConn("default")
        else:
            node_in_test = self.cbas_servers[0]

        items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(
            self.cbas_dataset_name)
        self.log.info("Items before service restart: %s" %
                      items_in_cbas_bucket)

        self.log.info("Gracefully stopping service on node %s" % node_in_test)
        NodeHelper.stop_couchbase(node_in_test)
        NodeHelper.start_couchbase(node_in_test)
        NodeHelper.wait_service_started(node_in_test)
        items_in_cbas_bucket = 0
        start_time = time.time()
        while (items_in_cbas_bucket == 0 or items_in_cbas_bucket
               == -1) and time.time() < start_time + 60:
            try:
                items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(
                    self.cbas_dataset_name)
            except:
                pass

        if items_in_cbas_bucket < self.num_items * 3 and items_in_cbas_bucket > self.num_items:
            self.log.info("Data ingestion was interrupted successfully")
        elif items_in_cbas_bucket < self.num_items:
            self.log.info(
                "Data ingestion was interrupted and restarted from 0.")
        else:
            self.log.info(
                "Data ingestion was not interrupted and completed before the service restart."
            )

        run_count = 0
        fail_count = 0
        success_count = 0
        aborted_count = 0
        shell = RemoteMachineShellConnection(node_in_test)
        for handle in handles:
            status, hand = self.cbas_util.retrieve_request_status_using_handle(
                node_in_test, handle, shell)
            if status == "running":
                run_count += 1
                self.log.info("query with handle %s is running." % handle)
            elif status == "failed":
                fail_count += 1
                self.log.info("query with handle %s is failed." % handle)
            elif status == "success":
                success_count += 1
                self.log.info("query with handle %s is successful." % handle)
            else:
                aborted_count += 1
                self.log.info("Queued job is deleted: %s" % status)

        self.log.info("After service restart %s queued jobs are Running." %
                      run_count)
        self.log.info("After service restart %s queued jobs are Failed." %
                      fail_count)
        self.log.info("After service restart %s queued jobs are Successful." %
                      success_count)
        self.log.info("After service restart %s queued jobs are Aborted." %
                      aborted_count)

        if self.cbas_node_type == "NC":
            self.assertTrue(fail_count + aborted_count == 0,
                            "Some queries failed/aborted")

        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.cbas_util._run_concurrent_queries(query, "immediate", 100)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 3):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )