def test_primary_cbas_shutdown(self):
        '''
        Description: Verify that a second cbas node keeps serving analytics
        queries after the first cbas node is stopped and failed over.

        Steps:
        1. Load the sample bucket, add the first cbas node.
        2. Create datasets and connect the bucket; ingestion starts.
        3. Add a second cbas node.
        4. Stop Couchbase on the first cbas node, fail it over, rebalance.
        5. Run queries against the second cbas node - they must succeed.

        Author: Ritesh Agarwal
        '''
        first_cbas = self.cbas_servers[0]
        second_cbas = self.cbas_servers[1]

        self.load_sample_buckets(bucketName=self.cb_bucket_name,
                                 total_items=self.travel_sample_docs_count)
        failover_node = self.add_node(first_cbas, services=["cbas"])
        self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name,
                                               self.travel_sample_docs_count)
        self.add_node(second_cbas, services=["cbas"])

        # Take the first analytics node down and remove it from the cluster.
        NodeHelper.stop_couchbase(first_cbas)
        self.rest.fail_over(otpNode=failover_node.id)
        self.rebalance()

        # The surviving analytics node must still answer queries.
        count_query = "select count(*) from {0};".format(
            self.cbas_dataset_name)
        self.cbas_util._run_concurrent_queries(
            count_query,
            "immediate",
            100,
            rest=RestConnection(second_cbas),
            batch_size=self.concurrent_batch_size)

        # Bring the stopped node back up so teardown sees a healthy cluster.
        NodeHelper.start_couchbase(first_cbas)
        NodeHelper.wait_service_started(first_cbas)
    def test_restart_cb(self):
        '''
        Description: Restart Couchbase on the cbas node and verify that the
        analytics service comes back up with it and loses no data.

        Steps:
        1. Load the sample bucket, add the first cbas node.
        2. Stop then start the Couchbase service on that node.
        3. Wait for analytics to recover.
        4. Create datasets, connect the bucket, verify the item count.

        Author: Ritesh Agarwal
        '''
        cbas_node = self.cbas_servers[0]

        self.load_sample_buckets(bucketName=self.cb_bucket_name,
                                 total_items=self.travel_sample_docs_count)
        self.add_node(cbas_node, services=["cbas"])

        # Bounce the whole Couchbase service on the analytics node.
        NodeHelper.stop_couchbase(cbas_node)
        NodeHelper.start_couchbase(cbas_node)

        self.log.info("Wait for cluster to be active")
        recovered = self.cbas_util.wait_for_cbas_to_recover()
        self.assertTrue(recovered, msg="Analytics service unavailable")

        self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name,
                                               self.travel_sample_docs_count)
        count_matches = self.cbas_util.validate_cbas_dataset_items_count(
            self.cbas_dataset_name, self.travel_sample_docs_count)
        self.assertTrue(count_matches, "Data loss in CBAS.")
    def test_stop_start_service_ingest_data(self):
        '''
        Description: Restart the Couchbase service on a cbas node while data
        ingestion and long-running async analytics queries are in flight,
        then verify that the dataset eventually re-syncs with the KV bucket.

        cbas_node_type test param:
          "CC" - restart the analytics cluster controller (self.cbas_node);
                 the query connection is re-pointed at a surviving node.
          "NC" - restart a non-controller analytics node; no queued query
                 may fail or abort.
        '''
        self.setup_for_test()
        self.cbas_node_type = self.input.param('cbas_node_type', None)

        # Fire slow async queries so some are still queued/running when the
        # service restarts.
        query = "select sleep(count(*),50000) from {0};".format(
            self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(query, "async", 10)
        self.ingestion_in_progress()

        if self.cbas_node_type == "CC":
            node_in_test = self.cbas_node
            # The CC is going down: re-point the analytics util at a
            # surviving node before the restart.
            self.cbas_util.closeConn()
            self.cbas_util = cbas_utils(self.master, self.cbas_servers[0])
            self.cbas_util.createConn("default")
        else:
            node_in_test = self.cbas_servers[0]

        items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(
            self.cbas_dataset_name)
        self.log.info("Items before service restart: %s" %
                      items_in_cbas_bucket)

        self.log.info("Gracefully stopping service on node %s" % node_in_test)
        NodeHelper.stop_couchbase(node_in_test)
        NodeHelper.start_couchbase(node_in_test)
        NodeHelper.wait_service_started(node_in_test)

        # Poll (up to 60s) until the dataset reports a usable item count;
        # right after a restart the service may be unreachable (count -1)
        # or the dataset may still be empty (count 0).
        items_in_cbas_bucket = 0
        start_time = time.time()
        while (items_in_cbas_bucket in (0, -1)
               and time.time() < start_time + 60):
            try:
                items_in_cbas_bucket, _ = \
                    self.cbas_util.get_num_items_in_cbas_dataset(
                        self.cbas_dataset_name)
            except Exception:
                # Service may still be coming up; pause instead of spinning.
                self.sleep(1)

        if self.num_items < items_in_cbas_bucket < self.num_items * 3:
            self.log.info("Data Ingestion Interrupted successfully")
        elif items_in_cbas_bucket < self.num_items:
            self.log.info(
                "Data Ingestion was not interrupted but restarted from 0.")
        else:
            self.log.info(
                "Data Ingestion was not interrupted and completed before "
                "service restart.")

        # Classify the fate of each queued query handle after the restart.
        run_count = 0
        fail_count = 0
        success_count = 0
        aborted_count = 0
        shell = RemoteMachineShellConnection(node_in_test)
        try:
            for handle in handles:
                status, hand = \
                    self.cbas_util.retrieve_request_status_using_handle(
                        node_in_test, handle, shell)
                if status == "running":
                    run_count += 1
                    self.log.info("query with handle %s is running." % handle)
                elif status == "failed":
                    fail_count += 1
                    self.log.info("query with handle %s is failed." % handle)
                elif status == "success":
                    success_count += 1
                    self.log.info(
                        "query with handle %s is successful." % handle)
                else:
                    aborted_count += 1
                    self.log.info("Queued job is deleted: %s" % status)
        finally:
            # The shell connection was previously leaked; always close it.
            shell.disconnect()

        self.log.info("After service restart %s queued jobs are Running." %
                      run_count)
        self.log.info("After service restart %s queued jobs are Failed." %
                      fail_count)
        self.log.info("After service restart %s queued jobs are Successful." %
                      success_count)
        self.log.info("After service restart %s queued jobs are Aborted." %
                      aborted_count)

        if self.cbas_node_type == "NC":
            self.assertTrue(fail_count + aborted_count == 0,
                            "Some queries failed/aborted")

        # Drain the dataset with quick queries, then compare the CBAS item
        # count against the live N1QL count of the source bucket.
        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.cbas_util._run_concurrent_queries(query, "immediate", 100)

        count_n1ql = self.rest.query_tool(
            'select count(*) from `%s`' %
            (self.cb_bucket_name))['results'][0]['$1']
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, count_n1ql):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )