Example #1
    def setUp(self):
        self.input = TestInputSingleton.input
        self.input.test_params.update({"default_bucket": False})
        self.rebalanceServers = None
        self.nodeType = "KV"
        self.wait_for_rebalance = True
        super(CBASClusterOperations, self).setUp()
        self.num_items = self.input.param("items", 1000)
        self.bucket_util.create_default_bucket()
        #         self.cbas_util.createConn("default")
        if 'nodeType' in self.input.test_params:
            self.nodeType = self.input.test_params['nodeType']

        self.rebalance_both = self.input.param("rebalance_cbas_and_kv", False)
        if not self.rebalance_both:
            if self.nodeType == "KV":
                self.rebalanceServers = self.kv_servers
                self.wait_for_rebalance = False
            elif self.nodeType == "CBAS":
                self.rebalanceServers = [self.cbas_node] + self.cbas_servers
        else:
            self.rebalanceServers = (self.kv_servers + [self.cbas_node]
                                     + self.cbas_servers)
            self.nodeType = "KV-CBAS"

        self.assertTrue(
            len(self.rebalanceServers) > 1,
            "Not enough %s servers to run tests." % self.nodeType)
        self.log.info("This test will be running in %s context." %
                      self.nodeType)
        self.task_manager = TaskManager()
        self.load_gen_tasks = []
Example #2
class RebalanceinJython(BaseTestCase):
    def setUp(self):
        super(RebalanceinJython, self).setUp()
        self.task_manager = TaskManager()
        self.load_gen_tasks = []

    def tearDown(self):
        self.task_manager.shutdown_task_manager()
        super(RebalanceinJython, self).tearDown()

    def start_load_gen(self, docs, bucket):
        cluster = CouchbaseCluster.create(self.servers[0].ip)
        cluster.authenticate("Administrator", "password")
        bucket = cluster.openBucket(bucket)
        k = "rebalance"
        v = {"value": "asd"}
        docloaders = []
        num_executors = 5
        total_num_executors = 5
        num_docs = docs / total_num_executors
        load_gen_task_name = "Loadgen"
        for i in xrange(total_num_executors):
            task_name = "{0}_{1}".format(load_gen_task_name, i)
            self.load_gen_tasks.append(
                DocloaderTask(bucket, num_docs, i * num_docs, k, v, task_name))
        for task in self.load_gen_tasks:
            self.task_manager.add_new_task(task)

    def finish_load_gen(self):
        for task in self.load_gen_tasks:
            print self.task_manager.get_task_result(task)

    def start_rebalance_nodes(self):
        servs_in = [
            self.servers[i + self.nodes_init] for i in range(self.nodes_in)
        ]
        self.rebalance_task = rebalanceTask(self.servers[:self.nodes_init],
                                            to_add=servs_in,
                                            to_remove=[])
        self.task_manager.add_new_task(self.rebalance_task)

    def get_rebalance_result(self):
        return self.task_manager.get_task_result(self.rebalance_task)

    def test_rebalance_in(self):
        self.start_load_gen(100000, self.buckets[0].name)
        self.sleep(5)
        #test_Task = TestTask(30)
        #self.task_manager.add_new_task(test_Task)
        self.start_rebalance_nodes()
        print self.get_rebalance_result()
        #print self.task_manager.get_task_result(test_Task)
        self.finish_load_gen()
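
The two examples above drive every task through the same lifecycle: construct a TaskManager, hand work to it with add_new_task, block on get_task_result, and shut the manager down in tearDown. The sketch below is a minimal, self-contained stand-in for that pattern; SimpleTask and SimpleTaskManager are hypothetical classes built on Python's threading module, not the testrunner's TaskManager, DocloaderTask, or rebalanceTask.

import threading


class SimpleTask(object):
    """A unit of work whose result can be waited on."""
    def __init__(self, fn, *args):
        self.fn = fn
        self.args = args
        self.result = None
        self.done = threading.Event()

    def run(self):
        self.result = self.fn(*self.args)
        self.done.set()


class SimpleTaskManager(object):
    """Runs each task on its own thread; results are collected per task."""
    def __init__(self):
        self.threads = []

    def add_new_task(self, task):
        thread = threading.Thread(target=task.run)
        thread.start()
        self.threads.append(thread)

    def get_task_result(self, task):
        task.done.wait()
        return task.result

    def shutdown_task_manager(self):
        for thread in self.threads:
            thread.join()


if __name__ == "__main__":
    manager = SimpleTaskManager()
    task = SimpleTask(sum, range(10))
    manager.add_new_task(task)
    print(manager.get_task_result(task))  # prints 45
    manager.shutdown_task_manager()
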
Example #3
    def __init__(self,
                 configuration=None,
                 header_name=None,
                 header_value=None,
                 cookie=None):
        if configuration is None:
            configuration = Configuration()
        self.configuration = configuration

        self.pool = TaskManager()
        self.rest_client = rest.RESTClientObject(configuration)
        self.default_headers = {}
        if header_name is not None:
            self.default_headers[header_name] = header_value
        self.cookie = cookie
        # Set default User-Agent.
        self.user_agent = 'Swagger-Codegen/1.0.0/python'
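
Example #3 is the constructor of a swagger-codegen generated ApiClient that keeps a TaskManager as its worker pool. A minimal usage sketch of that constructor follows; the package name swagger_client, the Authorization header value, and the cookie are assumptions chosen only to show where header_name/header_value and cookie end up.

# Hypothetical usage of the ApiClient constructor from Example #3.
# "swagger_client" is an assumed package name (swagger-codegen lets you
# choose it at generation time), and the token value is a placeholder.
from swagger_client import ApiClient, Configuration

config = Configuration()
client = ApiClient(configuration=config,
                   header_name="Authorization",    # stored in default_headers
                   header_value="Bearer <token>",
                   cookie="session=abc123")
# Requests issued through this client now carry the Authorization header,
# the cookie, and the default User-Agent set in __init__.
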
Example #4
class CBASClusterOperations(CBASBaseTest):
    def setUp(self):
        self.input = TestInputSingleton.input
        self.input.test_params.update({"default_bucket": False})
        self.rebalanceServers = None
        self.nodeType = "KV"
        self.wait_for_rebalance = True
        super(CBASClusterOperations, self).setUp()
        self.num_items = self.input.param("items", 1000)
        self.bucket_util.create_default_bucket()
        #         self.cbas_util.createConn("default")
        if 'nodeType' in self.input.test_params:
            self.nodeType = self.input.test_params['nodeType']

        self.rebalance_both = self.input.param("rebalance_cbas_and_kv", False)
        if not self.rebalance_both:
            if self.nodeType == "KV":
                self.rebalanceServers = self.kv_servers
                self.wait_for_rebalance = False
            elif self.nodeType == "CBAS":
                self.rebalanceServers = [self.cbas_node] + self.cbas_servers
        else:
            self.rebalanceServers = (self.kv_servers + [self.cbas_node]
                                     + self.cbas_servers)
            self.nodeType = "KV-CBAS"

        self.assertTrue(
            len(self.rebalanceServers) > 1,
            "Not enough %s servers to run tests." % self.nodeType)
        self.log.info("This test will be running in %s context." %
                      self.nodeType)
        self.task_manager = TaskManager()
        self.load_gen_tasks = []

    def setup_for_test(self, skip_data_loading=False):
        if not skip_data_loading:
            # Load Couchbase bucket first.
            self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0,
                                                   self.num_items)
        self.cbas_util.createConn(self.cb_bucket_name)
        # Create bucket on CBAS
        self.assertTrue(
            self.cbas_util.create_bucket_on_cbas(
                cbas_bucket_name=self.cbas_bucket_name,
                cb_bucket_name=self.cb_bucket_name,
                cb_server_ip=self.cb_server_ip),
            "bucket creation failed on cbas")

        # Create dataset on the CBAS bucket
        self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name=self.cb_bucket_name,
            cbas_dataset_name=self.cbas_dataset_name)

        # Create indexes on the CBAS bucket
        self.create_secondary_indexes = self.input.param(
            "create_secondary_indexes", False)
        if self.create_secondary_indexes:
            self.index_fields = "profession:string,number:bigint"
            create_idx_statement = "create index {0} on {1}({2});".format(
                self.index_name, self.cbas_dataset_name, self.index_fields)
            status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
                create_idx_statement)

            self.assertTrue(status == "success", "Create Index query failed")

            self.assertTrue(
                self.cbas_util.verify_index_created(
                    self.index_name, self.index_fields.split(","),
                    self.cbas_dataset_name)[0])

        # Connect to Bucket
        self.cbas_util.connect_to_bucket(
            cbas_bucket_name=self.cbas_bucket_name,
            cb_bucket_password=self.cb_bucket_password)

        if not skip_data_loading:
            # Validate no. of items in CBAS dataset
            if not self.cbas_util.validate_cbas_dataset_items_count(
                    self.cbas_dataset_name, self.num_items):
                self.fail(
                    "No. of items in CBAS dataset do not match that in the CB bucket"
                )

    def test_rebalance_in(self):
        '''
        Description: This will test the rebalance-in feature, i.e. one node coming into the cluster.
        Then Rebalance. Verify that it has no effect on the data ingested to cbas.
        
        Steps:
        1. Setup cbas. bucket, datasets/shadows, connect.
        2. Add a node and rebalance. Don't wait for rebalance completion.
        3. During rebalance, do mutations and execute queries on cbas.
        
        Author: Ritesh Agarwal/Mihir Kamdar
        Date Created: 18/07/2017
        '''
        query = "select count(*) from {0};".format(self.cbas_dataset_name)

        self.setup_for_test()
        self.add_node(node=self.rebalanceServers[1],
                      rebalance=True,
                      wait_for_rebalance_completion=self.wait_for_rebalance)
        self.log.info("Rebalance state:%s" %
                      self.rest._rebalance_progress_status())

        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create",
                                               self.num_items,
                                               self.num_items * 2)

        self.log.info("Rebalance state:%s" %
                      self.rest._rebalance_progress_status())
        self.cbas_util._run_concurrent_queries(
            query, None, 2000, batch_size=self.concurrent_batch_size)

        self.log.info("Rebalance state:%s" %
                      self.rest._rebalance_progress_status())

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    def test_rebalance_out(self):
        '''
        Description: This will test the rebalance-out feature, i.e. one node going out of the cluster.
        Then Rebalance.
        
        Steps:
        1. Add a node, Rebalance.
        2. Setup cbas. bucket, datasets/shadows, connect.
        3. Remove a node and rebalance. Don't wait for rebalance completion.
        4. During rebalance, do mutations and execute queries on cbas.
        
        Author: Ritesh Agarwal/Mihir Kamdar
        Date Created: 18/07/2017
        '''
        self.add_node(node=self.rebalanceServers[1])
        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.setup_for_test()
        otpnodes = []
        nodes = self.rest.node_statuses()
        for node in nodes:
            if node.ip == self.rebalanceServers[1].ip:
                otpnodes.append(node)
        self.remove_node(otpnodes, wait_for_rebalance=self.wait_for_rebalance)
        self.log.info("Rebalance state:%s" %
                      self.rest._rebalance_progress_status())

        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create",
                                               self.num_items,
                                               self.num_items * 2)

        self.cbas_util._run_concurrent_queries(
            query, "immediate", 2000, batch_size=self.concurrent_batch_size)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    def test_swap_rebalance(self):
        '''
        Description: This will test the swap rebalance feature, i.e. one node going out and one node coming into the cluster.
        Then Rebalance. Verify that it has no effect on the data ingested to cbas.
        
        Steps:
        1. Setup cbas. bucket, datasets/shadows, connect.
        2. Add a node that is to be swapped against the leaving node. Do not rebalance.
        3. Remove a node and rebalance.
        4. During rebalance, do mutations and execute queries on cbas.
        
        Author: Ritesh Agarwal/Mihir Kamdar
        Date Created: 20/07/2017
        '''
        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.setup_for_test()

        otpnodes = []
        nodes = self.rest.node_statuses()
        if self.nodeType == "KV":
            service = ["kv"]
        else:
            service = ["cbas"]
        otpnodes.append(self.add_node(node=self.servers[1], services=service))
        self.add_node(node=self.servers[3], services=service, rebalance=False)
        self.remove_node(otpnodes, wait_for_rebalance=self.wait_for_rebalance)

        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create",
                                               self.num_items,
                                               self.num_items * 2)

        self.cbas_util._run_concurrent_queries(
            query, "immediate", 2000, batch_size=self.concurrent_batch_size)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    def test_failover(self):
        '''
        Description: This will test node failover, both graceful and hard, based on the
        graceful_failover param in the testcase conf file.
        
        Steps:
        1. Add node to the cluster which will be failed over.
        2. Create docs, setup cbas.
        3. Mark the node for fail over.
        4. Do rebalance asynchronously. During rebalance perform mutations.
        5. Run some CBAS queries.
        6. Check for correct number of items in CBAS datasets.
        
        Author: Ritesh Agarwal/Mihir Kamdar
        Date Created: 20/07/2017
        '''

        #Add node which will be failed over later.
        self.add_node(node=self.rebalanceServers[1])
        query = "select count(*) from {0};".format(self.cbas_dataset_name)

        graceful_failover = self.input.param("graceful_failover", False)
        self.setup_for_test()
        failover_task = self._cb_cluster.async_failover(
            self.input.servers, [self.rebalanceServers[1]], graceful_failover)
        failover_task.get_result()

        self.rebalance()
        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create",
                                               self.num_items,
                                               self.num_items * 3 / 2)

        self.cbas_util._run_concurrent_queries(
            query, "immediate", 2000, batch_size=self.concurrent_batch_size)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 3 / 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    '''
    -i b/resources/4-nodes-template.ini -t cbas.cbas_cluster_operations.CBASClusterOperations.test_rebalance_in_cb_cbas_together,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,nodeType=KV,rebalance_cbas_and_kv=True,wait_for_rebalace=False
    '''

    def test_rebalance_in_cb_cbas_together(self):

        self.log.info("Creates cbas buckets and dataset")
        dataset_count_query = "select count(*) from {0};".format(
            self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Rebalance in KV node")
        wait_for_rebalace_complete = self.input.param("wait_for_rebalace",
                                                      False)
        self.add_node(node=self.rebalanceServers[1],
                      rebalance=False,
                      wait_for_rebalance_completion=wait_for_rebalace_complete)

        self.log.info("Rebalance in CBAS node")
        self.add_node(node=self.rebalanceServers[3],
                      rebalance=True,
                      wait_for_rebalance_completion=wait_for_rebalace_complete)

        self.log.info(
            "Perform document create as rebalance is in progress : Rebalance state:%s"
            % self.rest._rebalance_progress_status())
        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create",
                                               self.num_items,
                                               self.num_items * 2)

        self.log.info(
            "Run queries as rebalance is in progress : Rebalance state:%s" %
            self.rest._rebalance_progress_status())
        handles = self.cbas_util._run_concurrent_queries(
            dataset_count_query,
            None,
            2000,
            batch_size=self.concurrent_batch_size)

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    '''
    -i b/resources/4-nodes-template.ini -t cbas.cbas_cluster_operations.CBASClusterOperations.test_rebalance_out_cb_cbas_together,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,nodeType=KV,rebalance_cbas_and_kv=True,wait_for_rebalace=False
    '''

    def test_rebalance_out_cb_cbas_together(self):

        self.log.info(
            "Rebalance in KV node and  wait for rebalance to complete")
        self.add_node(node=self.rebalanceServers[1])

        self.log.info(
            "Rebalance in CBAS node and  wait for rebalance to complete")
        self.add_node(node=self.rebalanceServers[3])

        self.log.info("Creates cbas buckets and dataset")
        dataset_count_query = "select count(*) from {0};".format(
            self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Fetch and remove nodes to rebalance out")
        wait_for_rebalace_complete = self.input.param("wait_for_rebalace",
                                                      False)
        otpnodes = []
        nodes = self.rest.node_statuses()
        for node in nodes:
            if node.ip in (self.rebalanceServers[1].ip,
                           self.rebalanceServers[3].ip):
                otpnodes.append(node)

        self.remove_node(otpnodes,
                         wait_for_rebalance=wait_for_rebalace_complete)

        self.sleep(30,
                   message="Sleep for 30 seconds for remove node to complete")

        self.log.info(
            "Perform document create as rebalance is in progress : Rebalance state:%s"
            % self.rest._rebalance_progress_status())
        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create",
                                               self.num_items,
                                               self.num_items * 2)

        self.log.info(
            "Run queries as rebalance is in progress : Rebalance state:%s" %
            self.rest._rebalance_progress_status())
        handles = self.cbas_util._run_concurrent_queries(
            dataset_count_query,
            "immediate",
            2000,
            batch_size=self.concurrent_batch_size)

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    '''
    -i b/resources/4-nodes-template.ini -t cbas.cbas_cluster_operations.CBASClusterOperations.test_swap_rebalance_cb_cbas_together,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,rebalance_cbas_and_kv=True,wait_for_rebalance=True
    '''

    def test_swap_rebalance_cb_cbas_together(self):

        self.log.info("Creates cbas buckets and dataset")
        wait_for_rebalance = self.input.param("wait_for_rebalance", True)
        dataset_count_query = "select count(*) from {0};".format(
            self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Add KV node and don't rebalance")
        self.add_node(node=self.rebalanceServers[1], rebalance=False)

        self.log.info("Add cbas node and don't rebalance")
        self.add_node(node=self.rebalanceServers[3], rebalance=False)

        otpnodes = []
        nodes = self.rest.node_statuses()
        for node in nodes:
            if node.ip in (self.rebalanceServers[0].ip,
                           self.rebalanceServers[2].ip):
                otpnodes.append(node)

        self.log.info("Remove master node")
        self.remove_node(otpnode=otpnodes,
                         wait_for_rebalance=wait_for_rebalance)

        self.log.info("Create instances pointing to new master nodes")
        c_utils = cbas_utils(self.rebalanceServers[1],
                             self.rebalanceServers[3])
        c_utils.createConn(self.cb_bucket_name)

        self.log.info("Create reference to SDK client")
        client = SDKClient(scheme="couchbase",
                           hosts=[self.rebalanceServers[1].ip],
                           bucket=self.cb_bucket_name,
                           password=self.rebalanceServers[1].rest_password)

        self.log.info("Add more document to default bucket")
        documents = ['{"name":"value"}'] * (self.num_items // 10)
        document_id_prefix = "custom-id-"
        client.insert_custom_json_documents(document_id_prefix, documents)

        self.log.info(
            "Run queries as rebalance is in progress : Rebalance state:%s" %
            self.rest._rebalance_progress_status())
        handles = c_utils._run_concurrent_queries(
            dataset_count_query,
            "immediate",
            2000,
            batch_size=self.concurrent_batch_size)

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if not c_utils.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items +
            (self.num_items // 10), 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    def test_rebalance_in_multiple_cbas_on_a_busy_system(self):
        node_services = []
        node_services.append(self.input.param('service', "cbas"))
        self.log.info("Setup CBAS")
        self.setup_for_test(skip_data_loading=True)

        self.log.info("Run KV ops in async while rebalance is in progress")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(
            docs_per_day=self.num_items, start=0)
        tasks = self.bucket_util._async_load_all_buckets(
            self.cluster.master, generators, "create", 0)

        self.log.info("Run concurrent queries to simulate busy system")
        statement = "select sleep(count(*),50000) from {0} where mutated=0;".format(
            self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(
            statement, self.mode, self.num_concurrent_queries)

        self.log.info("Rebalance in CBAS nodes")
        self.cluster_util.add_node(node=self.rebalanceServers[1],
                                   services=node_services,
                                   rebalance=False,
                                   wait_for_rebalance_completion=False)
        self.cluster_util.add_node(node=self.rebalanceServers[3],
                                   services=node_services,
                                   rebalance=True,
                                   wait_for_rebalance_completion=True)

        self.log.info("Get KV ops result")
        for task in tasks:
            task.get_result()

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.cluster.master,
                                                    handles)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    def _start_load_gen(self, docs, bucket, num_executors):
        cluster = CouchbaseCluster.create(self.servers[0].ip)
        cluster.authenticate("Administrator", "password")
        bucket = cluster.openBucket(bucket)
        k = "rebalance"
        v = {"value": "asd"}
        docloaders = []
        total_num_executors = num_executors
        num_docs = docs / total_num_executors
        load_gen_task_name = "Loadgen"
        for i in xrange(total_num_executors):
            task_name = "{0}_{1}".format(load_gen_task_name, i)
            self.load_gen_tasks.append(
                DocloaderTask(bucket, num_docs, i * num_docs, k, v, task_name))
        for task in self.load_gen_tasks:
            self.task_manager.add_new_task(task)

    def _finish_load_gen(self):
        for task in self.load_gen_tasks:
            self.log.info(self.task_manager.get_task_result(task))

    def test_rebalance_in_multiple_cbas_on_a_busy_system_jython(self):
        services = []
        services.append(self.input.param('service', "cbas"))
        services.append(self.input.param('service', 'cbas'))
        self.log.info("Setup CBAS")
        self.setup_for_test(skip_data_loading=True)
        self.log.info("Run KV ops in background")
        num_executors = 9 // self.buckets.__len__()
        for bucket in self.buckets:
            self._start_load_gen(self.num_items, bucket.name, num_executors)

        self.log.info("Run concurrent queries to simulate busy system")
        statement = "select sleep(count(*),50000) from {0} where mutated=0;".format(
            self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(
            statement, self.mode, self.num_concurrent_queries)

        self.log.info("Rebalance in CBAS nodes")
        nodes_in = [self.rebalanceServers[1], self.rebalanceServers[3]]
        rebalance_task = rebalanceTask(self.servers[:2],
                                       to_add=nodes_in,
                                       to_remove=[],
                                       services=services)
        self.task_manager.add_new_task(rebalance_task)

        rebalance_result = self.task_manager.get_task_result(rebalance_task)
        self.log.info(rebalance_result)

        self.log.info("Get KV ops result")
        self._finish_load_gen()

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    def test_rebalance_out_multiple_cbas_on_a_busy_system(self):
        node_services = []
        node_services.append(self.input.param('service', "cbas"))
        self.log.info("Rebalance in CBAS nodes")
        self.add_node(node=self.rebalanceServers[1], services=node_services)
        self.add_node(node=self.rebalanceServers[3], services=node_services)

        self.log.info("Setup CBAS")
        self.setup_for_test(skip_data_loading=True)

        self.log.info("Run KV ops in async while rebalance is in progress")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(
            docs_per_day=self.num_items, start=0)
        tasks = self._async_load_all_buckets(self.master, generators, "create",
                                             0)

        self.log.info("Run concurrent queries to simulate busy system")
        statement = "select sleep(count(*),50000) from {0} where mutated=0;".format(
            self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(
            statement, self.mode, self.num_concurrent_queries)

        self.log.info("Fetch and remove nodes to rebalance out")
        self.rebalance_cc = self.input.param("rebalance_cc", False)
        out_nodes = []
        nodes = self.rest.node_statuses()

        if self.rebalance_cc:
            for node in nodes:
                if node.ip in (self.cbas_node.ip, self.servers[1].ip):
                    out_nodes.append(node)
            self.cbas_util.closeConn()
            self.log.info(
                "Reinitialize CBAS utils with ip %s, since CC node is rebalanced out"
                % self.servers[3].ip)
            self.cbas_util = cbas_utils(self.master, self.servers[3])
            self.cbas_util.createConn("default")
        else:
            for node in nodes:
                if node.ip in (self.servers[3].ip, self.servers[1].ip):
                    out_nodes.append(node)

        self.log.info("Rebalance out CBAS nodes %s %s" %
                      (out_nodes[0].ip, out_nodes[1].ip))
        self.remove_all_nodes_then_rebalance([out_nodes[0], out_nodes[1]])

        self.log.info("Get KV ops result")
        for task in tasks:
            task.get_result()

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    '''
    cbas.cbas_cluster_operations.CBASClusterOperations.test_rebalance_swap_multiple_cbas_on_a_busy_system,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,rebalance_cbas_and_kv=True,service=cbas,rebalance_cc=False
    cbas.cbas_cluster_operations.CBASClusterOperations.test_rebalance_swap_multiple_cbas_on_a_busy_system,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,rebalance_cbas_and_kv=True,service=cbas,rebalance_cc=True
    '''

    def test_rebalance_swap_multiple_cbas_on_a_busy_system(self):
        '''
        1. We have a 4 node cluster with 1 KV and 3 CBAS. Assume the IPs end with 101(KV), 102(CBAS), 103(CBAS), 104(CBAS)
        2. Post initial setup - 101 is running KV and 102 is running CBAS as the CC node
        3. As part of the test, add an extra NC node that will be swap rebalanced later - add 103 and rebalance
        4. If swap rebalancing an NC node - select the node added in #3 for removal and add 104 during the swap
        5. If swap rebalancing the CC node - select the CC node for removal and add 104 during the swap
        '''

        self.log.info('Read service input param')
        node_services = []
        node_services.append(self.input.param('service', "cbas"))

        self.log.info(
            "Rebalance in CBAS nodes, this node will be removed during swap")
        self.add_node(node=self.rebalanceServers[1], services=node_services)

        self.log.info("Setup CBAS")
        self.setup_for_test(skip_data_loading=True)

        self.log.info("Run KV ops in async while rebalance is in progress")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(
            docs_per_day=self.num_items, start=0)
        tasks = self._async_load_all_buckets(self.master, generators, "create",
                                             0)

        self.log.info("Run concurrent queries to simulate busy system")
        statement = "select sleep(count(*),50000) from {0} where mutated=0;".format(
            self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(
            statement, self.mode, self.num_concurrent_queries)

        self.log.info("Fetch node to remove during rebalance")
        self.rebalance_cc = self.input.param("rebalance_cc", False)
        out_nodes = []
        nodes = self.rest.node_statuses()
        reinitialize_cbas_util = False
        for node in nodes:
            if self.rebalance_cc and (node.ip == self.cbas_node.ip):
                out_nodes.append(node)
                reinitialize_cbas_util = True
            elif (not self.rebalance_cc
                  and node.ip == self.rebalanceServers[1].ip):
                out_nodes.append(node)

        self.log.info("Swap rebalance CBAS nodes")
        self.add_node(node=self.rebalanceServers[3],
                      services=node_services,
                      rebalance=False)
        self.remove_node([out_nodes[0]], wait_for_rebalance=True)

        self.log.info("Get KV ops result")
        for task in tasks:
            task.get_result()

        if reinitialize_cbas_util is True:
            self.cbas_util = cbas_utils(self.master, self.rebalanceServers[3])
            self.cbas_util.createConn("default")

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    '''
    test_fail_over_node_followed_by_rebalance_out_or_add_back,cb_bucket_name=default,graceful_failover=True,cbas_bucket_name=default_cbas,cbas_dataset_name=default_ds,items=10000,nodeType=KV,rebalance_out=True,concurrent_batch_size=500
    test_fail_over_node_followed_by_rebalance_out_or_add_back,cb_bucket_name=default,graceful_failover=True,cbas_bucket_name=default_cbas,cbas_dataset_name=default_ds,items=10000,nodeType=KV,rebalance_out=False,recovery_strategy=full,concurrent_batch_size=500
    test_fail_over_node_followed_by_rebalance_out_or_add_back,cb_bucket_name=default,graceful_failover=True,cbas_bucket_name=default_cbas,cbas_dataset_name=default_ds,items=10000,nodeType=KV,rebalance_out=False,recovery_strategy=delta,concurrent_batch_size=500

    test_fail_over_node_followed_by_rebalance_out_or_add_back,cb_bucket_name=default,graceful_failover=False,cbas_bucket_name=default_cbas,cbas_dataset_name=default_ds,items=10000,nodeType=KV,rebalance_out=True,concurrent_batch_size=500
    test_fail_over_node_followed_by_rebalance_out_or_add_back,cb_bucket_name=default,graceful_failover=False,cbas_bucket_name=default_cbas,cbas_dataset_name=default_ds,items=10000,nodeType=KV,rebalance_out=False,recovery_strategy=full,concurrent_batch_size=500
    test_fail_over_node_followed_by_rebalance_out_or_add_back,cb_bucket_name=default,graceful_failover=False,cbas_bucket_name=default_cbas,cbas_dataset_name=default_ds,items=10000,nodeType=KV,rebalance_out=False,recovery_strategy=delta,concurrent_batch_size=500

    test_fail_over_node_followed_by_rebalance_out_or_add_back,cb_bucket_name=default,graceful_failover=False,cbas_bucket_name=default_cbas,cbas_dataset_name=default_ds,items=10000,nodeType=CBAS,rebalance_out=True,concurrent_batch_size=500
    test_fail_over_node_followed_by_rebalance_out_or_add_back,cb_bucket_name=default,graceful_failover=False,cbas_bucket_name=default_cbas,cbas_dataset_name=default_ds,items=10000,nodeType=CBAS,rebalance_out=False,recovery_strategy=full,concurrent_batch_size=500
    '''

    def test_fail_over_node_followed_by_rebalance_out_or_add_back(self):
        """
        1. Start with an initial setup, having 1 KV and 1 CBAS
        2. Add a node that will be failed over - KV/CBAS
        3. Create CBAS buckets and dataset
        4. Fail over the node based on the graceful_failover parameter specified
        5. Rebalance out/add back based on input param specified in conf file
        6. Perform doc operations
        7. run concurrent queries
        8. Verify document count on dataset post failover
        """
        self.log.info("Add an extra node to fail-over")
        self.add_node(node=self.rebalanceServers[1])

        self.log.info("Read the failure out type to be performed")
        graceful_failover = self.input.param("graceful_failover", True)

        self.log.info("Set up test - Create cbas buckets and data-sets")
        self.setup_for_test()

        self.log.info("Perform Async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(
            docs_per_day=self.num_items * 3 / 2, start=self.num_items)
        kv_task = self._async_load_all_buckets(self.master, generators,
                                               "create", 0)

        self.log.info("Run concurrent queries on CBAS")
        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(
            query,
            "async",
            self.num_concurrent_queries,
            batch_size=self.concurrent_batch_size)

        self.log.info("fail-over the node")
        fail_task = self._cb_cluster.async_failover(self.input.servers,
                                                    [self.rebalanceServers[1]],
                                                    graceful_failover)
        fail_task.get_result()

        self.log.info(
            "Read input param to decide on add back or rebalance out")
        self.rebalance_out = self.input.param("rebalance_out", False)
        if self.rebalance_out:
            self.log.info("Rebalance out the fail-over node")
            self.rebalance()
        else:
            self.recovery_strategy = self.input.param("recovery_strategy",
                                                      "full")
            self.log.info("Performing %s recovery" % self.recovery_strategy)
            success = False
            end_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
            while datetime.datetime.now() < end_time and not success:
                try:
                    self.sleep(10, message="Wait for fail over complete")
                    self.rest.set_recovery_type(
                        'ns_1@' + self.rebalanceServers[1].ip,
                        self.recovery_strategy)
                    success = True
                except Exception:
                    self.log.info(
                        "Fail over in progress. Re-try after 10 seconds.")
                    pass
            if not success:
                self.fail("Recovery %s failed." % self.recovery_strategy)
            self.rest.add_back_node('ns_1@' + self.rebalanceServers[1].ip)
            self.rebalance()

        self.log.info("Get KV ops result")
        for task in kv_task:
            task.get_result()

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        self.log.info("Validate dataset count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from `%s`' %
            self.cb_bucket_name)['results'][0]['$1']
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name,
                count_n1ql,
                0,
                timeout=400,
                analytics_timeout=400):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    '''
    test_to_fail_initial_rebalance_and_verify_subsequent_rebalance_succeeds,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,nodeType=CBAS,num_queries=10,restart_couchbase_on_incoming_or_outgoing_node=True,rebalance_type=in
    test_to_fail_initial_rebalance_and_verify_subsequent_rebalance_succeeds,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,nodeType=CBAS,num_queries=10,restart_couchbase_on_incoming_or_outgoing_node=True,rebalance_type=out
    test_to_fail_initial_rebalance_and_verify_subsequent_rebalance_succeeds,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,nodeType=CBAS,num_queries=10,restart_couchbase_on_incoming_or_outgoing_node=True,rebalance_type=swap
    '''

    def test_to_fail_initial_rebalance_and_verify_subsequent_rebalance_succeeds(
            self):

        self.log.info("Pick the incoming and outgoing nodes during rebalance")
        self.rebalance_type = self.input.param("rebalance_type", "in")
        nodes_to_add = [self.rebalanceServers[1]]
        nodes_to_remove = []
        reinitialize_cbas_util = False
        if self.rebalance_type == 'out':
            nodes_to_remove.append(self.rebalanceServers[1])
            self.add_node(self.rebalanceServers[1])
            nodes_to_add = []
        elif self.rebalance_type == 'swap':
            self.add_node(nodes_to_add[0], rebalance=False)
            nodes_to_remove.append(self.cbas_node)
            reinitialize_cbas_util = True
        self.log.info(
            "Incoming nodes - %s, outgoing nodes - %s. For rebalance type %s "
            % (nodes_to_add, nodes_to_remove, self.rebalance_type))

        self.log.info("Creates cbas buckets and dataset")
        dataset_count_query = "select count(*) from {0};".format(
            self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Perform async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(
            docs_per_day=self.num_items * 3 / 2, start=self.num_items)
        kv_task = self._async_load_all_buckets(self.master,
                                               generators,
                                               "create",
                                               0,
                                               batch_size=5000)

        self.log.info("Run concurrent queries on CBAS")
        handles = self.cbas_util._run_concurrent_queries(
            dataset_count_query, "async", self.num_concurrent_queries)

        self.log.info("Fetch the server to restart couchbase on")
        restart_couchbase_on_incoming_or_outgoing_node = self.input.param(
            "restart_couchbase_on_incoming_or_outgoing_node", True)
        if not restart_couchbase_on_incoming_or_outgoing_node:
            node = self.cbas_node
        else:
            node = self.rebalanceServers[1]
        shell = RemoteMachineShellConnection(node)

        self.log.info("Rebalance nodes")
        self.cluster.async_rebalance(self.servers, nodes_to_add,
                                     nodes_to_remove)

        self.log.info("Restart Couchbase on node %s" % node.ip)
        shell.restart_couchbase()
        self.sleep(30, message="Waiting for service to be back again...")

        self.log.info("Verify subsequent rebalance is successful")
        # Node is already added to the cluster in the previous rebalance;
        # adding it again would throw an exception
        nodes_to_add = []
        self.assertTrue(
            self.cluster.rebalance(self.servers, nodes_to_add,
                                   nodes_to_remove))

        self.log.info("Get KV ops result")
        for task in kv_task:
            task.get_result()

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if reinitialize_cbas_util is True:
            self.cbas_util = cbas_utils(self.master, self.rebalanceServers[1])
            self.cbas_util.createConn("default")

        self.log.info("Validate dataset count on CBAS")
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 3 / 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    '''
    test_rebalance_on_nodes_running_multiple_services,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,nodeType=KV,num_queries=10,rebalance_type=in
    test_rebalance_on_nodes_running_multiple_services,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,nodeType=KV,num_queries=10,rebalance_type=out
    test_rebalance_on_nodes_running_multiple_services,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10,num_queries=10,rebalance_type=swap,rebalance_cbas_and_kv=True
    '''

    def test_rebalance_on_nodes_running_multiple_services(self):

        self.log.info("Pick the incoming and outgoing nodes during rebalance")
        active_services = ['cbas,fts,kv']
        self.rebalance_type = self.input.param("rebalance_type", "in")
        nodes_to_add = [self.rebalanceServers[1]]
        nodes_to_remove = []
        if self.rebalance_type == 'out':
            # This node will be rebalanced out
            nodes_to_remove.append(self.rebalanceServers[1])
            # Will be running services as specified in the list - active_services
            self.add_node(nodes_to_add[0], services=active_services)
            # The node was already added above, so there is nothing new
            # to add during the rebalance
            nodes_to_add = []
        elif self.rebalance_type == 'swap':
            # Below node will be swapped with the incoming node specified in nodes_to_add
            self.add_node(nodes_to_add[0], services=active_services)
            nodes_to_add = []
            nodes_to_add.append(self.rebalanceServers[3])
            # Below node will be removed and swapped with node that was added earlier
            nodes_to_remove.append(self.rebalanceServers[1])

        self.log.info(
            "Incoming nodes - %s, outgoing nodes - %s. For rebalance type %s "
            % (nodes_to_add, nodes_to_remove, self.rebalance_type))

        self.log.info("Creates cbas buckets and dataset")
        dataset_count_query = "select count(*) from {0};".format(
            self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Perform async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(
            docs_per_day=self.num_items * 3 / 2, start=self.num_items)
        kv_task = self._async_load_all_buckets(self.master,
                                               generators,
                                               "create",
                                               0,
                                               batch_size=5000)

        self.log.info("Run concurrent queries on CBAS")
        handles = self.cbas_util._run_concurrent_queries(
            dataset_count_query, "async", self.num_concurrent_queries)

        self.log.info("Rebalance nodes")
        # Nodes already added via add_node earlier are not
        # included in nodes_to_add again
        self.cluster.rebalance(self.servers,
                               nodes_to_add,
                               nodes_to_remove,
                               services=active_services)

        self.log.info("Get KV ops result")
        for task in kv_task:
            task.get_result()

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        self.log.info("Validate dataset count on CBAS")
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 3 / 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )

    def tearDown(self):
        super(CBASClusterOperations, self).tearDown()
Example #5
    def setUp(self):
        self.input = TestInputSingleton.input

        # Framework specific parameters
        self.log_level = self.input.param("log_level", "info").upper()
        self.infra_log_level = self.input.param("infra_log_level",
                                                "info").upper()
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
        self.tear_down_while_setup = self.input.param("tear_down_while_setup",
                                                      True)
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.thread_to_use = self.input.param("threads_to_use", 10)
        self.case_number = self.input.param("case_number", 0)
        # End of framework parameters

        # Cluster level info settings
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port = self.input.param("port", None)
        self.port_info = self.input.param("port_info", None)
        self.servers = self.input.servers
        self.__cb_clusters = []
        self.num_servers = self.input.param("servers", len(self.servers))
        self.primary_index_created = False
        self.index_quota_percent = self.input.param("index_quota_percent",
                                                    None)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        # CBAS setting
        self.jre_path = self.input.param("jre_path", None)
        # End of cluster info parameters

        # Bucket specific params
        self.bucket_type = self.input.param("bucket_type",
                                            Bucket.bucket_type.MEMBASE)
        self.bucket_size = self.input.param("bucket_size", None)
        self.bucket_lww = self.input.param("lww", True)
        self.standard_buckets = self.input.param("standard_buckets", 1)
        if self.standard_buckets > 10:
            self.bucket_util.change_max_buckets(self.standard_buckets)
        self.vbuckets = self.input.param("vbuckets", 1024)
        self.num_replicas = self.input.param("replicas", 1)
        self.active_resident_threshold = int(
            self.input.param("active_resident_threshold", 100))
        self.compression_mode = self.input.param("compression_mode", 'passive')
        # End of bucket parameters

        # Doc specific params
        self.key_size = self.input.param("key_size", 0)
        self.doc_size = self.input.param("doc_size", 10)
        self.sub_doc_size = self.input.param("sub_doc_size", 10)
        self.doc_type = self.input.param("doc_type", "json")
        self.num_items = self.input.param("num_items", 100000)
        self.target_vbucket = self.input.param("target_vbucket", None)
        self.maxttl = self.input.param("maxttl", 0)
        # End of doc specific parameters

        # Transactions parameters
        self.transaction_timeout = self.input.param("transaction_timeout", 100)
        self.transaction_commit = self.input.param("transaction_commit", True)
        self.update_count = self.input.param("update_count", 1)
        self.sync = self.input.param("sync", True)
        self.default_bucket = self.input.param("default_bucket", True)
        self.num_buckets = self.input.param("num_buckets", 0)
        self.atomicity = self.input.param("atomicity", False)
        # end of transaction parameters

        # Client specific params
        self.sdk_client_type = self.input.param("sdk_client_type", "java")
        self.sdk_compression = self.input.param("sdk_compression", True)
        self.replicate_to = self.input.param("replicate_to", 0)
        self.persist_to = self.input.param("persist_to", 0)
        self.sdk_retries = self.input.param("sdk_retries", 5)
        self.sdk_timeout = self.input.param("sdk_timeout", 5)
        self.durability_level = self.input.param("durability", "")

        # Doc Loader Params
        self.process_concurrency = self.input.param("process_concurrency", 8)
        self.batch_size = self.input.param("batch_size", 20)
        self.ryow = self.input.param("ryow", False)
        self.check_persistence = self.input.param("check_persistence", False)
        # End of client specific parameters

        # Initial cluster configuration and rebalance parameters
        self.services_init = self.input.param("services_init", None)
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.wait_timeout = self.input.param("wait_timeout", 60)
        self.dgm_run = self.input.param("dgm_run", False)
        self.verify_unacked_bytes = self.input.param("verify_unacked_bytes",
                                                     False)
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = self.input.param(
            "rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = self.input.param(
            "rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = self.input.param("maxParallelIndexers",
                                                    None)
        self.maxParallelReplicaIndexers = self.input.param(
            "maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", None)
        if not hasattr(self, 'skip_buckets_handle'):
            self.skip_buckets_handle = self.input.param(
                "skip_buckets_handle", False)

        # Initiate logging variables
        self.log = logging.getLogger("test")
        self.infra_log = logging.getLogger("infra")

        # Configure loggers
        self.log.setLevel(self.log_level)
        self.infra_log.setLevel(self.infra_log_level)

        # Support lib objects for testcase execution
        self.task_manager = TaskManager(self.thread_to_use)
        self.task = ServerTasks(self.task_manager)
        # End of library object creation

        self.cleanup = False
        self.nonroot = False
        self.test_failure = None

        self.__log_setup_status("started")
        if len(self.input.clusters) > 1:
            # Multi cluster setup
            counter = 1
            for _, nodes in self.input.clusters.iteritems():
                self.__cb_clusters.append(
                    CBCluster(name="C%s" % counter, servers=nodes))
                counter += 1
        else:
            # Single cluster
            self.cluster = CBCluster(servers=self.servers)
            self.__cb_clusters.append(self.cluster)
            self.cluster_util = ClusterUtils(self.cluster, self.task_manager)

            self.bucket_util = BucketUtils(self.cluster, self.cluster_util,
                                           self.task)

        for cluster in self.__cb_clusters:
            shell = RemoteMachineShellConnection(cluster.master)
            self.os_info = shell.extract_remote_info().type.lower()
            if self.os_info != 'windows':
                if cluster.master.ssh_username != "root":
                    self.nonroot = True
                    shell.disconnect()
                    break
        """ some tests need to bypass checking cb server at set up
            to run installation """
        self.skip_init_check_cbserver = self.input.param(
            "skip_init_check_cbserver", False)

        try:
            if self.skip_setup_cleanup:
                self.buckets = self.bucket_util.get_all_buckets()
                return
            if not self.skip_init_check_cbserver:
                for cluster in self.__cb_clusters:
                    self.cb_version = None
                    if RestHelper(RestConnection(
                            cluster.master)).is_ns_server_running():
                        """
                        Every new Couchbase version introduces features
                        that the test code cannot exercise on earlier
                        releases, so fetch the Couchbase version to
                        filter out those tests.
                        """
                        self.cb_version = RestConnection(
                            cluster.master).get_nodes_version()
                    else:
                        self.log.debug("couchbase server does not run yet")
                    # TAP protocol support was dropped after 3.x,
                    # and 3.x itself is no longer supported
                    self.protocol = "dcp"
            self.services_map = None

            self.__log_setup_status("started")
            for cluster in self.__cb_clusters:
                if not self.skip_buckets_handle and not self.skip_init_check_cbserver:
                    self.log.debug("Cleaning up cluster")
                    cluster_util = ClusterUtils(cluster, self.task_manager)
                    bucket_util = BucketUtils(cluster, cluster_util, self.task)
                    cluster_util.cluster_cleanup(bucket_util)

            # avoid any cluster operations in setup for new upgrade
            #  & upgradeXDCR tests
            if str(self.__class__).find('newupgradetests') != -1 or \
                    str(self.__class__).find('upgradeXDCR') != -1 or \
                    str(self.__class__).find('Upgrade_EpTests') != -1 or \
                    hasattr(self, 'skip_buckets_handle') and \
                    self.skip_buckets_handle:
                self.log.warning(
                    "any cluster operation in setup will be skipped")
                self.primary_index_created = True
                self.__log_setup_status("finished")
                return
            # Avoid cleanup if the previous test has already been torn down
            if self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    self.log.warn(
                        "TearDown for previous test failed. will retry..")
                    self.case_number -= 1000
                self.cleanup = True
                if not self.skip_init_check_cbserver:
                    self.tearDownEverything()
                    self.tear_down_while_setup = False
            if not self.skip_init_check_cbserver:
                for cluster in self.__cb_clusters:
                    self.log.info("Initializing cluster")
                    cluster_util = ClusterUtils(cluster, self.task_manager)
                    # self.cluster_util.reset_cluster()
                    master_services = cluster_util.get_services(
                        cluster.servers[:1], self.services_init, start_node=0)
                    if master_services is not None:
                        master_services = master_services[0].split(",")

                    self.quota = self._initialize_nodes(
                        self.task,
                        cluster,
                        self.disabled_consistent_view,
                        self.rebalanceIndexWaitingDisabled,
                        self.rebalanceIndexPausingDisabled,
                        self.maxParallelIndexers,
                        self.maxParallelReplicaIndexers,
                        self.port,
                        self.quota_percent,
                        services=master_services)

                    cluster_util.change_env_variables()
                    cluster_util.change_checkpoint_params()
                    #cluster_util.add_all_nodes_then_rebalance(cluster.servers[1:])
                    self.log.info("{0} initialized".format(cluster))
            else:
                self.quota = ""

            for cluster in self.__cb_clusters:
                cluster_util = ClusterUtils(cluster, self.task_manager)
                if self.log_info:
                    cluster_util.change_log_info()
                if self.log_location:
                    cluster_util.change_log_location()
                if self.stat_info:
                    cluster_util.change_stat_info()
                if self.port_info:
                    cluster_util.change_port_info()
                if self.port:
                    self.port = str(self.port)

            self.__log_setup_status("finished")

            if not self.skip_init_check_cbserver:
                self.__log("started")
                self.sleep(5)
        except Exception as e:
            traceback.print_exc()
            self.task.shutdown(force=True)
            self.fail(e)
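
The setUp above relies on a small retry convention for failed teardowns: when the previous test's tearDown fails, 1000 is added to case_number, and the next setUp detects the offset, subtracts it, and re-runs the cleanup before initializing. A minimal sketch of that convention follows; RetryAwareTest and do_cluster_cleanup() are hypothetical stand-ins, not part of the framework.

# Illustrative sketch of the case_number retry convention used above.
class RetryAwareTest(object):
    def __init__(self, case_number):
        self.case_number = case_number

    def setup(self):
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                # Previous tearDown failed: strip the offset and retry cleanup
                self.case_number -= 1000
            self.teardown_everything()

    def teardown_everything(self):
        try:
            do_cluster_cleanup()  # hypothetical cleanup call
        except Exception:
            # Flag the failure so the next setup() retries the cleanup
            self.case_number += 1000


def do_cluster_cleanup():
    # Placeholder for the real cluster/bucket cleanup
    pass
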
Example #6
    def setUp(self):
        super(RebalanceinJython, self).setUp()
        self.task_manager = TaskManager()
        self.load_gen_tasks = []
Example #8
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.input = TestInputSingleton.input

        # Framework specific parameters
        self.log_level = self.input.param("log_level", "info").upper()
        self.infra_log_level = self.input.param("infra_log_level",
                                                "error").upper()
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
        self.tear_down_while_setup = self.input.param("tear_down_while_setup",
                                                      True)
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.thread_to_use = self.input.param("threads_to_use", 30)
        self.case_number = self.input.param("case_number", 0)
        # End of framework parameters

        # Cluster level info settings
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port = self.input.param("port", None)
        self.port_info = self.input.param("port_info", None)
        self.servers = self.input.servers
        self.cb_clusters = OrderedDict()
        self.num_servers = self.input.param("servers", len(self.servers))
        self.primary_index_created = False
        self.index_quota_percent = self.input.param("index_quota_percent",
                                                    None)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        # CBAS setting
        self.jre_path = self.input.param("jre_path", None)
        self.enable_dp = self.input.param("enable_dp", False)
        # End of cluster info parameters

        # Bucket specific params
        self.bucket_type = self.input.param("bucket_type", Bucket.Type.MEMBASE)
        self.bucket_ttl = self.input.param("bucket_ttl", 0)
        self.bucket_size = self.input.param("bucket_size", None)
        self.bucket_conflict_resolution_type = \
            self.input.param("bucket_conflict_resolution",
                             Bucket.ConflictResolution.SEQ_NO)
        self.bucket_replica_index = self.input.param("bucket_replica_index", 1)
        self.bucket_eviction_policy = \
            self.input.param("bucket_eviction_policy",
                             Bucket.EvictionPolicy.VALUE_ONLY)
        self.flush_enabled = self.input.param("flushEnabled",
                                              Bucket.FlushBucket.DISABLED)
        self.bucket_time_sync = self.input.param("bucket_time_sync", False)
        self.standard_buckets = self.input.param("standard_buckets", 1)
        self.num_replicas = self.input.param("replicas", Bucket.ReplicaNum.ONE)
        self.active_resident_threshold = \
            int(self.input.param("active_resident_threshold", 100))
        self.compression_mode = \
            self.input.param("compression_mode",
                             Bucket.CompressionMode.PASSIVE)
        self.bucket_storage = \
            self.input.param("bucket_storage",
                             Bucket.StorageBackend.couchstore)
        if self.bucket_storage == Bucket.StorageBackend.magma:
            self.bucket_eviction_policy = Bucket.EvictionPolicy.FULL_EVICTION

        self.scope_name = self.input.param("scope", CbServer.default_scope)
        self.collection_name = self.input.param("collection",
                                                CbServer.default_collection)
        self.bucket_durability_level = self.input.param(
            "bucket_durability", Bucket.DurabilityLevel.NONE).upper()
        self.bucket_purge_interval = self.input.param("bucket_purge_interval",
                                                      1)
        self.bucket_durability_level = \
            BucketDurability[self.bucket_durability_level]
        # End of bucket parameters

        # Doc specific params
        self.key = self.input.param("key", "test_docs")
        self.key_size = self.input.param("key_size", 8)
        self.doc_size = self.input.param("doc_size", 256)
        self.sub_doc_size = self.input.param("sub_doc_size", 10)
        self.doc_type = self.input.param("doc_type", "json")
        self.num_items = self.input.param("num_items", 100000)
        self.target_vbucket = self.input.param("target_vbucket", None)
        self.maxttl = self.input.param("maxttl", 0)
        self.random_exp = self.input.param("random_exp", False)
        self.randomize_doc_size = self.input.param("randomize_doc_size", False)
        self.randomize_value = self.input.param("randomize_value", False)
        self.rev_write = self.input.param("rev_write", False)
        self.rev_read = self.input.param("rev_read", False)
        self.rev_update = self.input.param("rev_update", False)
        self.rev_del = self.input.param("rev_del", False)
        self.random_key = self.input.param("random_key", False)
        self.mix_key_size = self.input.param("mix_key_size", False)
        # End of doc specific parameters

        # Transactions parameters
        self.transaction_timeout = self.input.param("transaction_timeout", 100)
        self.transaction_commit = self.input.param("transaction_commit", True)
        self.update_count = self.input.param("update_count", 1)
        self.sync = self.input.param("sync", True)
        self.default_bucket = self.input.param("default_bucket", True)
        self.num_buckets = self.input.param("num_buckets", 0)
        self.atomicity = self.input.param("atomicity", False)
        self.defer = self.input.param("defer", False)
        # end of transaction parameters

        # Client specific params
        self.sdk_client_type = self.input.param("sdk_client_type", "java")
        self.replicate_to = self.input.param("replicate_to", 0)
        self.persist_to = self.input.param("persist_to", 0)
        self.sdk_retries = self.input.param("sdk_retries", 5)
        self.sdk_timeout = self.input.param("sdk_timeout", 5)
        self.time_unit = self.input.param("time_unit", "seconds")
        self.durability_level = self.input.param("durability", "").upper()
        self.sdk_client_pool = self.input.param("sdk_client_pool", None)
        self.sdk_pool_capacity = self.input.param("sdk_pool_capacity", 1)
        # Client compression settings
        self.sdk_compression = self.input.param("sdk_compression", None)
        compression_min_ratio = self.input.param("min_ratio", None)
        compression_min_size = self.input.param("min_size", None)
        if type(self.sdk_compression) is bool:
            self.sdk_compression = {"enabled": self.sdk_compression}
            if compression_min_size:
                self.sdk_compression["minSize"] = compression_min_size
            if compression_min_ratio:
                self.sdk_compression["minRatio"] = compression_min_ratio

        # Doc Loader Params
        self.process_concurrency = self.input.param("process_concurrency", 20)
        self.batch_size = self.input.param("batch_size", 2000)
        self.dgm_batch = self.input.param("dgm_batch", 5000)
        self.ryow = self.input.param("ryow", False)
        self.check_persistence = self.input.param("check_persistence", False)
        # End of client specific parameters

        # Rebalance / cluster initialization parameters
        self.services_init = self.input.param("services_init", None)
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.wait_timeout = self.input.param("wait_timeout", 120)
        self.verify_unacked_bytes = \
            self.input.param("verify_unacked_bytes", False)
        self.disabled_consistent_view = \
            self.input.param("disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = \
            self.input.param("rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = \
            self.input.param("rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = \
            self.input.param("maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = \
            self.input.param("maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", 90)
        self.skip_buckets_handle = self.input.param("skip_buckets_handle",
                                                    False)

        # SDKClientPool object for creating generic clients across tasks
        if self.sdk_client_pool is True:
            self.init_sdk_pool_object()

        # Initiate logging variables
        self.log = logger.get("test")
        self.infra_log = logger.get("infra")

        self.cleanup_pcaps()
        self.collect_pcaps = self.input.param("collect_pcaps", False)
        if self.collect_pcaps:
            self.start_collect_pcaps()

        # variable for log collection using cbCollect
        self.get_cbcollect_info = self.input.param("get-cbcollect-info", False)

        # Variable for initializing the current (start of test) timestamp
        self.start_timestamp = datetime.now()
        '''
        Use this flag with care: it is meant only for stand-alone tests.
        During bug reproduction, when a crash is detected,
        stop_server_on_crash stops the server so that
        data/logs/dumps can be collected at the right time.
        '''
        self.stop_server_on_crash = self.input.param("stop_server_on_crash",
                                                     False)
        self.collect_data = self.input.param("collect_data", False)

        # Configure loggers
        self.log.setLevel(self.log_level)
        self.infra_log.setLevel(self.infra_log_level)

        # Support lib objects for testcase execution
        self.task_manager = TaskManager(self.thread_to_use)
        self.task = ServerTasks(self.task_manager)
        # End of library object creation

        self.sleep = sleep

        self.cleanup = False
        self.nonroot = False
        self.test_failure = None
        self.crash_warning = self.input.param("crash_warning", False)
        self.summary = TestSummary(self.log)

        # Populate memcached_port in case of cluster_run
        cluster_run_base_port = ClusterRun.port
        if int(self.input.servers[0].port) == ClusterRun.port:
            for server in self.input.servers:
                server.port = cluster_run_base_port
                cluster_run_base_port += 1
                # If not defined in node.ini under 'memcached_port' section
                if server.memcached_port == CbServer.memcached_port:
                    server.memcached_port = \
                        ClusterRun.memcached_port \
                        + (2 * (int(server.port) - ClusterRun.port))
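                    # Worked example (values are illustrative assumptions):
                    # if ClusterRun.port were 9000 and ClusterRun.memcached_port
                    # were 12000, the node on port 9002 would get
                    # 12000 + 2 * (9002 - 9000) = 12004.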

        self.log_setup_status(self.__class__.__name__, "started")
        cluster_name_format = "C%s"
        default_cluster_index = counter_index = 1
        if len(self.input.clusters) > 1:
            # Multi cluster setup
            for _, nodes in self.input.clusters.items():
                cluster_name = cluster_name_format % counter_index
                tem_cluster = CBCluster(name=cluster_name, servers=nodes)
                self.cb_clusters[cluster_name] = tem_cluster
                counter_index += 1
        else:
            # Single cluster
            cluster_name = cluster_name_format % counter_index
            self.cb_clusters[cluster_name] = CBCluster(name=cluster_name,
                                                       servers=self.servers)

        # Initialize self.cluster with first available cluster as default
        self.cluster = self.cb_clusters[cluster_name_format %
                                        default_cluster_index]
        self.cluster_util = ClusterUtils(self.cluster, self.task_manager)
        self.bucket_util = BucketUtils(self.cluster_util, self.task)

        if self.standard_buckets > 10:
            self.bucket_util.change_max_buckets(self.cluster.master,
                                                self.standard_buckets)

        for cluster_name, cluster in self.cb_clusters.items():
            shell = RemoteMachineShellConnection(cluster.master)
            self.os_info = shell.extract_remote_info().type.lower()
            if self.os_info != 'windows':
                if cluster.master.ssh_username != "root":
                    self.nonroot = True
                    shell.disconnect()
                    break
            shell.disconnect()
        """ some tests need to bypass checking cb server at set up
            to run installation """
        self.skip_init_check_cbserver = \
            self.input.param("skip_init_check_cbserver", False)

        try:
            if self.skip_setup_cleanup:
                self.cluster.buckets = self.bucket_util.get_all_buckets(
                    self.cluster)
                return
            self.services_map = None

            self.log_setup_status("BaseTestCase", "started")
            for cluster_name, cluster in self.cb_clusters.items():
                if not self.skip_buckets_handle \
                        and not self.skip_init_check_cbserver:
                    self.log.debug("Cleaning up cluster")
                    cluster_util = ClusterUtils(cluster, self.task_manager)
                    bucket_util = BucketUtils(cluster_util, self.task)
                    cluster_util.cluster_cleanup(bucket_util)

            # Avoid cluster operations in setup for new upgrade / upgradeXDCR
            if str(self.__class__).find('newupgradetests') != -1 or \
                    str(self.__class__).find('upgradeXDCR') != -1 or \
                    str(self.__class__).find('Upgrade_EpTests') != -1 or \
                    self.skip_buckets_handle:
                self.log.warning("Cluster operation in setup will be skipped")
                self.primary_index_created = True
                self.log_setup_status("BaseTestCase", "finished")
                return
            # Avoid cleanup if the previous test has already been torn down
            if self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    self.log.warn("TearDown for prev test failed. Will retry")
                    self.case_number -= 1000
                self.cleanup = True
                if not self.skip_init_check_cbserver:
                    self.tearDownEverything()
                    self.tear_down_while_setup = False
            if not self.skip_init_check_cbserver:
                for cluster_name, cluster in self.cb_clusters.items():
                    self.log.info("Initializing cluster")
                    cluster_util = ClusterUtils(cluster, self.task_manager)
                    cluster_util.reset_cluster()
                    master_services = cluster_util.get_services(
                        cluster.servers[:1], self.services_init, start_node=0)
                    if master_services is not None:
                        master_services = master_services[0].split(",")

                    self.quota = self._initialize_nodes(
                        self.task,
                        cluster,
                        self.disabled_consistent_view,
                        self.rebalanceIndexWaitingDisabled,
                        self.rebalanceIndexPausingDisabled,
                        self.maxParallelIndexers,
                        self.maxParallelReplicaIndexers,
                        self.port,
                        self.quota_percent,
                        services=master_services)

                    cluster_util.change_env_variables()
                    cluster_util.change_checkpoint_params()
                    self.log.info("{0} initialized".format(cluster))
            else:
                self.quota = ""

            # Enable developer preview (dp), since collections need it enabled
            if self.enable_dp:
                for server in self.cluster.servers:
                    shell_conn = RemoteMachineShellConnection(server)
                    cb_cli = CbCli(shell_conn)
                    cb_cli.enable_dp()
                    shell_conn.disconnect()

            for cluster_name, cluster in self.cb_clusters.items():
                cluster_util = ClusterUtils(cluster, self.task_manager)
                if self.log_info:
                    cluster_util.change_log_info()
                if self.log_location:
                    cluster_util.change_log_location()
                if self.stat_info:
                    cluster_util.change_stat_info()
                if self.port_info:
                    cluster_util.change_port_info()
                if self.port:
                    self.port = str(self.port)

            self.log_setup_status("BaseTestCase", "finished")

            if not self.skip_init_check_cbserver:
                self.__log("started")
        except Exception as e:
            traceback.print_exc()
            self.task.shutdown(force=True)
            self.fail(e)

    def cleanup_pcaps(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            # Stop old instances of tcpdump if still running
            stop_tcp_cmd = "if [[ \"$(pgrep tcpdump)\" ]]; " \
                           "then kill -s TERM $(pgrep tcpdump); fi"
            _, _ = shell.execute_command(stop_tcp_cmd)
            shell.execute_command("rm -rf pcaps")
            shell.execute_command("rm -rf " + server.ip + "_pcaps.zip")
            shell.disconnect()

    def start_collect_pcaps(self):
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            # Create path for storing pcaps
            create_path = "mkdir -p pcaps"
            o, e = shell.execute_command(create_path)
            shell.log_command_output(o, e)
            # Install tcpdump command if it doesn't exist
            o, e = shell.execute_command("yum install -y tcpdump")
            shell.log_command_output(o, e)
            # Install screen command if it doesn't exist
            o, e = shell.execute_command("yum install -y screen")
            shell.log_command_output(o, e)
            # Execute the tcpdump command
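            # tcpdump notes: -C 500 -W 10 rotates the capture across at most
            # ten ~500 MB files, -s 0 captures full packets, and the trailing
            # "tcp" filter restricts the dump to TCP traffic on eth0;
            # "screen -dmS test" keeps the capture running detached.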
            tcp_cmd = "screen -dmS test bash -c \"tcpdump -C 500 -W 10 " \
                      "-w pcaps/pack-dump-file.pcap  -i eth0 -s 0 tcp\""
            o, e = shell.execute_command(tcp_cmd)
            shell.log_command_output(o, e)
            shell.disconnect()

    def start_fetch_pcaps(self):
        log_path = TestInputSingleton.input.param("logs_folder", "/tmp")
        for server in self.servers:
            remote_client = RemoteMachineShellConnection(server)
            # Stop tcpdump
            stop_tcp_cmd = "if [[ \"$(pgrep tcpdump)\" ]]; " \
                           "then kill -s TERM $(pgrep tcpdump); fi"
            o, e = remote_client.execute_command(stop_tcp_cmd)
            remote_client.log_command_output(o, e)
            if self.is_test_failed():
                # install zip unzip
                o, e = remote_client.execute_command(
                    "yum install -y zip unzip")
                remote_client.log_command_output(o, e)
                # zip the pcaps folder
                zip_cmd = "zip -r " + server.ip + "_pcaps.zip pcaps"
                o, e = remote_client.execute_command(zip_cmd)
                remote_client.log_command_output(o, e)
                # transfer the zip file
                zip_file_copied = remote_client.get_file(
                    "/root", os.path.basename(server.ip + "_pcaps.zip"),
                    log_path)
                self.log.info("%s node pcap zip copied on client : %s" %
                              (server.ip, zip_file_copied))
                if zip_file_copied:
                    # Remove the zips
                    remote_client.execute_command("rm -rf " + server.ip +
                                                  "_pcaps.zip")
            # Remove pcaps
            remote_client.execute_command("rm -rf pcaps")
            remote_client.disconnect()

    def tearDown(self):
        self.task_manager.shutdown_task_manager()
        self.task.shutdown(force=True)
        self.task_manager.abort_all_tasks()
        if self.sdk_client_pool:
            self.sdk_client_pool.shutdown()
        if self.collect_pcaps:
            self.log.info("Starting Pcaps collection!!")
            self.start_fetch_pcaps()
        result = self.check_coredump_exist(self.servers, force_collect=True)
        self.tearDownEverything()
        if not self.crash_warning:
            self.assertFalse(result, msg="Cb_log file validation failed")
        if self.crash_warning and result:
            self.log.warn("CRASH | CRITICAL | WARN messages found in cb_logs")

    def tearDownEverything(self):
        if self.skip_setup_cleanup:
            return
        for _, cluster in self.cb_clusters.items():
            cluster_util = ClusterUtils(cluster, self.task_manager)
            bucket_util = BucketUtils(cluster_util, self.task)
            try:
                if self.skip_buckets_handle:
                    return
                test_failed = self.is_test_failed()
                if test_failed \
                        and TestInputSingleton.input.param("stop-on-failure",
                                                           False) \
                        or self.input.param("skip_cleanup", False):
                    self.log.warn("CLEANUP WAS SKIPPED")
                else:
                    if test_failed:
                        # Collect logs because we have not shut things down
                        if self.get_cbcollect_info:
                            self.fetch_cb_collect_logs()

                        get_trace = \
                            TestInputSingleton.input.param("get_trace", None)
                        if get_trace:
                            for server in cluster.servers:
                                shell = \
                                    RemoteMachineShellConnection(server)
                                output, _ = shell.execute_command(
                                    "ps -aef|grep %s" % get_trace)
                                output = shell.execute_command(
                                    "pstack %s" % output[0].split()[1].strip())
                                self.infra_log.debug(output[0])
                                shell.disconnect()
                        else:
                            self.log.critical("Skipping get_trace !!")

                    rest = RestConnection(cluster.master)
                    alerts = rest.get_alerts()
                    if alerts is not None and len(alerts) != 0:
                        self.infra_log.warn("Alerts found: {0}".format(alerts))
                    self.log.debug("Cleaning up cluster")
                    cluster_util.cluster_cleanup(bucket_util)
            except BaseException as e:
                # kill memcached
                traceback.print_exc()
                self.log.warning("Killing memcached due to {0}".format(e))
                cluster_util.kill_memcached()
                # Increase case_number to retry tearDown in setup for next test
                self.case_number += 1000
            finally:
                # stop all existing task manager threads
                if self.cleanup:
                    self.cleanup = False
                else:
                    cluster_util.reset_env_variables()
        self.infra_log.info("========== tasks in thread pool ==========")
        self.task_manager.print_tasks_in_pool()
        self.infra_log.info("==========================================")
        if not self.tear_down_while_setup:
            self.task_manager.shutdown_task_manager()
            self.task.shutdown(force=True)

    def is_test_failed(self):
        return (hasattr(self, '_resultForDoCleanups')
                and len(self._resultForDoCleanups.failures
                or self._resultForDoCleanups.errors)) \
               or (hasattr(self, '_exc_info')
                   and self._exc_info()[1] is not None)

    def handle_setup_exception(self, exception_obj):
        # Shutdown client pool in case of any error before failing
        if self.sdk_client_pool is not None:
            self.sdk_client_pool.shutdown()
        # Print the traceback of the failure
        traceback.print_exc()
        # Throw the exception so that the test will fail at setUp
        raise exception_obj

    def __log(self, status):
        try:
            msg = "{0}: {1} {2}" \
                .format(datetime.now(), self._testMethodName, status)
            RestConnection(self.servers[0]).log_client_error(msg)
        except Exception as e:
            self.log.warning("Exception during REST log_client_error: %s" % e)

    def log_setup_status(self, class_name, status, stage="setup"):
        self.log.info("========= %s %s %s for test #%d %s =========" %
                      (class_name, stage, status, self.case_number,
                       self._testMethodName))

    def _initialize_nodes(self,
                          task,
                          cluster,
                          disabled_consistent_view=None,
                          rebalance_index_waiting_disabled=None,
                          rebalance_index_pausing_disabled=None,
                          max_parallel_indexers=None,
                          max_parallel_replica_indexers=None,
                          port=None,
                          quota_percent=None,
                          services=None):
        quota = 0
        init_tasks = []
        ssh_sessions = dict()

        # Open ssh_connections for command execution
        for server in cluster.servers:
            ssh_sessions[server.ip] = RemoteMachineShellConnection(server)

        for server in cluster.servers:
            # Make sure data and index paths are writable by the couchbase user
            if not server.index_path:
                server.index_path = server.data_path
            for path in set(
                [_f for _f in [server.data_path, server.index_path] if _f]):
                for cmd in ("rm -rf {0}/*".format(path),
                            "chown -R couchbase:couchbase {0}".format(path)):
                    ssh_sessions[server.ip].execute_command(cmd)
                rest = RestConnection(server)
                rest.set_data_path(data_path=server.data_path,
                                   index_path=server.index_path,
                                   cbas_path=server.cbas_path)
            init_port = port or server.port or '8091'
            assigned_services = services
            if cluster.master != server:
                assigned_services = None
            init_tasks.append(
                task.async_init_node(
                    server,
                    disabled_consistent_view,
                    rebalance_index_waiting_disabled,
                    rebalance_index_pausing_disabled,
                    max_parallel_indexers,
                    max_parallel_replica_indexers,
                    init_port,
                    quota_percent,
                    services=assigned_services,
                    index_quota_percent=self.index_quota_percent,
                    gsi_type=self.gsi_type))
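        # The effective quota is the smallest quota reported by any node,
        # so a single under-provisioned node caps the whole cluster.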
        for _task in init_tasks:
            node_quota = self.task_manager.get_task_result(_task)
            if node_quota < quota or quota == 0:
                quota = node_quota
        if quota < 100 and not len(set([server.ip
                                        for server in self.servers])) == 1:
            self.log.warn("RAM quota was defined less than 100 MB:")
            for server in cluster.servers:
                ram = ssh_sessions[server.ip].extract_remote_info().ram
                self.log.debug("Node: {0}: RAM: {1}".format(server.ip, ram))

        # Close all ssh_connections
        for server in cluster.servers:
            ssh_sessions[server.ip].disconnect()

        if self.jre_path:
            for server in cluster.servers:
                rest = RestConnection(server)
                rest.set_jre_path(self.jre_path)
        return quota

    def fetch_cb_collect_logs(self):
        log_path = TestInputSingleton.input.param("logs_folder", "/tmp")
        is_single_node_server = len(self.servers) == 1
        for _, cluster in self.cb_clusters.items():
            rest = RestConnection(cluster.master)
            nodes = rest.get_nodes()
            # Creating cluster_util object to handle multi_cluster scenario
            cluster_util = ClusterUtils(cluster, self.task_manager)
            status = cluster_util.trigger_cb_collect_on_cluster(
                rest, nodes, is_single_node_server)

            if status is True:
                cluster_util.wait_for_cb_collect_to_complete(rest)
                cluster_util.copy_cb_collect_logs(rest, nodes, cluster,
                                                  log_path)
            else:
                self.log.error("API perform_cb_collect returned False")

    def log_failure(self, message):
        self.log.error(message)
        self.summary.set_status("FAILED")
        if self.test_failure is None:
            self.test_failure = message

    def validate_test_failure(self):
        if self.test_failure is not None:
            self.fail(self.test_failure)

    def get_clusters(self):
        return [self.cb_clusters[name] for name in self.cb_clusters.keys()]

    def get_task(self):
        return self.task

    def get_task_mgr(self):
        return self.task_manager

    def init_sdk_pool_object(self):
        self.sdk_client_pool = SDKClientPool()
        DocLoaderUtils.sdk_client_pool = self.sdk_client_pool

    def check_coredump_exist(self, servers, force_collect=False):
        bin_cb = "/opt/couchbase/bin/"
        lib_cb = "/opt/couchbase/var/lib/couchbase/"
        # crash_dir = "/opt/couchbase/var/lib/couchbase/"
        crash_dir_win = "c://CrashDumps"
        result = False
        self.data_sets = dict()

        def find_index_of(str_list, sub_string):
            for i in range(len(str_list)):
                if sub_string in str_list[i]:
                    return i
            return -1

        def get_gdb(gdb_shell, dmp_path, dmp_name):
            dmp_file = dmp_path + dmp_name
            core_file = dmp_path + dmp_name.replace(".dmp", ".core")
            gdb_shell.execute_command("rm -rf " + core_file)
            gdb_shell.execute_command("/" + bin_cb + "minidump-2-core " +
                                      dmp_file + " > " + core_file)
            cmd = "gdb --batch {} -c {} -ex \"bt full\" -ex quit"\
                .format(os.path.join(bin_cb, "memcached"), core_file)
            self.log.info("running %s" % cmd)
            gdb_out = gdb_shell.execute_command(cmd)[0]
            t_index = find_index_of(gdb_out, "Core was generated by")
            gdb_out = gdb_out[t_index:]
            gdb_out = " ".join(gdb_out)
            return gdb_out

        def get_full_thread_dump(gdb_shell):
            cmd = 'gdb -p `(pidof memcached)` -ex "thread apply all bt" -ex detach -ex quit'
            self.log.info("running %s" % cmd)
            thread_dump = gdb_shell.execute_command(cmd)[0]
            index = find_index_of(
                thread_dump, "Thread debugging using libthread_db enabled")
            print(" ".join(thread_dump[index:]))

        def check_if_new_messages(grep_output_list):
            """
            Check the last line of the grep output for its timestamp.
            If that timestamp < the test's start_timestamp, return False
            (the grep output is left over from previous tests).
            Note: this works only if the slave's time (timezone) matches
                  the VM's; otherwise the timestamps cannot be compared.
            """
            last_line = grep_output_list[-1]
            if not re.match(r"[0-9]{4}-[0-9]{2}-[0-9]{2}T", last_line):
                # To check if line doesn't begin with any yyyy-mm-ddT
                self.log.critical("%s does not match any timestamp" %
                                  last_line)
                return True
            timestamp = last_line.split()[0]
            timestamp = timestamp.split(".")[0]
            timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S")
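            # e.g. a (hypothetical) line starting with
            # "2021-01-01T10:15:30.123456+05:30" yields
            # datetime(2021, 1, 1, 10, 15, 30) here.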
            self.log.info("Comparing timestamps: Log's latest timestamp: %s, "
                          "Test's start timestamp is %s" %
                          (timestamp, self.start_timestamp))
            if timestamp > self.start_timestamp:
                return True
            else:
                return False

        for idx, server in enumerate(servers):
            shell = RemoteMachineShellConnection(server)
            shell.extract_remote_info()
            crash_dir = lib_cb + "crash/"
            if shell.info.type.lower() == "windows":
                crash_dir = crash_dir_win
            if int(server.port) in range(ClusterRun.port,
                                         ClusterRun.port + 10):
                crash_dir = os.path.join(
                    TestInputSingleton.input.servers[0].cli_path, "ns_server",
                    "data", "n_%s" % str(idx), "crash")
            dmp_files = shell.execute_command("ls -lt " + crash_dir)[0]
            dmp_files = [f for f in dmp_files if ".core" not in f]
            dmp_files = [f for f in dmp_files if "total" not in f]
            dmp_files = [f.split()[-1] for f in dmp_files]
            dmp_files = [f.strip("\n") for f in dmp_files]
            if dmp_files:
                msg = "%s: %d core dump seen" % (server.ip, len(dmp_files))
                self.log.critical(msg)
                self.log.critical("%s: Stack Trace of first crash - %s\n%s" %
                                  (server.ip, dmp_files[-1],
                                   get_gdb(shell, crash_dir, dmp_files[-1])))
                get_full_thread_dump(shell)
                if self.stop_server_on_crash:
                    shell.stop_couchbase()
                result = True
            else:
                self.log.debug(server.ip + ": No crash files found")

            logs_dir = lib_cb + "logs/"
            if int(server.port) in range(ClusterRun.port,
                                         ClusterRun.port + 10):
                logs_dir = os.path.join(
                    TestInputSingleton.input.servers[0].cli_path, "ns_server",
                    "logs", "n_%s" % str(idx))

            # Perform log file searching based on the input yaml config
            yaml = YAML()
            with open("lib/couchbase_helper/error_log_config.yaml", "r") as fp:
                y_data = yaml.load(fp.read())

            for file_data in y_data["file_name_patterns"]:
                log_files = shell.execute_command(
                    "ls " + os.path.join(logs_dir, file_data['file']))[0]

                if len(log_files) == 0:
                    self.log.debug("%s: No '%s' files found" %
                                   (server.ip, file_data['file']))
                    continue

                if 'target_file_index' in file_data:
                    log_files = [
                        log_files[int(file_data['target_file_index'])]
                    ]

                for log_file in log_files:
                    log_file = log_file.strip("\n")
                    for grep_pattern in file_data['grep_for']:
                        grep_for_str = grep_pattern['string']
                        err_pattern = exclude_pattern = None
                        if 'error_patterns' in grep_pattern:
                            err_pattern = grep_pattern['error_patterns']
                        if 'exclude_patterns' in grep_pattern:
                            exclude_pattern = grep_pattern['exclude_patterns']

                        cmd_to_run = "grep -r '%s' %s" \
                                     % (grep_for_str, log_file)
                        if exclude_pattern is not None:
                            for pattern in exclude_pattern:
                                cmd_to_run += " | grep -v '%s'" % pattern

                        grep_output = shell.execute_command(cmd_to_run)[0]
                        if grep_output and check_if_new_messages(grep_output):
                            regex = r"(\bkvstore-\d+)"
                            grep_str = "".join(grep_output)
                            kvstores = list(set(re.findall(regex, grep_str)))
                            self.data_sets[server] = kvstores
                            grep_str = None
                        if err_pattern is not None:
                            for pattern in err_pattern:
                                index = find_index_of(grep_output, pattern)
                                grep_output = grep_output[:index]
                                if grep_output:
                                    self.log.info("unwanted messages in %s" %
                                                  log_file)
                                    if check_if_new_messages(grep_output):
                                        self.log.critical(
                                            "%s: Found '%s' logs - %s" %
                                            (server.ip, grep_for_str,
                                             "".join(grep_output)))
                                        result = True
                                        break
                        else:
                            if grep_output \
                                    and check_if_new_messages(grep_output):
                                self.log.info("unwanted messages in %s" %
                                              log_file)
                                self.log.critical(
                                    "%s: Found '%s' logs - %s" %
                                    (server.ip, grep_for_str, grep_output))
                                result = True
                                break
                    if result is True:
                        if self.stop_server_on_crash:
                            shell.stop_couchbase()
                        break

            shell.disconnect()
        if result and force_collect and not self.stop_server_on_crash:
            self.fetch_cb_collect_logs()
            self.get_cbcollect_info = False
        if (self.is_test_failed() or result) and self.collect_data:
            self.copy_data_on_slave()

        return result

    def copy_data_on_slave(self, servers=None):
        log_path = TestInputSingleton.input.param("logs_folder", "/tmp")
        if servers is None:
            # Restrict to KV nodes; build a new list instead of removing
            # elements while iterating over the same list
            servers = [node for node in self.cluster.nodes_in_cluster
                       if "kv" in node.services.lower()]
        if type(servers) is not list:
            servers = [servers]
        remote_path = RestConnection(servers[0]).get_data_path()
        file_path = os.path.join(remote_path, self.cluster.buckets[0].name)
        file_name = self.cluster.buckets[0].name + ".tar.gz"

        def get_tar(remotepath, filepath, filename, servers, todir="."):
            if type(servers) is not list:
                servers = [servers]
            for server in servers:
                shell = RemoteMachineShellConnection(server)
                _ = shell.execute_command("tar -zcvf %s.tar.gz %s" %
                                          (filepath, filepath))
                file_check = shell.file_exists(remotepath, filename)
                if not file_check:
                    self.log.error(
                        "Tar File {} doesn't exist".format(filename))
                tar_file_copied = shell.get_file(remotepath, filename, todir)
                if not tar_file_copied:
                    self.log.error("Failed to copy Tar file")

                _ = shell.execute_command("rm -rf %s.tar.gz" % filepath)

        copy_path_msg_format = "Copying data, Server :: %s, Path :: %s"
        # Temporarily copy data from all nodes, irrespective of the nodes
        # recorded in data_sets; the magma/kvstore-specific branch below is
        # disabled via the 'False and' guard.
        if False and self.data_sets and self.bucket_storage == "magma":
            self.log.critical("data_sets ==> {}".format(self.data_sets))
            wal_tar = "wal.tar.gz"
            config_json_tar = "config.json.tar.gz"
            for server, kvstores in self.data_sets.items():
                shell = RemoteMachineShellConnection(server)
                if not kvstores:
                    copy_to_path = os.path.join(log_path,
                                                server.ip.replace(".", "_"))
                    if not os.path.isdir(copy_to_path):
                        os.makedirs(copy_to_path, 0o777)
                    self.log.info(copy_path_msg_format %
                                  (server.ip, copy_to_path))
                    get_tar(remote_path,
                            file_path,
                            file_name,
                            server,
                            todir=copy_to_path)
                else:
                    for kvstore in kvstores:
                        if int(kvstore.split("-")[1]) >= self.vbuckets:
                            continue
                        kvstore_path = shell.execute_command(
                            "find %s -type d -name '%s'" %
                            (remote_path, kvstore))[0][0]
                        magma_dir = kvstore_path.split(kvstore)[0]
                        wal_path = magma_dir + "wal"
                        config_json_path = magma_dir + "config.json"
                        kvstore_path = magma_dir + kvstore
                        kvstore_tar = kvstore + ".tar.gz"
                        copy_to_path = os.path.join(log_path, kvstore)
                        if not os.path.isdir(copy_to_path):
                            os.makedirs(copy_to_path, 0o777)
                        self.log.info(copy_path_msg_format %
                                      (server.ip, copy_to_path))
                        get_tar(magma_dir,
                                kvstore_path,
                                kvstore_tar,
                                server,
                                todir=copy_to_path)
                        get_tar(magma_dir,
                                wal_path,
                                wal_tar,
                                server,
                                todir=copy_to_path)
                        get_tar(magma_dir,
                                config_json_path,
                                config_json_tar,
                                server,
                                todir=copy_to_path)
        else:
            for server in servers:
                copy_to_path = os.path.join(log_path,
                                            server.ip.replace(".", "_"))
                if not os.path.isdir(copy_to_path):
                    os.makedirs(copy_to_path, 0o777)
                self.log.info(copy_path_msg_format % (server.ip, copy_to_path))
                get_tar(remote_path,
                        file_path,
                        file_name,
                        server,
                        todir=copy_to_path)
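
The nested get_tar helper above follows a simple pattern: archive a remote directory with tar, verify the archive landed, copy it to the test slave, and clean up. Below is a standalone sketch of the same pattern, assuming a RemoteMachineShellConnection-style object exposing the execute_command, file_exists and get_file methods used above (the helper name and error handling are illustrative, not part of the original suite):

import os

def fetch_remote_dir_as_tar(shell, remote_dir, remote_parent, archive_name,
                            todir="."):
    # Create <remote_dir>.tar.gz next to the directory on the remote node
    shell.execute_command("tar -zcvf %s.tar.gz %s" % (remote_dir, remote_dir))
    # Verify the archive exists before attempting the copy
    if not shell.file_exists(remote_parent, archive_name):
        raise RuntimeError("Archive %s not found on remote node" % archive_name)
    # Pull the archive down to the local 'todir'
    if not shell.get_file(remote_parent, archive_name, todir):
        raise RuntimeError("Failed to copy %s" % archive_name)
    # Remove the remote archive to avoid leaving debris behind
    shell.execute_command("rm -f %s.tar.gz" % remote_dir)
    return os.path.join(todir, archive_name)
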
Example #9
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.primary_index_created = False
        self.use_sdk_client = self.input.param("use_sdk_client", False)
        if self.input.param("log_level", None):
            log.setLevel(level=0)
            for hd in log.handlers:
                if str(hd.__class__).find('FileHandler') != -1:
                    hd.setLevel(level=logging.DEBUG)
                else:
                    hd.setLevel(level=getattr(
                        logging, self.input.param("log_level", None)))
        self.servers = self.input.servers
        self.buckets = []
        self.case_number = self.input.param("case_number", 0)
        self.thread_to_use = self.input.param("threads_to_use", 10)
        self.cluster = CBCluster(servers=self.input.servers)
        self.task_manager = TaskManager(self.thread_to_use)
        self.cluster_util = cluster_utils(self.cluster, self.task_manager)
        self.bucket_util = bucket_utils(self.cluster, self.task_manager,
                                        self.cluster_util)
        self.task = ServerTasks(self.task_manager)
        self.cleanup = False
        self.nonroot = False
        shell = RemoteMachineShellConnection(self.cluster.master)
        self.os_info = shell.extract_remote_info().type.lower()
        if self.os_info != 'windows':
            if self.cluster.master.ssh_username != "root":
                self.nonroot = True
        shell.disconnect()
        """ some tests need to bypass checking cb server at set up
            to run installation """
        self.skip_init_check_cbserver = self.input.param(
            "skip_init_check_cbserver", False)

        try:
            self.vbuckets = self.input.param("vbuckets", 1024)
            self.skip_setup_cleanup = self.input.param("skip_setup_cleanup",
                                                       False)
            self.index_quota_percent = self.input.param(
                "index_quota_percent", None)
            self.num_servers = self.input.param("servers",
                                                len(self.cluster.servers))
            self.services_init = self.input.param("services_init", None)
            self.nodes_init = self.input.param("nodes_init", 1)
            self.nodes_in = self.input.param("nodes_in", 1)
            self.nodes_out = self.input.param("nodes_out", 1)
            self.services_in = self.input.param("services_in", None)
            self.forceEject = self.input.param("forceEject", False)
            self.num_items = self.input.param("num_items", 100000)
            self.num_replicas = self.input.param("replicas", 1)
            self.value_size = self.input.param("value_size", 1)
            self.wait_timeout = self.input.param("wait_timeout", 60)
            self.dgm_run = self.input.param("dgm_run", False)
            self.active_resident_threshold = int(
                self.input.param("active_resident_threshold", 0))
            self.verify_unacked_bytes = self.input.param(
                "verify_unacked_bytes", False)
            self.force_kill_memcached = TestInputSingleton.input.param(
                'force_kill_memcached', False)
            self.disabled_consistent_view = self.input.param(
                "disabled_consistent_view", None)
            self.rebalanceIndexWaitingDisabled = self.input.param(
                "rebalanceIndexWaitingDisabled", None)
            self.rebalanceIndexPausingDisabled = self.input.param(
                "rebalanceIndexPausingDisabled", None)
            self.maxParallelIndexers = self.input.param(
                "maxParallelIndexers", None)
            self.maxParallelReplicaIndexers = self.input.param(
                "maxParallelReplicaIndexers", None)
            self.quota_percent = self.input.param("quota_percent", None)
            self.port = None
            self.log_info = self.input.param("log_info", None)
            self.log_location = self.input.param("log_location", None)
            self.stat_info = self.input.param("stat_info", None)
            self.port_info = self.input.param("port_info", None)
            if not hasattr(self, 'skip_buckets_handle'):
                self.skip_buckets_handle = self.input.param(
                    "skip_buckets_handle", False)
            self.test_timeout = self.input.param(
                "test_timeout", 3600)  # kill hang test and jump to next one.
            self.gsi_type = self.input.param("gsi_type", 'plasma')
            self.compression_mode = self.input.param("compression_mode",
                                                     'passive')
            self.sdk_compression = self.input.param("sdk_compression", True)
            self.replicate_to = self.input.param("replicate_to", 0)
            self.persist_to = self.input.param("persist_to", 0)
            # JRE path for CBAS
            self.jre_path = self.input.param("jre_path", None)
            # End of bucket parameters (this list is still being extended)

            if self.skip_setup_cleanup:
                self.buckets = self.bucket_util.get_all_buckets()
                return
            if not self.skip_init_check_cbserver:
                self.cb_version = None
                if RestHelper(RestConnection(
                        self.cluster.master)).is_ns_server_running():
                    """ since every new couchbase version, there will be new features
                        that test code will not work on previous release.  So we need
                        to get couchbase version to filter out those tests. """
                    self.cb_version = RestConnection(
                        self.cluster.master).get_nodes_version()
                else:
                    log.info("couchbase server does not run yet")
                self.protocol = self.cluster_util.get_protocol_type()
            self.services_map = None

            log.info("==============  basetestcase setup was started for test #{0} {1}==============" \
                          .format(self.case_number, self._testMethodName))
            if not self.skip_buckets_handle and not self.skip_init_check_cbserver:
                self.cluster_util.cluster_cleanup(self.bucket_util)

            # avoid any cluster operations in setup for new upgrade
            #  & upgradeXDCR tests
            if str(self.__class__).find('newupgradetests') != -1 or \
                            str(self.__class__).find('upgradeXDCR') != -1 or \
                            str(self.__class__).find('Upgrade_EpTests') != -1 or \
                            hasattr(self, 'skip_buckets_handle') and \
                            self.skip_buckets_handle:
                log.info("any cluster operation in setup will be skipped")
                self.primary_index_created = True
                log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                              .format(self.case_number, self._testMethodName))
                return
            # Avoid cleanup if the previous test has already been torn down
            if self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    log.warn(
                        "tearDown for previous test failed. Will retry..")
                    self.case_number -= 1000
                self.cleanup = True
                if not self.skip_init_check_cbserver:
                    self.tearDownEverything()
                self.task = ServerTasks(self.task_manager)
            if not self.skip_init_check_cbserver:
                log.info("initializing cluster")
                #                 self.cluster_util.reset_cluster()
                master_services = self.cluster_util.get_services(self.servers[:1], \
                                                    self.services_init, \
                                                    start_node=0)
                if master_services is not None:
                    master_services = master_services[0].split(",")

                self.quota = self._initialize_nodes(self.task, self.cluster.servers, \
                                                    self.disabled_consistent_view, \
                                                    self.rebalanceIndexWaitingDisabled, \
                                                    self.rebalanceIndexPausingDisabled, \
                                                    self.maxParallelIndexers, \
                                                    self.maxParallelReplicaIndexers, \
                                                    self.port, \
                                                    self.quota_percent, \
                                                    services=master_services)

                self.cluster_util.change_env_variables()
                self.cluster_util.change_checkpoint_params()
                log.info("done initializing cluster")
            else:
                self.quota = ""
            if self.input.param("log_info", None):
                self.cluster_util.change_log_info()
            if self.input.param("log_location", None):
                self.cluster_util.change_log_location()
            if self.input.param("stat_info", None):
                self.cluster_util.change_stat_info()
            if self.input.param("port_info", None):
                self.cluster_util.change_port_info()
            if self.input.param("port", None):
                self.port = str(self.input.param("port", None))

            log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                          .format(self.case_number, self._testMethodName))

            if not self.skip_init_check_cbserver:
                self._log_start()
                self.sleep(5)
        except Exception, e:
            traceback.print_exc()
            self.task.shutdown(force=True)
            self.fail(e)
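
Nearly every knob in the setUp above is read through self.input.param(name, default). A minimal sketch of that lookup semantics follows, assuming the underlying test parameters are held in a plain dict (the real TestInput implementation may also coerce types from the command line or config file; the class and values below are illustrative only):

class _TestInputSketch(object):
    """Illustrative stand-in for the param() lookup used above."""

    def __init__(self, test_params):
        self.test_params = test_params

    def param(self, name, default=None):
        # Return the configured value when present, otherwise the default
        return self.test_params.get(name, default)

# Usage mirroring the setUp above:
#   _input = _TestInputSketch({"nodes_init": 2, "replicas": 2})
#   _input.param("nodes_init", 1)   # -> 2 (explicitly configured)
#   _input.param("dgm_run", False)  # -> False (falls back to the default)
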
Example #10
class ApiClient(object):
    """Generic API client for Swagger client library builds.

    Swagger generic API client. This client handles the client-
    server communication, and is invariant across implementations. Specifics of
    the methods and models for each application are generated from the Swagger
    templates.

    NOTE: This class is auto generated by the swagger code generator program.
    Ref: https://github.com/swagger-api/swagger-codegen
    Do not edit the class manually.

    :param configuration: Configuration object for this client
    :param header_name: a header to pass when making calls to the API.
    :param header_value: a header value to pass when making calls to
        the API.
    :param cookie: a cookie to include in the header when making calls
        to the API
    """

    PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
    NATIVE_TYPES_MAPPING = {
        'int': int,
        'long': int if six.PY3 else long,  # noqa: F821
        'float': float,
        'str': str,
        'bool': bool,
        'date': datetime.date,
        'datetime': datetime.datetime,
        'object': object,
    }

    def __init__(self,
                 configuration=None,
                 header_name=None,
                 header_value=None,
                 cookie=None):
        if configuration is None:
            configuration = Configuration()
        self.configuration = configuration

        self.pool = TaskManager()
        self.rest_client = rest.RESTClientObject(configuration)
        self.default_headers = {}
        if header_name is not None:
            self.default_headers[header_name] = header_value
        self.cookie = cookie
        # Set default User-Agent.
        self.user_agent = 'Swagger-Codegen/1.0.0/python'

    def __del__(self):
        self.pool.shutdown()

    @property
    def user_agent(self):
        """User agent for this API client"""
        return self.default_headers['User-Agent']

    @user_agent.setter
    def user_agent(self, value):
        self.default_headers['User-Agent'] = value

    def set_default_header(self, header_name, header_value):
        self.default_headers[header_name] = header_value

    def __call_api(self,
                   resource_path,
                   method,
                   path_params=None,
                   query_params=None,
                   header_params=None,
                   body=None,
                   post_params=None,
                   files=None,
                   response_type=None,
                   auth_settings=None,
                   _return_http_data_only=None,
                   collection_formats=None,
                   _preload_content=True,
                   _request_timeout=None):

        config = self.configuration

        if not auth_settings:
            auth_settings = config.auth_settings()

        # header parameters
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)
            header_params = dict(
                self.parameters_to_tuples(header_params, collection_formats))

        # path parameters
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            path_params = self.parameters_to_tuples(path_params,
                                                    collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                resource_path = resource_path.replace(
                    '{%s}' % k,
                    quote(str(v), safe=config.safe_chars_for_path_param))

        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            query_params = self.parameters_to_tuples(query_params,
                                                     collection_formats)

        # post parameters
        if post_params or files:
            post_params = self.prepare_post_parameters(post_params, files)
            post_params = self.sanitize_for_serialization(post_params)
            post_params = self.parameters_to_tuples(post_params,
                                                    collection_formats)

        # auth setting
        self.update_params_for_auth(header_params, query_params, auth_settings)

        # body
        if body:
            body = self.sanitize_for_serialization(body)

        # request url
        url = self.configuration.host + resource_path

        # perform request and return response
        response_data = self.request(
            method,
            url,
            query_params=query_params,
            headers=header_params,
            post_params=post_params,
            body=body,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            no_api_exception=_return_http_data_only is None)

        self.last_response = response_data

        return_data = response_data
        if _preload_content:
            # deserialize response data
            if response_type:
                return_data = self.deserialize(response_data, response_type)
            else:
                return_data = None

        if _return_http_data_only:
            return (return_data)
        else:
            return (return_data, response_data.status,
                    response_data.getheaders(), response_data)

    def sanitize_for_serialization(self, obj):
        """Builds a JSON POST object.

        If obj is None, return None.
        If obj is str, int, long, float, bool, return directly.
        If obj is datetime.datetime, datetime.date
            convert to string in iso8601 format.
        If obj is list, sanitize each element in the list.
        If obj is dict, sanitize each value in the dict.
        If obj is swagger model, return the properties dict.

        :param obj: The data to serialize.
        :return: The serialized form of data.
        """
        if obj is None:
            return None
        elif isinstance(obj, self.PRIMITIVE_TYPES):
            return obj
        elif isinstance(obj, list):
            return [
                self.sanitize_for_serialization(sub_obj) for sub_obj in obj
            ]
        elif isinstance(obj, tuple):
            return tuple(
                self.sanitize_for_serialization(sub_obj) for sub_obj in obj)
        elif isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()

        if isinstance(obj, dict):
            obj_dict = obj
        else:
            # Convert a model object to a dict, excluding the
            # `swagger_types` and `attribute_map` attributes and any
            # attribute whose value is None. Attribute names are mapped
            # to the JSON keys defined in the model for the request.
            obj_dict = {
                obj.attribute_map[attr]: getattr(obj, attr)
                for attr, _ in six.iteritems(obj.swagger_types)
                if getattr(obj, attr) is not None
            }

        return {
            key: self.sanitize_for_serialization(val)
            for key, val in six.iteritems(obj_dict)
        }

    def deserialize(self, response, response_type):
        """Deserializes response into an object.

        :param response: RESTResponse object to be deserialized.
        :param response_type: class literal for
            deserialized object, or string of class name.

        :return: deserialized object.
        """
        # handle file downloading
        # save response body into a tmp file and return the instance
        if response_type == "file":
            return self.__deserialize_file(response)

        # fetch data from response object
        try:
            data = json.loads(response.data)
        except ValueError:
            data = response.data

        return self.__deserialize(data, response_type)

    def __deserialize(self, data, klass):
        """Deserializes dict, list, str into an object.

        :param data: dict, list or str.
        :param klass: class literal, or string of class name.

        :return: object.
        """
        if data is None:
            return None

        if type(klass) == str:
            if klass.startswith('list['):
                sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
                return [
                    self.__deserialize(sub_data, sub_kls) for sub_data in data
                ]

            if klass.startswith('dict('):
                sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
                return {
                    k: self.__deserialize(v, sub_kls)
                    for k, v in six.iteritems(data)
                }

            # convert str to class
            if klass in self.NATIVE_TYPES_MAPPING:
                klass = self.NATIVE_TYPES_MAPPING[klass]
            else:
                klass = getattr(backup_service_client.models, klass)

        if klass in self.PRIMITIVE_TYPES:
            return self.__deserialize_primitive(data, klass)
        elif klass == object:
            return self.__deserialize_object(data)
        elif klass == datetime.date:
            return self.__deserialize_date(data)
        elif klass == datetime.datetime:
            return self.__deserialize_datatime(data)
        else:
            return self.__deserialize_model(data, klass)
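    # Examples of the type strings handled by __deserialize above
    # (input values are illustrative):
    #   __deserialize(["a", "b"], 'list[str]')      -> ['a', 'b']
    #   __deserialize({"x": "1"}, 'dict(str, int)') -> {'x': 1}
    #   __deserialize("2021-01-02", 'date')         -> datetime.date(2021, 1, 2)
    # Any other string is resolved to a model class on
    # backup_service_client.models and handed to __deserialize_model.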

    def call_api(self,
                 resource_path,
                 method,
                 path_params=None,
                 query_params=None,
                 header_params=None,
                 body=None,
                 post_params=None,
                 files=None,
                 response_type=None,
                 auth_settings=None,
                 async_req=None,
                 _return_http_data_only=None,
                 collection_formats=None,
                 _preload_content=True,
                 _request_timeout=None):
        """Makes the HTTP request (synchronous) and returns deserialized data.

        To make an async request, set the async_req parameter.

        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be
            placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response_type: Response data type.
        :param files dict: key -> filename, value -> filepath,
            for `multipart/form-data`.
        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return:
            If async_req parameter is True,
            the request will be called asynchronously.
            The method will return the request thread.
            If parameter async_req is False or missing,
            then the method will return the response directly.
        """
        args = (resource_path, method, path_params, query_params,
                header_params, body, post_params, files, response_type,
                auth_settings, _return_http_data_only, collection_formats,
                _preload_content, _request_timeout)

        if not async_req:
            return self.__call_api(*args)
        else:
            function_call_task = FunctionCallTask(self.__call_api, args)
            self.pool.add_new_task(function_call_task)
            return AsyncResult(self.pool, function_call_task)

    def request(self,
                method,
                url,
                query_params=None,
                headers=None,
                post_params=None,
                body=None,
                _preload_content=True,
                _request_timeout=None,
                no_api_exception=False):
        """Makes the HTTP request using RESTClient."""
        if method == "GET":
            return self.rest_client.GET(url,
                                        query_params=query_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        headers=headers,
                                        no_api_exception=no_api_exception)
        elif method == "HEAD":
            return self.rest_client.HEAD(url,
                                         query_params=query_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         headers=headers,
                                         no_api_exception=no_api_exception)
        elif method == "OPTIONS":
            return self.rest_client.OPTIONS(url,
                                            query_params=query_params,
                                            headers=headers,
                                            post_params=post_params,
                                            _preload_content=_preload_content,
                                            _request_timeout=_request_timeout,
                                            body=body,
                                            no_api_exception=no_api_exception)
        elif method == "POST":
            return self.rest_client.POST(url,
                                         query_params=query_params,
                                         headers=headers,
                                         post_params=post_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         body=body,
                                         no_api_exception=no_api_exception)
        elif method == "PUT":
            return self.rest_client.PUT(url,
                                        query_params=query_params,
                                        headers=headers,
                                        post_params=post_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        body=body,
                                        no_api_exception=no_api_exception)
        elif method == "PATCH":
            return self.rest_client.PATCH(url,
                                          query_params=query_params,
                                          headers=headers,
                                          post_params=post_params,
                                          _preload_content=_preload_content,
                                          _request_timeout=_request_timeout,
                                          body=body,
                                          no_api_exception=no_api_exception)
        elif method == "DELETE":
            return self.rest_client.DELETE(url,
                                           query_params=query_params,
                                           headers=headers,
                                           _preload_content=_preload_content,
                                           _request_timeout=_request_timeout,
                                           body=body,
                                           no_api_exception=no_api_exception)
        else:
            raise ValueError("http method must be `GET`, `HEAD`, `OPTIONS`,"
                             " `POST`, `PATCH`, `PUT` or `DELETE`.")

    def parameters_to_tuples(self, params, collection_formats):
        """Get parameters as list of tuples, formatting collections.

        :param params: Parameters as dict or list of two-tuples
        :param dict collection_formats: Parameter collection formats
        :return: Parameters as list of tuples, collections formatted
        """
        new_params = []
        if collection_formats is None:
            collection_formats = {}
        for k, v in six.iteritems(params) if isinstance(
                params, dict) else params:  # noqa: E501
            if k in collection_formats:
                collection_format = collection_formats[k]
                if collection_format == 'multi':
                    new_params.extend((k, value) for value in v)
                else:
                    if collection_format == 'ssv':
                        delimiter = ' '
                    elif collection_format == 'tsv':
                        delimiter = '\t'
                    elif collection_format == 'pipes':
                        delimiter = '|'
                    else:  # csv is the default
                        delimiter = ','
                    new_params.append(
                        (k, delimiter.join(str(value) for value in v)))
            else:
                new_params.append((k, v))
        return new_params
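    # Illustrative behaviour of parameters_to_tuples for params = {'id': [1, 2, 3]}:
    #   collection_formats = {'id': 'csv'}   -> [('id', '1,2,3')]
    #   collection_formats = {'id': 'multi'} -> [('id', 1), ('id', 2), ('id', 3)]
    #   collection_formats = None            -> [('id', [1, 2, 3])]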

    def prepare_post_parameters(self, post_params=None, files=None):
        """Builds form parameters.

        :param post_params: Normal form parameters.
        :param files: File parameters.
        :return: Form parameters with files.
        """
        params = []

        if post_params:
            params = post_params

        if files:
            for k, v in six.iteritems(files):
                if not v:
                    continue
                file_names = v if type(v) is list else [v]
                for n in file_names:
                    with open(n, 'rb') as f:
                        filename = os.path.basename(f.name)
                        filedata = f.read()
                        mimetype = (mimetypes.guess_type(filename)[0]
                                    or 'application/octet-stream')
                        params.append(
                            tuple([k, tuple([filename, filedata, mimetype])]))

        return params

    def select_header_accept(self, accepts):
        """Returns `Accept` based on an array of accepts provided.

        :param accepts: List of headers.
        :return: Accept (e.g. application/json).
        """
        if not accepts:
            return

        accepts = [x.lower() for x in accepts]

        if 'application/json' in accepts:
            return 'application/json'
        else:
            return ', '.join(accepts)

    def select_header_content_type(self, content_types):
        """Returns `Content-Type` based on an array of content_types provided.

        :param content_types: List of content-types.
        :return: Content-Type (e.g. application/json).
        """
        if not content_types:
            return 'application/json'

        content_types = [x.lower() for x in content_types]

        if 'application/json' in content_types or '*/*' in content_types:
            return 'application/json'
        else:
            return content_types[0]

    def update_params_for_auth(self, headers, querys, auth_settings):
        """Updates header and query params based on authentication setting.

        :param headers: Header parameters dict to be updated.
        :param querys: Query parameters tuple list to be updated.
        :param auth_settings: Authentication setting identifiers list.
        """
        if not auth_settings:
            return

        for auth in auth_settings:
            auth_setting = self.configuration.auth_settings().get(auth)
            if auth_setting:
                if not auth_setting['value']:
                    continue
                elif auth_setting['in'] == 'header':
                    headers[auth_setting['key']] = auth_setting['value']
                elif auth_setting['in'] == 'query':
                    querys.append((auth_setting['key'], auth_setting['value']))
                else:
                    raise ValueError(
                        'Authentication token must be in `query` or `header`')
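    # Illustrative auth_setting entries consumed above:
    #   {'in': 'header', 'key': 'Authorization', 'value': 'Basic ...'}
    #       -> merged into the request headers
    #   {'in': 'query', 'key': 'token', 'value': 'abc'}
    #       -> appended to the query parameter tuples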

    def __deserialize_file(self, response):
        """Deserializes body to file

        Saves response body into a file in a temporary folder,
        using the filename from the `Content-Disposition` header if provided.

        :param response:  RESTResponse.
        :return: file path.
        """
        fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
        os.close(fd)
        os.remove(path)

        content_disposition = response.getheader("Content-Disposition")
        if content_disposition:
            filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                                 content_disposition).group(1)
            path = os.path.join(os.path.dirname(path), filename)

        with open(path, "wb") as f:
            f.write(response.data)

        return path

    def __deserialize_primitive(self, data, klass):
        """Deserializes string to primitive type.

        :param data: str.
        :param klass: class literal.

        :return: int, long, float, str, bool.
        """
        try:
            return klass(data)
        except UnicodeEncodeError:
            return six.text_type(data)
        except TypeError:
            return data

    def __deserialize_object(self, value):
        """Return a original value.

        :return: object.
        """
        return value

    def __deserialize_date(self, string):
        """Deserializes string to date.

        :param string: str.
        :return: date.
        """
        try:
            from dateutil.parser import parse
            return parse(string).date()
        except ImportError:
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason="Failed to parse `{0}` as date object".format(string))

    def __deserialize_datatime(self, string):
        """Deserializes string to datetime.

        The string should be in iso8601 datetime format.

        :param string: str.
        :return: datetime.
        """
        try:
            from dateutil.parser import parse
            return parse(string)
        except ImportError:
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason=(
                    "Failed to parse `{0}` as datetime object".format(string)))

    def __hasattr(self, object, name):
        return name in object.__class__.__dict__

    def __deserialize_model(self, data, klass):
        """Deserializes list or dict to model.

        :param data: dict, list.
        :param klass: class literal.
        :return: model object.
        """

        if not klass.swagger_types and not self.__hasattr(
                klass, 'get_real_child_model'):
            return data

        kwargs = {}
        if klass.swagger_types is not None:
            for attr, attr_type in six.iteritems(klass.swagger_types):
                if (data is not None and klass.attribute_map[attr] in data
                        and isinstance(data, (list, dict))):
                    value = data[klass.attribute_map[attr]]
                    kwargs[attr] = self.__deserialize(value, attr_type)

        instance = klass(**kwargs)

        if (isinstance(instance, dict) and klass.swagger_types is not None
                and isinstance(data, dict)):
            for key, value in data.items():
                if key not in klass.swagger_types:
                    instance[key] = value
        if self.__hasattr(instance, 'get_real_child_model'):
            klass_name = instance.get_real_child_model(data)
            if klass_name:
                instance = self.__deserialize(data, klass_name)
        return instance
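
A minimal sketch of how this client might be driven. The endpoint path and response_type below are placeholders, and the Configuration is assumed to already carry the host and credentials; the asynchronous branch returns the AsyncResult wrapper built in call_api, whose result-retrieval API is defined outside this class:

client = ApiClient(configuration=Configuration())

# Synchronous call: returns the deserialized body directly because
# _return_http_data_only is set.
info = client.call_api('/cluster/self', 'GET',
                       response_type='object',
                       _return_http_data_only=True)

# Asynchronous call: the request is queued on the client's TaskManager
# pool and an AsyncResult handle is returned instead of the response.
pending = client.call_api('/cluster/self', 'GET',
                          response_type='object',
                          _return_http_data_only=True,
                          async_req=True)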