Example #1
    def test_rebalance_in_multiple_cbas_on_a_busy_system(self):
        node_services = [] 
        node_services.append(self.input.param('service',"cbas"))
        self.log.info("Setup CBAS")
        self.setup_for_test(skip_data_loading=True)

        self.log.info("Run KV ops in async while rebalance is in progress")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(docs_per_day=self.num_items, start=0)
        tasks = self._async_load_all_buckets(self.master, generators, "create", 0)
        
        self.log.info("Run concurrent queries to simulate busy system")
        statement = "select sleep(count(*),50000) from {0} where mutated=0;".format(self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(statement, self.mode, self.num_concurrent_queries)

        self.log.info("Rebalance in CBAS nodes")
        self.add_node(node=self.rebalanceServers[1], services=node_services, rebalance=False, wait_for_rebalance_completion=False)
        self.add_node(node=self.rebalanceServers[3], services=node_services, rebalance=True, wait_for_rebalance_completion=True)

        self.log.info("Get KV ops result")
        for task in tasks:
            task.get_result()
        
        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if not self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items, 0):
            self.fail("No. of items in CBAS dataset do not match that in the CB bucket")
Example #2
 def _load_buckets(self):
     """
     1. Flush all existing buckets
     2. Load documents into the standard and default buckets
     3. Create full_doc_list for both buckets
     :return:
     """
     rest = RestConnection(self.master)
     json_generator = JsonGenerator()
     self.standard_gens_load = json_generator.generate_doc_for_aggregate_pushdown(docs_per_day=self.docs_per_day,
                                                                                  start=0)
     self.standard_full_docs_list = self.generate_full_docs_list(self.standard_gens_load)
     self.default_gens_load = json_generator.generate_doc_for_aggregate_pushdown(docs_per_day=self.docs_per_day,
                                                                                 start=0)
     self.default_full_docs_list = self.generate_full_docs_list(self.default_gens_load)
     for bucket in self.buckets:
         rest.flush_bucket(bucket.name)
         count = 0
         while rest.get_bucket_status(bucket.name) != "healthy" and count < 10:
             log.info(
                 "Bucket {0} Status is {1}. Sleeping...".format(bucket.name, rest.get_bucket_status(bucket.name)))
             count += 1
             self.sleep(15)
         if bucket.name.startswith("standard"):
             self.load(self.standard_gens_load, flag=self.item_flag, buckets=[bucket], verify_data=False)
         if bucket.name.startswith("default"):
             self.load(self.default_gens_load, flag=self.item_flag, buckets=[bucket], verify_data=False)
Example #3
 def _load_buckets(self):
     """
     1. Flush all existing buckets
     2. Load documents into the standard and default buckets
     3. Create full_doc_list for both buckets
     :return:
     """
     rest = RestConnection(self.master)
     json_generator = JsonGenerator()
     self.standard_gens_load = json_generator.generate_doc_for_aggregate_pushdown(docs_per_day=self.docs_per_day,
                                                                                  start=0)
     self.standard_full_docs_list = self.generate_full_docs_list(self.standard_gens_load)
     self.default_gens_load = json_generator.generate_doc_for_aggregate_pushdown(docs_per_day=self.docs_per_day,
                                                                                 start=0)
     self.default_full_docs_list = self.generate_full_docs_list(self.default_gens_load)
     for bucket in self.buckets:
         rest.flush_bucket(bucket.name)
         count = 0
         while rest.get_bucket_status(bucket.name) != "healthy" and count < 10:
             log.info("Bucket {0} Status is {1}. Sleeping...".format(bucket.name, rest.get_bucket_status(bucket.name)))
             count += 1
             self.sleep(15)
         if bucket.name.startswith("standard"):
             self.load(self.standard_gens_load, flag=self.item_flag, buckets=[bucket], verify_data=False)
         if bucket.name.startswith("default"):
             self.load(self.default_gens_load, flag=self.item_flag, buckets=[bucket], verify_data=False)
Example #4
    def test_gsi_with_crud_with_redaction_enabled(self):
        # load bucket and do some ops
        self.set_indexer_logLevel("trace")
        json_generator = JsonGenerator()
        gen_docs = json_generator.generate_all_type_documents_for_gsi(
            docs_per_day=self.doc_per_day, start=0)
        full_docs_list = self.generate_full_docs_list(gen_docs)
        n1ql_helper = N1QLHelper(use_rest=True,
                                 buckets=self.buckets,
                                 full_docs_list=full_docs_list,
                                 log=log,
                                 input=self.input,
                                 master=self.master)
        self.load(gen_docs)
        n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        query_definition_generator = SQLDefinitionGenerator()
        query_definitions = query_definition_generator.generate_airlines_data_query_definitions(
        )
        query_definitions = query_definition_generator.filter_by_group(
            "all", query_definitions)
        # set log redaction level, collect logs, verify log files exist and verify them for redaction
        self.set_redaction_level()
        self.start_logs_collection()
        # Create partial Index
        for query_definition in query_definitions:
            for bucket in self.buckets:
                create_query = query_definition.generate_index_create_query(
                    bucket.name)
                n1ql_helper.run_cbq_query(query=create_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(
                    bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                drop_query = query_definition.generate_index_drop_query(
                    bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=drop_query, server=n1ql_node)
        result = self.monitor_logs_collection()
        log.info(result)
        try:
            logs_path = result["perNode"]["ns_1@" +
                                          str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["ns_1@127.0.0.1"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        log_file = self.input.param("log_file_name", "indexer.log")
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.{0}".format(log_file))
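As an aside, the redacted and non-redacted file names checked above are derived purely from the collected log path. A minimal standalone sketch of that derivation, using a hypothetical collectinfo path for illustration (the real path comes from monitor_logs_collection()):

# Hypothetical path, for illustration only; the real value is returned by
# monitor_logs_collection() as result["perNode"][...]["path"].
logs_path = "/opt/couchbase/var/lib/couchbase/tmp/collectinfo-2024-01-01-redacted.zip"

redactFileName = logs_path.split('/')[-1]                    # "collectinfo-2024-01-01-redacted.zip"
nonredactFileName = redactFileName.replace('-redacted', '')  # "collectinfo-2024-01-01.zip"
remotepath = logs_path[0:logs_path.rfind('/') + 1]           # directory that holds both archives

print(redactFileName, nonredactFileName, remotepath)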
Example #5
    def test_gsi_with_index_rebalance_redaction_enabled(self):
        # load bucket and do some ops
        self.set_indexer_logLevel("trace")
        json_generator = JsonGenerator()
        gen_docs = json_generator.generate_all_type_documents_for_gsi(docs_per_day=self.doc_per_day, start=0)
        full_docs_list = self.generate_full_docs_list(gen_docs)
        n1ql_helper = N1QLHelper(use_rest=True, buckets=self.buckets, full_docs_list=full_docs_list,
                                 log=log, input=self.input, master=self.master)
        self.load(gen_docs)
        n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        query_definition_generator = SQLDefinitionGenerator()
        query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
        query_definitions = query_definition_generator.filter_by_group("all", query_definitions)
        # set log redaction level, collect logs, verify log files exist and verify them for redaction
        self.set_redaction_level()
        self.start_logs_collection()
        # Create partial Index
        for query_definition in query_definitions:
            for bucket in self.buckets:
                create_query = query_definition.generate_index_create_query(bucket.name)
                n1ql_helper.run_cbq_query(query=create_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], self.nodes_in_list,
                                                 [], services=self.services_in)

        rebalance.result()
        self.sleep(30)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                drop_query = query_definition.generate_index_drop_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=drop_query, server=n1ql_node)
        result = self.monitor_logs_collection()
        log.info(result)
        try:
            logs_path = result["perNode"]["ns_1@" + str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["ns_1@127.0.0.1"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        log_file = self.input.param("log_file_name", "indexer.log")
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.{0}".format(log_file))
Example #6
 def generate_docs_bigdata(self,
                           docs_per_day,
                           start=0,
                           document_size=1024000):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_bigdata(end=docs_per_day,
                                                 start=start,
                                                 value_size=document_size)
Example #7
 def generate_docs_bigdata(self,
                           num_of_documents,
                           start=0,
                           document_size_in_mb=1):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_bigdata(end=num_of_documents,
                                                 start=start,
                                                 value_size=1024000 *
                                                 document_size_in_mb)
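The two generate_docs_bigdata wrappers above use different sizing conventions: Example #6 passes the value size through in bytes (defaulting to 1024000), while Example #7 takes a size in megabytes and multiplies by 1024000. A minimal sketch of the two conventions side by side, with gen_bigdata_bytes and gen_bigdata_mb as hypothetical stand-ins for the two helpers (only JsonGenerator.generate_docs_bigdata is taken from the examples):

def gen_bigdata_bytes(json_generator, docs_per_day, start=0, document_size=1024000):
    # Example #6 convention: document_size is already a byte count
    return json_generator.generate_docs_bigdata(end=docs_per_day, start=start,
                                                value_size=document_size)

def gen_bigdata_mb(json_generator, num_of_documents, start=0, document_size_in_mb=1):
    # Example #7 convention: size given in "MB", converted with 1 MB == 1024000 bytes
    return json_generator.generate_docs_bigdata(end=num_of_documents, start=start,
                                                value_size=1024000 * document_size_in_mb)

# With the same JsonGenerator instance jg, these two calls request the same value_size:
#   gen_bigdata_bytes(jg, 1000, document_size=2048000)
#   gen_bigdata_mb(jg, 1000, document_size_in_mb=2)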
Example #8
 def test_rebalance_swap_multiple_cbas_on_a_busy_system(self):
     '''
     1. We have a 4-node cluster with 1 KV and 3 CBAS nodes. Assume the IPs end with 101 (KV), 102 (CBAS), 103 (CBAS), 104 (CBAS)
     2. Post initial setup - 101 runs KV and 102 runs CBAS as the CC node
     3. As part of the test, add an extra NC node that will be swap rebalanced later - add 103 and rebalance
     4. If swap rebalancing the NC node - select the node added in step 3 for removal and add 104 during the swap
     5. If swap rebalancing the CC node - select the CC node for removal and add 104 during the swap
     '''
     
     self.log.info('Read service input param')
     node_services = []
     node_services.append(self.input.param('service', "cbas"))
     
     self.log.info("Rebalance in CBAS nodes, this node will be removed during swap")
     self.add_node(node=self.rebalanceServers[1], services=node_services)
 
     self.log.info("Setup CBAS")
     self.setup_for_test(skip_data_loading=True)
 
     self.log.info("Run KV ops in async while rebalance is in progress")
     json_generator = JsonGenerator()
     generators = json_generator.generate_docs_simple(docs_per_day=self.num_items, start=0)
     tasks = self._async_load_all_buckets(self.master, generators, "create", 0)
 
     self.log.info("Run concurrent queries to simulate busy system")
     statement = "select sleep(count(*),50000) from {0} where mutated=0;".format(self.cbas_dataset_name)
     handles = self.cbas_util._run_concurrent_queries(statement, self.mode, self.num_concurrent_queries)
 
     self.log.info("Fetch node to remove during rebalance")
     self.rebalance_cc = self.input.param("rebalance_cc", False)
     out_nodes = []
     nodes = self.rest.node_statuses()
     reinitialize_cbas_util = False
     for node in nodes:
         if self.rebalance_cc and (node.ip == self.cbas_node.ip):
             out_nodes.append(node)
             reinitialize_cbas_util = True
         elif not self.rebalance_cc and node.ip == self.rebalanceServers[1].ip:
             out_nodes.append(node)
 
     self.log.info("Swap rebalance CBAS nodes")
     self.add_node(node=self.rebalanceServers[3], services=node_services, rebalance=False)
     self.remove_node([out_nodes[0]], wait_for_rebalance=True)
 
     self.log.info("Get KV ops result")
     for task in tasks:
         task.get_result()
     
     if reinitialize_cbas_util is True:
         self.cbas_util = cbas_utils(self.master, self.rebalanceServers[3])
         self.cbas_util.createConn("default")
     
     self.log.info("Log concurrent query status")
     self.cbas_util.log_concurrent_query_outcome(self.master, handles)
     
     count_n1ql = self.rest.query_tool('select count(*) from %s' % (self.cb_bucket_name))['results'][0]['$1']
     if not self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, count_n1ql, 0):
         self.fail("No. of items in CBAS dataset do not match that in the CB bucket")
Example #9
    def test_auto_failure_on_kv_busy_system(self):
        
        self.log.info('Read service input param')
        node_services = []
        node_services.append(self.input.param('service', "cbas"))
        
        self.log.info("Add KV node so we can auto failover a KV node later")
        self.add_node(self.servers[1], node_services, rebalance=False)
        self.add_node(self.cbas_servers[0], node_services, rebalance=True)
        
        self.log.info("Create bucket")
        self.create_bucket(self.master, self.cb_bucket_name, replica=self.num_replicas)
        
        self.log.info("Perform Async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(docs_per_day=self.num_items)
        kv_task = self._async_load_all_buckets(self.master, generators, "create", 0, batch_size=1000)
        
        self.log.info("Create connection")
        self.cbas_util.createConn(self.cb_bucket_name)
        
        self.log.info("Create bucket on CBAS")
        self.cbas_util.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name,
                                             cb_bucket_name=self.cb_bucket_name,
                                             cb_server_ip=self.cb_server_ip)
        
        self.log.info("Create dataset on the CBAS bucket")
        self.cbas_util.create_dataset_on_bucket(cbas_bucket_name=self.cb_bucket_name,
                                                cbas_dataset_name=self.cbas_dataset_name)

        self.log.info("Connect to Bucket")
        self.cbas_util.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name,
                                         cb_bucket_password=self.cb_bucket_password)
        
        self.log.info("Auto fail over KV node")
        autofailover_timeout = 40
        status = RestConnection(self.master).update_autofailover_settings(True, autofailover_timeout)
        self.assertTrue(status, 'failed to change autofailover_settings!')
        servr_out = [self.cbas_servers[0]]
        remote = RemoteMachineShellConnection(servr_out[0])
        try:
            remote.stop_server()
            self.sleep(autofailover_timeout + 10, "Wait for auto fail over")
            self.cluster.rebalance(self.servers[:self.nodes_init],[], [servr_out[0]])
        finally:
            remote = RemoteMachineShellConnection(servr_out[0])
            remote.start_server()
                
        self.log.info("Get KV ops result")
        for task in kv_task:
            task.get_result()
        
        self.log.info("Assert document count on CBAS")
        count_n1ql = self.rest.query_tool('select count(*) from `%s`' % (self.cb_bucket_name))['results'][0]['$1']
        self.log.info("Document count on CB %d" % count_n1ql)
        
        self.log.info("Validate count on CBAS")
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, count_n1ql), msg="Count mismatch")
Example #10
    def test_heavy_dgm_on_kv_and_then_rebalance(self):
        
        self.log.info('Read input param')
        node_services = []
        node_services.append(self.input.param('service', "kv"))
        bucket_ram = self.input.param('bucket_ram', 100)
        
        self.log.info("Pick the incoming and outgoing nodes during rebalance")
        self.rebalance_type = self.input.param("rebalance_type", "in")
        nodes_to_add = [self.servers[1]]
        nodes_to_remove = []
        if self.rebalance_type == 'out':
            self.add_node(self.servers[1], node_services)
            nodes_to_remove.append(self.servers[1])
            nodes_to_add = []
        elif self.rebalance_type == 'swap':
            self.add_node(self.servers[3], node_services)
            nodes_to_remove.append(self.servers[3])
        self.log.info("Incoming nodes - %s, outgoing nodes - %s. For rebalance type %s " %(nodes_to_add, nodes_to_remove, self.rebalance_type))    
        
        self.log.info("Create bucket")
        self.create_bucket(self.master, self.cb_bucket_name, bucket_ram=bucket_ram)
        
        self.log.info("Create connection")
        self.cbas_util.createConn(self.cb_bucket_name)
        
        self.log.info("Create bucket on CBAS")
        self.cbas_util.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name,
                                             cb_bucket_name=self.cb_bucket_name,
                                             cb_server_ip=self.cb_server_ip)
        
        self.log.info("Create dataset on the CBAS bucket")
        self.cbas_util.create_dataset_on_bucket(cbas_bucket_name=self.cb_bucket_name,
                                                cbas_dataset_name=self.cbas_dataset_name)

        self.log.info("Connect to Bucket")
        self.cbas_util.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name,
                                         cb_bucket_password=self.cb_bucket_password)
        
        self.log.info("Perform Async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(docs_per_day=self.num_items)
        kv_task = self._async_load_all_buckets(self.master, generators, "create", 0, batch_size=20000)
        
        self.log.info("Get KV ops result")
        for task in kv_task:
            task.get_result()
        
        self.log.info("Rebalance %s" % self.rebalance_type)
        self.assertTrue(self.cluster.rebalance(self.servers, nodes_to_add, nodes_to_remove, services=node_services))
        
        self.log.info("Assert document count on CBAS")
        count_n1ql = self.rest.query_tool('select count(*) from `%s`' % (self.cb_bucket_name))['results'][0]['$1']
        self.log.info("Document count on CB %d" % count_n1ql)
        
        self.log.info("Validate count on CBAS")
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, count_n1ql), msg="Count mismatch")
Example #11
    def test_rebalance_on_nodes_running_multiple_services(self):

        self.log.info("Pick the incoming and outgoing nodes during rebalance")
        active_services = ['cbas,fts,kv']
        self.rebalance_type = self.input.param("rebalance_type", "in")
        nodes_to_add = [self.rebalanceServers[1]]
        nodes_to_remove = []
        if self.rebalance_type == 'out':
            # This node will be rebalanced out
            nodes_to_remove.append(self.rebalanceServers[1])
            # Will be running services as specified in the list - active_services
            self.add_node(nodes_to_add[0], services=active_services)
            # Node already added above via add_node, so leave nodes_to_add empty
            nodes_to_add = []
        elif self.rebalance_type == 'swap':
            # Below node will be swapped with the incoming node specified in nodes_to_add
            self.add_node(nodes_to_add[0], services=active_services)
            nodes_to_add = []
            nodes_to_add.append(self.rebalanceServers[3])
            # Below node will be removed and swapped with node that was added earlier
            nodes_to_remove.append(self.rebalanceServers[1])
            
        self.log.info("Incoming nodes - %s, outgoing nodes - %s. For rebalance type %s " % (
        nodes_to_add, nodes_to_remove, self.rebalance_type))

        self.log.info("Creates cbas buckets and dataset")
        dataset_count_query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Perform async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(docs_per_day=self.num_items * 3 / 2, start=self.num_items)
        kv_task = self._async_load_all_buckets(self.master, generators, "create", 0, batch_size=5000)

        self.log.info("Run concurrent queries on CBAS")
        handles = self.cbas_util._run_concurrent_queries(dataset_count_query, "async", self.num_concurrent_queries)

        self.log.info("Rebalance nodes")
        # Do not add node to nodes_to_add if already added as add_node earlier
        self.cluster.rebalance(self.servers, nodes_to_add, nodes_to_remove, services=active_services)

        self.log.info("Get KV ops result")
        for task in kv_task:
            task.get_result()
        
        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        self.log.info("Validate dataset count on CBAS")
        if not self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items * 3 / 2, 0):
            self.fail("No. of items in CBAS dataset do not match that in the CB bucket")
Example #12
    def test_rebalance_out_multiple_cbas_on_a_busy_system(self):
        node_services = [] 
        node_services.append(self.input.param('service',"cbas"))
        self.log.info("Rebalance in CBAS nodes")
        self.add_node(node=self.rebalanceServers[1], services=node_services)
        self.add_node(node=self.rebalanceServers[3], services=node_services)

        self.log.info("Setup CBAS")
        self.setup_for_test(skip_data_loading=True)

        self.log.info("Run KV ops in async while rebalance is in progress")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(docs_per_day=self.num_items, start=0)
        tasks = self._async_load_all_buckets(self.master, generators, "create", 0)

        self.log.info("Run concurrent queries to simulate busy system")
        statement = "select sleep(count(*),50000) from {0} where mutated=0;".format(self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(statement, self.mode, self.num_concurrent_queries)

        self.log.info("Fetch and remove nodes to rebalance out")
        self.rebalance_cc = self.input.param("rebalance_cc", False)
        out_nodes = []
        nodes = self.rest.node_statuses()
        
        if self.rebalance_cc:
            for node in nodes:
                if node.ip == self.cbas_node.ip or node.ip == self.servers[1].ip:
                    out_nodes.append(node)
            self.cbas_util.closeConn()
            self.log.info("Reinitialize CBAS utils with ip %s, since CC node is rebalanced out" %self.servers[3].ip)
            self.cbas_util = cbas_utils(self.master, self.servers[3])
            self.cbas_util.createConn("default")
        else:
            for node in nodes:
                if node.ip == self.servers[3].ip or node.ip == self.servers[1].ip:
                    out_nodes.append(node)
        
        self.log.info("Rebalance out CBAS nodes %s %s" % (out_nodes[0].ip, out_nodes[1].ip))
        self.remove_all_nodes_then_rebalance([out_nodes[0],out_nodes[1]])

        self.log.info("Get KV ops result")
        for task in tasks:
            task.get_result()
        
        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if not self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items, 0):
            self.fail("No. of items in CBAS dataset do not match that in the CB bucket")
Example #13
    def test_pending_mutations_busy_kv_system(self):

        self.log.info("Load documents in KV")
        self.perform_doc_ops_in_all_cb_buckets("create", 0, self.num_items)

        self.log.info("Create dataset on the CBAS")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name, self.cbas_dataset_name)

        self.log.info("Connect link")
        self.cbas_util.connect_link()
        
        self.log.info("Perform async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(docs_per_day=self.num_items * 4, start=self.num_items)
        kv_task = self.bucket_util._async_load_all_buckets(self.cluster, generators, "create", 0, batch_size=5000)
        
        self.log.info("Fetch cluster remaining mutations")
        aggregate_remaining_mutations_list = []
        while True:
            status, content, _ = self.cbas_util.fetch_pending_mutation_on_cbas_cluster()
            self.assertTrue(status, msg="Fetch pending mutations failed")
            content = json.loads(content)
            if content:
                aggregate_remaining_mutations_list.append(content["Default"]["ds"])
                total_count, _ = self.cbas_util.get_num_items_in_cbas_dataset(self.cbas_dataset_name)
                if total_count == self.num_items * 4:
                    break

        self.log.info("Get KV ops result")
        for task in kv_task:
            self.task_manager.get_task_result(task)

        self.log.info("Verify remaining mutation count is reducing as ingestion progress's")
        self.log.info(aggregate_remaining_mutations_list)
        is_remaining_mutation_count_reducing = True

        for i in range(len(aggregate_remaining_mutations_list)):
            if aggregate_remaining_mutations_list[i] < 0:
                self.fail("Remaining mutation count must be non -ve")

        for i in range(1, len(aggregate_remaining_mutations_list)):
            if not aggregate_remaining_mutations_list[i-1] >= aggregate_remaining_mutations_list[i]:
                is_remaining_mutation_count_reducing = False
                break

        self.log.info("Assert mutation progress API response")
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items * 4), msg="Count mismatch on CBAS")
        self.assertTrue(len(aggregate_remaining_mutations_list) > 1, msg="Found no items during ingestion")
        self.assertFalse(is_remaining_mutation_count_reducing, msg="Remaining mutation count must increase as ingestion progresses")
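The list checks above can be expressed more compactly. A minimal sketch of an equivalent helper, assuming the same list of sampled remaining-mutation counts; the test then asserts that all counts are non-negative and that the sequence is not monotonically non-increasing:

def summarize_remaining_mutations(counts):
    # counts: remaining-mutation samples collected while ingestion is in progress
    non_negative = all(c >= 0 for c in counts)
    non_increasing = all(a >= b for a, b in zip(counts, counts[1:]))
    return non_negative, non_increasing

# Hypothetical usage with made-up samples:
#   non_negative, non_increasing = summarize_remaining_mutations([10, 250, 180, 400, 0])
#   assert non_negative and not non_increasing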
Example #14
 def generate_ops_docs(self, num_items, start=0):
     try:
         json_generator = JsonGenerator()
         if self.dataset == "simple":
             return self.generate_ops(num_items, start,
                                      json_generator.generate_docs_simple)
         if self.dataset == "sales":
             return self.generate_ops(num_items, start,
                                      json_generator.generate_docs_sales)
         if self.dataset == "employee" or self.dataset == "default":
             return self.generate_ops(num_items, start,
                                      json_generator.generate_docs_employee)
         if self.dataset == "sabre":
             return self.generate_ops(num_items, start,
                                      json_generator.generate_docs_sabre)
         if self.dataset == "bigdata":
             return self.generate_ops(num_items, start,
                                      json_generator.generate_docs_bigdata)
         if self.dataset == "array":
             return self.generate_ops(
                 num_items, start,
                 json_generator.generate_all_type_documents_for_gsi)
     except Exception as ex:
         self.log.info(ex)
         self.fail("There is no dataset %s, please enter a valid one" %
                   self.dataset)
Example #15
 def generate_ops_docs(self, num_items, start=0):
     try:
         json_generator = JsonGenerator()
         if self.dataset == "simple":
             return self.generate_ops(num_items, start,
                                      json_generator.generate_docs_simple)
         if self.dataset == "array":
             return self.generate_ops(
                 num_items, start,
                 json_generator.generate_all_type_documents_for_gsi)
     except Exception as ex:
         log.info(ex)
         self.fail("There is no dataset %s, please enter a valid one" %
                   self.dataset)
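The if-chains in Examples #14 and #15 can also be written as a lookup table. A minimal sketch of the same dispatch (pick_generator is a hypothetical helper; only the JsonGenerator method names are taken from the examples):

def pick_generator(json_generator, dataset):
    # Map dataset names to the JsonGenerator methods used in Examples #14/#15
    generators_by_dataset = {
        "simple": json_generator.generate_docs_simple,
        "array": json_generator.generate_all_type_documents_for_gsi,
    }
    try:
        return generators_by_dataset[dataset]
    except KeyError:
        raise ValueError("There is no dataset %s, please enter a valid one" % dataset)

# A caller could then keep the Example #15 shape:
#   return self.generate_ops(num_items, start, pick_generator(json_generator, self.dataset))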
Example #16
 def generate_docs_sabre(self, docs_per_day, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_sabre(docs_per_day, start)
Example #17
 def generate_docs_simple(self, docs_per_day, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_simple(start=start,
                                                docs_per_day=docs_per_day)
Example #18
 def generate_docs_array(self, num_items=10, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_all_type_documents_for_gsi(
         start=start, docs_per_day=num_items)
Example #19
    def test_auto_retry_failed_rebalance(self):

        # Auto-retry rebalance settings
        body = {"enabled": "true", "afterTimePeriod": self.retry_time, "maxAttempts": self.num_retries}
        rest = RestConnection(self.master)
        rest.set_retry_rebalance_settings(body)
        result = rest.get_retry_rebalance_settings()

        self.log.info("Pick the incoming and outgoing nodes during rebalance")
        self.rebalance_type = self.input.param("rebalance_type", "in")
        nodes_to_add = [self.rebalanceServers[1]]
        nodes_to_remove = []
        reinitialize_cbas_util = False
        if self.rebalance_type == 'out':
            nodes_to_remove.append(self.rebalanceServers[1])
            self.add_node(self.rebalanceServers[1])
            nodes_to_add = []
        elif self.rebalance_type == 'swap':
            self.add_node(nodes_to_add[0], rebalance=False)
            nodes_to_remove.append(self.cbas_node)
            reinitialize_cbas_util = True
        self.log.info("Incoming nodes - %s, outgoing nodes - %s. For rebalance type %s " % (
        nodes_to_add, nodes_to_remove, self.rebalance_type))

        self.log.info("Creates cbas buckets and dataset")
        dataset_count_query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Perform async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(docs_per_day=self.num_items * 3 / 2, start=self.num_items)
        kv_task = self._async_load_all_buckets(self.master, generators, "create", 0, batch_size=5000)

        self.log.info("Run concurrent queries on CBAS")
        handles = self.cbas_util._run_concurrent_queries(dataset_count_query, "async", self.num_concurrent_queries)

        self.log.info("Fetch the server to restart couchbase on")
        restart_couchbase_on_incoming_or_outgoing_node = self.input.param(
            "restart_couchbase_on_incoming_or_outgoing_node", True)
        if not restart_couchbase_on_incoming_or_outgoing_node:
            node = self.cbas_node
        else:
            node = self.rebalanceServers[1]
        shell = RemoteMachineShellConnection(node)

        try:
            self.log.info("Rebalance nodes")
            self.cluster.async_rebalance(self.servers, nodes_to_add, nodes_to_remove)

            self.sleep(10, message="Restarting couchbase after 10s on node %s" % node.ip)

            shell.restart_couchbase()
            self.sleep(30, message="Waiting for service to be back again...")

            self.sleep(self.retry_time, "Wait for retry time to complete and then check the rebalance results")

            reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
            self.log.info("Rebalance status : {0}".format(reached))
            self.sleep(20)

            self._check_retry_rebalance_succeeded()

            if reinitialize_cbas_util is True:
                self.cbas_util = cbas_utils(self.master, self.rebalanceServers[1])
                self.cbas_util.createConn("default")
                self.cbas_util.wait_for_cbas_to_recover()

            self.log.info("Get KV ops result")
            for task in kv_task:
                task.get_result()

            self.log.info("Log concurrent query status")
            self.cbas_util.log_concurrent_query_outcome(self.master, handles)

            self.log.info("Validate dataset count on CBAS")
            if not self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items * 3 / 2, 0):
                self.fail("No. of items in CBAS dataset do not match that in the CB bucket")
        except Exception as e:
            self.fail("Some exception occurred : {0}".format(str(e)))
        finally:
            body = {"enabled": "false"}
            rest.set_retry_rebalance_settings(body)
Example #20
    def setupForTest(self):
        #         self.create_default_bucket()
        self.cbas_util.createConn("default")
        json_generator = JsonGenerator()
        generators = json_generator.generate_all_type_documents_for_gsi(
            docs_per_day=10, start=0)
        tasks = self._async_load_all_buckets(self.master, generators, "create",
                                             0)
        for task in tasks:
            task.get_result()
        # Create bucket on CBAS
        self.cbas_util.create_bucket_on_cbas(
            cbas_bucket_name=self.cbas_bucket_name,
            cb_bucket_name="default",
            cb_server_ip=self.cb_server_ip)

        # Create dataset on the CBAS bucket
        self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name=self.cb_bucket_name,
            cbas_dataset_name=self.cbas_dataset_name)

        # Connect to Bucket
        self.cbas_util.connect_to_bucket(
            cbas_bucket_name=self.cbas_bucket_name,
            cb_bucket_password=self.cb_bucket_password)

        # Allow ingestion to complete
        self.cbas_util.wait_for_ingestion_complete([self.cbas_dataset_name],
                                                   10, 300)

        # Load some data to allow incompatible comparisons.
        data_dict = {
            "name": [
                123456, [234234, 234234], None, {
                    'key': 'value'
                }, True, 12345.12345
            ],
            "age": [
                "String", [234234, 234234], None, {
                    'key': 'value'
                }, True, 12345.12345
            ],
            "premium_customer": [
                "String",
                12345567,
                [234234, 234234, "string"],
                None,
                {
                    'key': 'value'
                },
                123456.123456,
            ],
            "travel_history": [
                "String",
                12345567,
                None,
                {
                    'key': 'value'
                },
                123456.123456,
            ],
            "address": [
                "String",
                12345567,
                [234234, 134234, "string"],
                None,
                123456.123456,
            ]
        }
        self.client = SDKSmartClient(RestConnection(self.master), "default",
                                     self.master)
        i = 0
        for key in data_dict.keys():
            for value in data_dict[key]:
                #                 jsonDump = json.dumps({key:value})
                self.client.set("incompatible_doc_%s" % i, 0, 0, {key: value})
                i += 1
        self.client.close()
Example #21
 def generate_docs_default(self, docs_per_day, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_employee(docs_per_day, start)
Example #22
 def generate_docs_default(self, docs_per_day, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_employee(docs_per_day, start)
Example #23
 def generate_docs_sales(self, docs_per_day, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_employee_sales_data(
         docs_per_day=docs_per_day, start=start)
Example #24
    def test_fail_over_node_followed_by_rebalance_out_or_add_back(self):
        """
        1. Start with an initial setup, having 1 KV and 1 CBAS
        2. Add a node that will be failed over - KV/CBAS
        3. Create CBAS buckets and dataset
        4. Fail over the KV node based on the graceful_failover parameter specified
        5. Rebalance out/add back based on input param specified in conf file
        6. Perform doc operations
        7. run concurrent queries
        8. Verify document count on dataset post failover
        """
        self.log.info("Add an extra node to fail-over")
        self.add_node(node=self.rebalanceServers[1])

        self.log.info("Read the failure out type to be performed")
        graceful_failover = self.input.param("graceful_failover", True)

        self.log.info("Set up test - Create cbas buckets and data-sets")
        self.setup_for_test()

        self.log.info("Perform Async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(
            docs_per_day=self.num_items * 3 / 2, start=self.num_items)
        kv_task = self._async_load_all_buckets(self.master, generators,
                                               "create", 0)

        self.log.info("Run concurrent queries on CBAS")
        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(
            query,
            "async",
            self.num_concurrent_queries,
            batch_size=self.concurrent_batch_size)

        self.log.info("fail-over the node")
        fail_task = self._cb_cluster.async_failover(self.input.servers,
                                                    [self.rebalanceServers[1]],
                                                    graceful_failover)
        fail_task.get_result()

        self.log.info(
            "Read input param to decide on add back or rebalance out")
        self.rebalance_out = self.input.param("rebalance_out", False)
        if self.rebalance_out:
            self.log.info("Rebalance out the fail-over node")
            self.rebalance()
        else:
            self.recovery_strategy = self.input.param("recovery_strategy",
                                                      "full")
            self.log.info("Performing %s recovery" % self.recovery_strategy)
            success = False
            end_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
            while datetime.datetime.now() < end_time and not success:
                try:
                    self.sleep(10, message="Wait for fail over complete")
                    self.rest.set_recovery_type(
                        'ns_1@' + self.rebalanceServers[1].ip,
                        self.recovery_strategy)
                    success = True
                except Exception:
                    self.log.info(
                        "Fail over in progress. Re-try after 10 seconds.")
                    pass
            if not success:
                self.fail("Recovery %s failed." % self.recovery_strategy)
            self.rest.add_back_node('ns_1@' + self.rebalanceServers[1].ip)
            self.rebalance()

        self.log.info("Get KV ops result")
        for task in kv_task:
            task.get_result()

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        self.log.info("Validate dataset count on CBAS")
        count_n1ql = self.rest.query_tool(
            'select count(*) from `%s`' %
            self.cb_bucket_name)['results'][0]['$1']
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name,
                count_n1ql,
                0,
                timeout=400,
                analytics_timeout=400):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )
Example #25
    def test_scan_consistency_with_kv_mutations(self):

        self.log.info('Load documents in the default bucket')
        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0,
                                               self.num_items)

        self.log.info('Create dataset')
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info('Connect link')
        self.cbas_util.connect_link()

        self.log.info('Verify dataset count')
        self.cbas_util.validate_cbas_dataset_items_count(
            self.cbas_dataset_name, self.num_items)

        self.log.info("Perform async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(
            docs_per_day=self.num_items * 4, start=self.num_items)
        kv_task = self._async_load_all_buckets(self.master,
                                               generators,
                                               "create",
                                               0,
                                               batch_size=5000)

        self.log.info('Validate count')
        query = 'select count(*) from %s' % self.cbas_dataset_name
        dataset_count = 0
        start_time = time.time()
        output_with_scan = []
        output_without_scan = []
        while time.time() < start_time + 120:
            try:
                response_with_scan, _, _, results_with_scan, _ = self.cbas_util.execute_statement_on_cbas_util(
                    query,
                    scan_consistency=self.scan_consistency,
                    scan_wait=self.scan_wait)
                self.assertEqual(response_with_scan, "success",
                                 "Query failed...")
                output_with_scan.append(results_with_scan[0]['$1'])

                response_without_scan, _, _, results_without_scan, _ = self.cbas_util.execute_statement_on_cbas_util(
                    query, scan_consistency='not_bounded')
                self.assertEqual(response_without_scan, "success",
                                 "Query failed...")
                output_without_scan.append(results_without_scan[0]['$1'])

                if results_without_scan[0]['$1'] == self.num_items * 4:
                    break
            except Exception:
                self.log.info('Query failed, retrying and ignoring transient failures...')

        self.log.info("Get KV ops result")
        for task in kv_task:
            task.get_result()

        self.log.info(
            'Compare the number of distinct count results with and without scan consistency')
        self.assertTrue(
            len(set(output_with_scan)) < len(set(output_without_scan)),
            msg='Count query with scan consistency must return fewer distinct results')
        cbas_datasets = sorted(list(set(output_with_scan)))
        count_n1ql = self.rest.query_tool(
            'select count(*) from %s' %
            self.cb_bucket_name)['results'][0]['$1']
        self.assertEqual(cbas_datasets[-1],
                         count_n1ql,
                         msg='KV-CBAS count mismatch. Actual %s, expected %s' %
                         (cbas_datasets[-1], count_n1ql))
Example #26
 def generate_docs_sabre(self, docs_per_day, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_sabre(docs_per_day, start)
Example #27
 def generate_docs_sales(self, docs_per_day, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_employee_sales_data(docs_per_day = docs_per_day, start = start)
Example #28
 def generate_docs_bigdata(self, docs_per_day, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_bigdata(end=(1000 * docs_per_day),
                                                 start=start,
                                                 value_size=self.value_size)
Example #29
 def generate_docs_bigdata(self, docs_per_day, start=0):
     json_generator = JsonGenerator()
     return json_generator.generate_docs_bigdata(end=(1000*docs_per_day), start=start, value_size=self.value_size)
Example #30
    def test_to_fail_initial_rebalance_and_verify_subsequent_rebalance_succeeds(
            self):

        self.log.info("Pick the incoming and outgoing nodes during rebalance")
        self.rebalance_type = self.input.param("rebalance_type", "in")
        nodes_to_add = [self.rebalanceServers[1]]
        nodes_to_remove = []
        reinitialize_cbas_util = False
        if self.rebalance_type == 'out':
            nodes_to_remove.append(self.rebalanceServers[1])
            self.add_node(self.rebalanceServers[1])
            nodes_to_add = []
        elif self.rebalance_type == 'swap':
            self.add_node(nodes_to_add[0], rebalance=False)
            nodes_to_remove.append(self.cbas_node)
            reinitialize_cbas_util = True
        self.log.info(
            "Incoming nodes - %s, outgoing nodes - %s. For rebalance type %s "
            % (nodes_to_add, nodes_to_remove, self.rebalance_type))

        self.log.info("Creates cbas buckets and dataset")
        dataset_count_query = "select count(*) from {0};".format(
            self.cbas_dataset_name)
        self.setup_for_test()

        self.log.info("Perform async doc operations on KV")
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(
            docs_per_day=self.num_items * 3 / 2, start=self.num_items)
        kv_task = self._async_load_all_buckets(self.master,
                                               generators,
                                               "create",
                                               0,
                                               batch_size=5000)

        self.log.info("Run concurrent queries on CBAS")
        handles = self.cbas_util._run_concurrent_queries(
            dataset_count_query, "async", self.num_concurrent_queries)

        self.log.info("Fetch the server to restart couchbase on")
        restart_couchbase_on_incoming_or_outgoing_node = self.input.param(
            "restart_couchbase_on_incoming_or_outgoing_node", True)
        if not restart_couchbase_on_incoming_or_outgoing_node:
            node = self.cbas_node
        else:
            node = self.rebalanceServers[1]
        shell = RemoteMachineShellConnection(node)

        self.log.info("Rebalance nodes")
        self.cluster.async_rebalance(self.servers, nodes_to_add,
                                     nodes_to_remove)

        self.log.info("Restart Couchbase on node %s" % node.ip)
        shell.restart_couchbase()
        self.sleep(30, message="Waiting for service to be back again...")

        self.log.info("Verify subsequent rebalance is successful")
        nodes_to_add = []  # Node is already added to cluster in previous rebalance, adding it again will throw exception
        self.assertTrue(
            self.cluster.rebalance(self.servers, nodes_to_add,
                                   nodes_to_remove))

        self.log.info("Get KV ops result")
        for task in kv_task:
            task.get_result()

        self.log.info("Log concurrent query status")
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)

        if reinitialize_cbas_util is True:
            self.cbas_util = cbas_utils(self.master, self.rebalanceServers[1])
            self.cbas_util.createConn("default")

        self.log.info("Validate dataset count on CBAS")
        if not self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items * 3 / 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket"
            )
Example #31
 def generate_docs(self, docs_per_day, years):
     return JsonGenerator().generate_docs_sabre(docs_per_day=docs_per_day,
                                                years=years)
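Taken together, most of the tests above follow the same skeleton: build a generator with JsonGenerator, start an async KV load, optionally run concurrent CBAS queries, and finally validate the dataset count. A minimal sketch of that skeleton, reusing only calls that appear in the examples; the surrounding test class and its helpers (_async_load_all_buckets, cbas_util, and so on) are assumed to exist exactly as used above:

    def _load_query_and_validate(self, expected_items):
        # Generate and asynchronously load documents (as in Examples #1 and #12)
        json_generator = JsonGenerator()
        generators = json_generator.generate_docs_simple(docs_per_day=expected_items, start=0)
        tasks = self._async_load_all_buckets(self.master, generators, "create", 0)

        # Keep the system busy with concurrent CBAS queries (as in Example #11)
        statement = "select count(*) from {0};".format(self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(statement, "async", self.num_concurrent_queries)

        # Wait for KV ops, log query outcomes, then validate the dataset count
        for task in tasks:
            task.get_result()
        self.cbas_util.log_concurrent_query_outcome(self.master, handles)
        if not self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, expected_items, 0):
            self.fail("No. of items in CBAS dataset do not match that in the CB bucket")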