Example #1
 def test_multi_create_query_explain_drop_index_scan_consistency_with_index_expressions(self):
     query_definition_generator = SQLDefinitionGenerator()
     self.query_definitions = (
         query_definition_generator.generate_employee_data_query_definitions_for_index_expressions()
     )
     self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
     self.test_multi_create_query_explain_drop_index_scan_consistency()
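Every example on this page follows the same three-step pattern: instantiate SQLDefinitionGenerator, call one of its generate_*_query_definitions methods for the chosen dataset, then narrow the result with filter_by_group before handing the definitions to the test body. A minimal standalone sketch of that pattern (the import path and the index_name attribute are assumptions based on the Couchbase testrunner layout, not shown in the examples themselves):

    # Sketch only: the module path below is assumed, not confirmed by these examples.
    from couchbase_helper.query_definitions import SQLDefinitionGenerator

    generator = SQLDefinitionGenerator()
    # Build the full set of employee-dataset query definitions.
    definitions = generator.generate_employee_data_query_definitions()
    # "groups" normally arrives as a test parameter, e.g. "simple:composite".split(":").
    definitions = generator.filter_by_group(["simple"], definitions)
    for definition in definitions:
        print(definition.index_name)  # assumed attribute on QueryDefinition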
Example #2
    def test_gsi_with_index_rebalance_redaction_enabled(self):
        # load bucket and do some ops
        self.set_indexer_logLevel("trace")
        json_generator = JsonGenerator()
        gen_docs = json_generator.generate_all_type_documents_for_gsi(docs_per_day=self.doc_per_day, start=0)
        full_docs_list = self.generate_full_docs_list(gen_docs)
        n1ql_helper = N1QLHelper(use_rest=True, buckets=self.buckets, full_docs_list=full_docs_list,
                                 log=log, input=self.input, master=self.master)
        self.load(gen_docs)
        n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        query_definition_generator = SQLDefinitionGenerator()
        query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
        query_definitions = query_definition_generator.filter_by_group("all", query_definitions)
        # set log redaction level, collect logs, verify log files exist and verify them for redaction
        self.set_redaction_level()
        self.start_logs_collection()
        # Create an index for each query definition on every bucket
        for query_definition in query_definitions:
            for bucket in self.buckets:
                create_query = query_definition.generate_index_create_query(bucket.name)
                n1ql_helper.run_cbq_query(query=create_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], self.nodes_in_list,
                                                 [], services=self.services_in)

        rebalance.result()
        self.sleep(30)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                drop_query = query_definition.generate_index_drop_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=drop_query, server=n1ql_node)
        result = self.monitor_logs_collection()
        log.info(result)
        try:
            logs_path = result["perNode"]["ns_1@" + str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["ns_1@127.0.0.1"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        log_file = self.input.param("log_file_name", "indexer.log")
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.{0}".format(log_file))
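The filename handling at the end of this test is easier to read with a concrete value. A hypothetical logs_path (the real one is returned by monitor_logs_collection()) and the strings the slicing produces:

    # Hypothetical example value for illustration only.
    logs_path = "/tmp/collect/ns_1@172.23.1.1-20230101-redacted.zip"
    redactFileName = logs_path.split('/')[-1]                    # "ns_1@172.23.1.1-20230101-redacted.zip"
    nonredactFileName = redactFileName.replace('-redacted', '')  # "ns_1@172.23.1.1-20230101.zip"
    remotepath = logs_path[0:logs_path.rfind('/') + 1]           # "/tmp/collect/"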
Example #3
    def test_gsi_with_crud_with_redaction_enabled(self):
        # load bucket and do some ops
        self.set_indexer_logLevel("trace")
        json_generator = JsonGenerator()
        gen_docs = json_generator.generate_all_type_documents_for_gsi(
            docs_per_day=self.doc_per_day, start=0)
        full_docs_list = self.generate_full_docs_list(gen_docs)
        n1ql_helper = N1QLHelper(use_rest=True,
                                 buckets=self.buckets,
                                 full_docs_list=full_docs_list,
                                 log=log,
                                 input=self.input,
                                 master=self.master)
        self.load(gen_docs)
        n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        query_definition_generator = SQLDefinitionGenerator()
        query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
        query_definitions = query_definition_generator.filter_by_group(
            "all", query_definitions)
        # set log redaction level, collect logs, verify log files exist and verify them for redaction
        self.set_redaction_level()
        self.start_logs_collection()
        # Create an index for each query definition on every bucket
        for query_definition in query_definitions:
            for bucket in self.buckets:
                create_query = query_definition.generate_index_create_query(
                    bucket.name)
                n1ql_helper.run_cbq_query(query=create_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(
                    bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                drop_query = query_definition.generate_index_drop_query(
                    bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=drop_query, server=n1ql_node)
        result = self.monitor_logs_collection()
        log.info(result)
        try:
            logs_path = result["perNode"]["ns_1@" + str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["ns_1@127.0.0.1"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        log_file = self.input.param("log_file_name", "indexer.log")
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.{0}".format(log_file))
Example #4
 def test_multi_create_query_explain_drop_index_scan_consistency_with_index_expressions(self):
     query_definition_generator = SQLDefinitionGenerator()
     self.query_definitions = (
         query_definition_generator.generate_employee_data_query_definitions_for_index_expressions()
     )
     self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
     self.test_multi_create_query_explain_drop_index_scan_consistency()
Example #5
 def test_multi_create_query_explain_drop_index_with_index_where_clause(self):
     query_definition_generator = SQLDefinitionGenerator()
     self.query_definitions = (
         query_definition_generator.generate_employee_data_query_definitions_for_index_where_clause()
     )
     self.use_where_clause_in_index = True
     self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
     self.test_multi_create_query_explain_drop_index()
Example #6
 def test_multi_create_query_explain_drop_index_with_index_where_clause(self):
     query_definition_generator = SQLDefinitionGenerator()
     self.query_definitions = (
         query_definition_generator.generate_employee_data_query_definitions_for_index_where_clause()
     )
     self.use_where_clause_in_index = True
     self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
     self.test_multi_create_query_explain_drop_index()
Example #7
 def _pick_query_definitions_employee(self):
     query_definition_generator = SQLDefinitionGenerator()
     if self.create_index_usage == "where":
         self.query_definitions = query_definition_generator.generate_employee_data_query_definitions_for_where_clause()
         self.use_where_clause_in_index = True
     elif self.create_index_usage == "expressions":
         self.query_definitions = query_definition_generator.generate_employee_data_query_definitions_for_index_expressions()
         self.use_where_clause_in_index = True
     else:
         self.query_definitions = query_definition_generator.generate_employee_data_query_definitions()
Example #8
 def setUp(self):
     super(BaseSecondaryIndexingTests, self).setUp()
     self.index_lost_during_move_out = []
     self.verify_using_index_status = self.input.param(
         "verify_using_index_status", False)
     self.use_replica_when_active_down = self.input.param(
         "use_replica_when_active_down", True)
     self.use_where_clause_in_index = self.input.param(
         "use_where_clause_in_index", False)
     self.scan_consistency = self.input.param("scan_consistency",
                                              "request_plus")
     self.scan_vector_per_values = self.input.param(
         "scan_vector_per_values", None)
     self.timeout_for_index_online = self.input.param(
         "timeout_for_index_online", 600)
     self.verify_query_result = self.input.param("verify_query_result",
                                                 True)
     self.verify_explain_result = self.input.param("verify_explain_result",
                                                   True)
     self.defer_build = self.input.param("defer_build", True)
     self.run_query_with_explain = self.input.param(
         "run_query_with_explain", True)
     self.run_query = self.input.param("run_query", True)
     self.graceful = self.input.param("graceful", False)
     self.groups = self.input.param("groups", "all").split(":")
     self.use_rest = self.input.param("use_rest", False)
     if not self.use_rest:
         query_definition_generator = SQLDefinitionGenerator()
         if self.dataset == "default" or self.dataset == "employee":
             self.query_definitions = query_definition_generator.generate_employee_data_query_definitions(
             )
         if self.dataset == "simple":
             self.query_definitions = query_definition_generator.generate_simple_data_query_definitions(
             )
         if self.dataset == "sabre":
             self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions(
             )
         if self.dataset == "bigdata":
             self.query_definitions = query_definition_generator.generate_big_data_query_definitions(
             )
         if self.dataset == "array":
             self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions(
             )
         self.query_definitions = query_definition_generator.filter_by_group(
             self.groups, self.query_definitions)
     self.ops_map = self._create_operation_map()
     self.find_nodes_in_list()
     self.generate_map_nodes_out_dist()
     self.memory_create_list = []
     self.memory_drop_list = []
     self.skip_cleanup = self.input.param("skip_cleanup", False)
     self.index_loglevel = self.input.param("index_loglevel", None)
     if self.index_loglevel:
         self.set_indexer_logLevel(self.index_loglevel)
     if self.dgm_run:
         self._load_doc_data_all_buckets(gen_load=self.gens_load)
     self.gsi_thread = Cluster()
     self.defer_build = self.defer_build and self.use_gsi_for_secondary
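The dataset dispatch in this setUp (repeated in Examples #10 through #13 below) checks each dataset name with its own if-block. An equivalent table-driven sketch, using only the generator methods already shown above (handling of an unknown dataset is left open here, just as the original leaves self.query_definitions unset in that case):

    # Sketch: the same dispatch as above, expressed as a lookup table.
    generators = {
        "default": query_definition_generator.generate_employee_data_query_definitions,
        "employee": query_definition_generator.generate_employee_data_query_definitions,
        "simple": query_definition_generator.generate_simple_data_query_definitions,
        "sabre": query_definition_generator.generate_sabre_data_query_definitions,
        "bigdata": query_definition_generator.generate_big_data_query_definitions,
        "array": query_definition_generator.generate_airlines_data_query_definitions,
    }
    if self.dataset in generators:
        self.query_definitions = generators[self.dataset]()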
Example #9
    def create_indexes(self, num=0, defer_build=False, itr=0, expected_failure=None):
        # Avoid the mutable-default-argument pitfall: default to a fresh list per call.
        if expected_failure is None:
            expected_failure = []
        query_definition_generator = SQLDefinitionGenerator()
        index_create_tasks = []
        if self.system_failure in self.failure_map:
            expected_failure = self.failure_map[self.system_failure]["expected_failure"]
        self.log.info(threading.currentThread().getName() + " Started")
        if len(self.keyspace) < num:
            num_indexes_collection = math.ceil(num / len(self.keyspace))
        else:
            num_indexes_collection = 1
        for collection_keyspace in self.keyspace:
            if self.run_tasks:
                collection_name = collection_keyspace.split('.')[-1]
                scope_name = collection_keyspace.split('.')[-2]
                query_definitions = query_definition_generator.generate_employee_data_query_definitions(
                    index_name_prefix='idx_' + scope_name + "_" + collection_name,
                    keyspace=collection_keyspace)
                server = random.choice(self.n1ql_nodes)
                index_create_task = ConcurrentIndexCreateTask(
                    server, self.test_bucket, scope_name, collection_name,
                    query_definitions, self.index_ops_obj, self.n1ql_helper,
                    num_indexes_collection, defer_build, itr, expected_failure)
                self.index_create_task_manager.schedule(index_create_task)
                index_create_tasks.append(index_create_task)
        self.log.info(threading.currentThread().getName() + " Completed")

        return index_create_tasks
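The keyspace handling in create_indexes assumes dotted "bucket.scope.collection" names and spreads the requested index count evenly across collections. A hypothetical value to illustrate both steps:

    import math

    # Hypothetical keyspace string for illustration.
    collection_keyspace = "test_bucket.scope_1.coll_2"
    collection_name = collection_keyspace.split('.')[-1]  # "coll_2"
    scope_name = collection_keyspace.split('.')[-2]       # "scope_1"
    # Requesting num=10 indexes over 4 keyspaces gives ceil(10 / 4) = 3 per collection.
    num_indexes_collection = math.ceil(10 / 4)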
Example #10
 def setUp(self):
     super(QueryHelperTests, self).setUp()
     self.create_primary_index = self.input.param("create_primary_index",
                                                  True)
     self.use_gsi_for_primary = self.input.param("use_gsi_for_primary",
                                                 True)
     self.use_gsi_for_secondary = self.input.param("use_gsi_for_secondary",
                                                   True)
     self.scan_consistency = self.input.param("scan_consistency",
                                              "request_plus")
     self.skip_host_login = self.input.param("skip_host_login", False)
     if not self.skip_host_login:
         self.shell = RemoteMachineShellConnection(self.master)
     else:
         self.shell = None
     if not self.skip_init_check_cbserver:  # for upgrade tests
         self.buckets = RestConnection(self.master).get_buckets()
     self.docs_per_day = self.input.param("doc-per-day", 49)
     self.use_rest = self.input.param("use_rest", True)
     self.max_verify = self.input.param("max_verify", None)
     self.item_flag = self.input.param("item_flag", 0)
     self.n1ql_port = self.input.param("n1ql_port", 8093)
     self.dataset = self.input.param("dataset", "default")
     self.groups = self.input.param("groups", "all").split(":")
     self.doc_ops = self.input.param("doc_ops", False)
     self.batch_size = self.input.param("batch_size", 1)
     self.create_ops_per = self.input.param("create_ops_per", 0)
     self.expiry_ops_per = self.input.param("expiry_ops_per", 0)
     self.delete_ops_per = self.input.param("delete_ops_per", 0)
     self.update_ops_per = self.input.param("update_ops_per", 0)
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.full_docs_list = self.generate_full_docs_list(self.gens_load)
     self.gen_results = TuqGenerators(self.log,
                                      full_set=self.full_docs_list)
     if not self.skip_init_check_cbserver:  # for upgrade tests
         self.n1ql_server = self.get_nodes_from_services_map(
             service_type="n1ql")
     query_definition_generator = SQLDefinitionGenerator()
     if self.dataset == "default" or self.dataset == "employee":
         self.query_definitions = query_definition_generator.generate_employee_data_query_definitions(
         )
     if self.dataset == "simple":
         self.query_definitions = query_definition_generator.generate_simple_data_query_definitions(
         )
     if self.dataset == "sabre":
         self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions(
         )
     if self.dataset == "bigdata":
         self.query_definitions = query_definition_generator.generate_big_data_query_definitions(
         )
     if self.dataset == "array":
         self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions(
         )
     self.query_definitions = query_definition_generator.filter_by_group(
         self.groups, self.query_definitions)
     self.num_index_replicas = self.input.param("num_index_replica", 0)
Example #11
 def setUp(self):
     super(BaseSecondaryIndexingTests, self).setUp()
     self.run_create_index = self.input.param("run_create_index", True)
     self.run_drop_index = self.input.param("run_drop_index", True)
     self.run_query_with_explain = self.input.param(
         "run_query_with_explain", True)
     self.run_query = self.input.param("run_query", True)
     self.graceful = self.input.param("graceful", False)
     self.groups = self.input.param("groups", "simple").split(":")
     query_definition_generator = SQLDefinitionGenerator()
     if self.dataset == "default" or self.dataset == "employee":
         self.query_definitions = query_definition_generator.generate_employee_data_query_definitions(
         )
     if self.dataset == "simple":
         self.query_definitions = query_definition_generator.generate_simple_data_query_definitions(
         )
     self.query_definitions = query_definition_generator.filter_by_group(
         self.groups, self.query_definitions)
     self.ops_map = self._create_operation_map()
     self.find_nodes_in_list()
     self.generate_map_nodes_out_dist()
     self.memory_create_list = []
     self.memory_drop_list = []
Example #12
 def setUp(self):
     super(BaseSecondaryIndexingTests, self).setUp()
     self.initial_stats = None
     self.final_stats = None
     self.index_lost_during_move_out = []
     self.verify_using_index_status = self.input.param("verify_using_index_status", False)
     self.use_replica_when_active_down = self.input.param("use_replica_when_active_down", True)
     self.use_where_clause_in_index = self.input.param("use_where_clause_in_index", False)
     self.check_stats = self.input.param("check_stats", True)
     self.create_index_usage = self.input.param("create_index_usage", "no_usage")
     self.scan_consistency = self.input.param("scan_consistency", "request_plus")
     self.scan_vector_per_values = self.input.param("scan_vector_per_values", None)
     self.timeout_for_index_online = self.input.param("timeout_for_index_online", 600)
     self.run_create_index = self.input.param("run_create_index", True)
     self.verify_query_result = self.input.param("verify_query_result", True)
     self.verify_explain_result = self.input.param("verify_explain_result", True)
     self.defer_build = self.input.param("defer_build", True)
     self.deploy_on_particular_node = self.input.param("deploy_on_particular_node", None)
     self.run_drop_index = self.input.param("run_drop_index", True)
     self.run_query_with_explain = self.input.param("run_query_with_explain", True)
     self.run_query = self.input.param("run_query", True)
     self.graceful = self.input.param("graceful", False)
     self.groups = self.input.param("groups", "all").split(":")
     self.use_rest = self.input.param("use_rest", False)
     if not self.use_rest:
         query_definition_generator = SQLDefinitionGenerator()
         if self.dataset == "default" or self.dataset == "employee":
             self.query_definitions = query_definition_generator.generate_employee_data_query_definitions()
         if self.dataset == "simple":
             self.query_definitions = query_definition_generator.generate_simple_data_query_definitions()
         if self.dataset == "sabre":
             self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions()
         if self.dataset == "bigdata":
             self.query_definitions = query_definition_generator.generate_big_data_query_definitions()
         if self.dataset == "array":
             self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
         self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
     self.ops_map = self._create_operation_map()
     self.find_nodes_in_list()
     self.generate_map_nodes_out_dist()
     self.memory_create_list = []
     self.memory_drop_list = []
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.skip_cleanup = self.input.param("skip_cleanup", False)
     self.index_loglevel = self.input.param("index_loglevel", None)
     if self.index_loglevel:
         self.set_indexer_logLevel(self.index_loglevel)
     if self.dgm_run:
         self._load_doc_data_all_buckets(gen_load=self.gens_load)
     self.gsi_thread = Cluster()
     self.index_op = self.input.param("index_op", None)
     self.defer_build = self.defer_build and self.use_gsi_for_secondary
Example #13
 def setUp(self):
     super(BaseSecondaryIndexingTests, self).setUp()
     self.initial_stats = None
     self.final_stats = None
     self.index_lost_during_move_out = []
     self.verify_using_index_status = self.input.param("verify_using_index_status", False)
     self.use_replica_when_active_down = self.input.param("use_replica_when_active_down", True)
     self.use_where_clause_in_index = self.input.param("use_where_clause_in_index", False)
     self.check_stats = self.input.param("check_stats", True)
     self.create_index_usage = self.input.param("create_index_usage", "no_usage")
     self.scan_consistency = self.input.param("scan_consistency", "request_plus")
     self.scan_vector_per_values = self.input.param("scan_vector_per_values", None)
     self.timeout_for_index_online = self.input.param("timeout_for_index_online", 600)
     self.max_attempts_check_index = self.input.param("max_attempts_check_index", 10)
     self.max_attempts_query_and_validate = self.input.param("max_attempts_query_and_validate", 10)
     self.index_present = self.input.param("index_present", True)
     self.run_create_index = self.input.param("run_create_index", True)
     self.verify_query_result = self.input.param("verify_query_result", True)
     self.verify_explain_result = self.input.param("verify_explain_result", True)
     self.defer_build = self.input.param("defer_build", True)
     self.deploy_on_particular_node = self.input.param("deploy_on_particular_node", None)
     self.run_drop_index = self.input.param("run_drop_index", True)
     self.run_query_with_explain = self.input.param("run_query_with_explain", True)
     self.run_query = self.input.param("run_query", True)
     self.graceful = self.input.param("graceful", False)
     self.groups = self.input.param("groups", "all").split(":")
     query_definition_generator = SQLDefinitionGenerator()
     if self.dataset == "default" or self.dataset == "employee":
         self.query_definitions = query_definition_generator.generate_employee_data_query_definitions()
     if self.dataset == "simple":
         self.query_definitions = query_definition_generator.generate_simple_data_query_definitions()
     if self.dataset == "sabre":
         self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions()
     if self.dataset == "bigdata":
         self.query_definitions = query_definition_generator.generate_big_data_query_definitions()
     self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
     self.ops_map = self._create_operation_map()
     self.log.info(self.ops_map)
     self.find_nodes_in_list()
     self.generate_map_nodes_out_dist()
     self.memory_create_list = []
     self.memory_drop_list = []
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")