def test_failure_drop_index_non_existing_index(self):
    query_definition = QueryDefinition(
        index_name="test_failure_create_index_existing_index", index_fields="crap", query_template="", groups=[]
    )
    self.query = query_definition.generate_index_drop_query(bucket=self.buckets[0].name)
    try:
        # drop the non-existing index
        server = self.get_nodes_from_services_map(service_type="n1ql")
        self.n1ql_helper.run_cbq_query(query=self.query, server=server)
    except Exception as ex:
        msg = "GSI index test_failure_create_index_existing_index not found"
        self.assertTrue(msg in str(ex), " 5000 error not received as expected {0}".format(ex))
 def test_failure_create_index_big_fields(self):
     field_name = ""
     field_name += ",".join([str(a) for a in range(1, 100)]).replace(",", "_")
     query_definition = QueryDefinition(
         index_name="test_failure_create_index_existing_index", index_fields=field_name, query_template="", groups=[]
     )
     self.query = query_definition.generate_index_create_query(bucket=self.buckets[0], gsi_type=self.gsi_type)
     try:
         # create index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         msg = "Expression not indexable"
         self.assertTrue(msg in str(ex), " 5000 error not received as expected {0}".format(ex))
 def test_failure_create_index_non_existing_bucket(self):
     self.indexes = self.input.param("indexes", "").split(":")
     query_definition = QueryDefinition(
         index_name="test_failure_create_index_existing_index",
         index_fields=self.indexes,
         query_template="",
         groups=[])
     self.query = query_definition.generate_index_create_query(bucket="not_present_bucket")
     try:
         # create index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         msg = "Keyspace not found keyspace not_present_bucket - cause: No bucket named not_present_bucket"
         self.assertTrue(msg in str(ex),
                         " 5000 error not received as expected {0}".format(ex))
 def _create_query_definitions(self, start=0, index_count=2):
     query_definitions = []
     for ctr in range(start, start + index_count):
         index_name = "index_name_{0}".format(ctr)
         query_definition = QueryDefinition(
             index_name=index_name, index_fields=["join_yr"],
             query_template="SELECT * from %s WHERE join_yr == 2010 ", groups=[])
         query_definitions.append(query_definition)
     return query_definitions
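All of these snippets lean on QueryDefinition from the Couchbase testrunner framework. As a reading aid, here is a minimal, hypothetical stub inferred purely from how the tests call it; the real class has more behavior, and the exact N1QL it emits here is an assumption for illustration only:

# Minimal, hypothetical QueryDefinition stub inferred from the calls in these
# tests. The real class ships with the Couchbase testrunner framework; the
# SQL shapes generated below are assumptions, not testrunner's actual output.
class QueryDefinition:
    def __init__(self, index_name, index_fields=None, query_template="",
                 groups=None, index_where_clause=None, partition_by_fields=None):
        self.index_name = index_name
        self.index_fields = index_fields or []
        self.query_template = query_template
        self.groups = groups or []
        self.index_where_clause = index_where_clause
        self.partition_by_fields = partition_by_fields or []

    def generate_index_create_query(self, bucket=None, namespace=None, **kwargs):
        # The keyspace is either a bare bucket name or a fully qualified
        # "bucket.scope.collection" namespace, as seen in the tests above.
        keyspace = namespace or bucket
        query = "CREATE INDEX {0} ON {1}({2})".format(
            self.index_name, keyspace, ", ".join(self.index_fields))
        if kwargs.get("defer_build"):
            query += ' WITH {"defer_build": true}'
        return query

    def generate_index_drop_query(self, bucket=None, namespace=None, **kwargs):
        keyspace = namespace or bucket
        return "DROP INDEX {0}.{1}".format(keyspace, self.index_name)


qd = QueryDefinition(index_name="idx", index_fields=["age", "city"])
print(qd.generate_index_create_query(bucket="default"))
# CREATE INDEX idx ON default(age, city)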
 def test_failure_drop_index_non_existing_index(self):
     query_definition = QueryDefinition(
         index_name="test_failure_create_index_existing_index",
         index_fields="crap",
         query_template="",
         groups=[])
     self.query = query_definition.generate_index_drop_query(
         bucket=self.buckets[0].name)
     try:
         # drop the non-existing index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         msg = "GSI index test_failure_create_index_existing_index not found"
         self.assertTrue(
             msg in str(ex),
             " 5000 error not received as expected {0}".format(ex))
 def test_failure_create_index_existing_index(self):
     self.indexes = self.input.param("indexes", "").split(":")
     query_definition = QueryDefinition(
         index_name="test_failure_create_index_existing_index",
         index_fields=self.indexes,
         query_template="",
         groups=[])
     self.query = query_definition.generate_index_create_query(bucket=self.buckets[0].name)
     try:
         # create index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
         # create same index again
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         self.assertTrue("Index test_failure_create_index_existing_index already exist" in str(ex),
                         " 5000 error not received as expected {0}".format(ex))
 def test_failure_drop_index_non_existing_bucket(self):
     query_definition = QueryDefinition(
         index_name="test_failure_create_index_existing_index",
         index_fields="crap",
         query_template="",
         groups=[])
     self.query = query_definition.generate_index_drop_query(
         bucket="not_present_bucket")
     try:
         # drop index on the non-existing bucket
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         msg = "Keyspace not found keyspace not_present_bucket - cause: No bucket named not_present_bucket"
         self.assertTrue(
             msg in str(ex),
             " 5000 error not received as expected {0}".format(ex))
 def test_failover_indexer_restart(self):
     """
     CBQE-3153
     Indexer add back scenarios
     :return:
     """
     index_dist_factor = 1
     # Create Indexes
     index_servers = self.get_nodes_from_services_map(service_type="index",
                                                      get_all_nodes=True)
     num_indexes = len(index_servers) * index_dist_factor
     self.query_definitions = []
     for ctr in range(num_indexes):
         index_name = "test_restart_index_{0}".format(ctr)
         query_definition = QueryDefinition(
             index_name=index_name,
             index_fields=["join_yr"],
             query_template="SELECT * from %s USE INDEX ({0}) WHERE join_yr == 2010 ".format(index_name),
             groups=[])
         self.query_definitions.append(query_definition)
     node_count = 0
     for query_definition in self.query_definitions:
         for bucket in self.buckets:
             deploy_node_info = [
                 "{0}:{1}".format(index_servers[node_count].ip,
                                  index_servers[node_count].port)
             ]
             self.log.info(
                 "Creating {0} index on bucket {1} on node {2}...".format(
                     query_definition.index_name, bucket.name,
                     deploy_node_info[0]))
             self.create_index(bucket.name,
                               query_definition,
                               deploy_node_info=deploy_node_info)
             node_count += 1
     self.sleep(30)
     kvOps_tasks = self._run_kvops_tasks()
     remote = RemoteMachineShellConnection(index_servers[0])
     remote.stop_server()
     self.sleep(20)
     for bucket in self.buckets:
         for query in self.query_definitions:
             try:
                 self.query_using_index(bucket=bucket,
                                        query_definition=query,
                                        expected_result=None,
                                        scan_consistency=None,
                                        scan_vector=None,
                                        verify_results=True)
             except Exception as ex:
                 msg = "queryport.indexNotFound"
                 if msg in str(ex):
                     continue
                 else:
                     self.log.info(str(ex))
                     break
 def test_three_indexes_on_different_fields_with_asc_desc_combinations(self):
     failed_scans = []
     query_definition1 = QueryDefinition(
         index_name="index_field1",
         index_fields=["name"],
         groups=["simple"], index_where_clause=" name IS NOT NULL ")
     scan_content1 = [{"Seek": None,
                       "Filter": [{"Low": "Adara", "High": "Winta"}]}]
     query_definition2 = QueryDefinition(
         index_name="index_field2",
         index_fields=["age"],
         groups=["simple"], index_where_clause=" age IS NOT NULL ")
     scan_content2 = [{"Seek": None,
                       "Filter": [{"Low": 20, "High": 60}]}]
     query_definition3 = QueryDefinition(
         index_name="index_field3",
         index_fields=["premium_customer"],
         groups=["simple"], index_where_clause=" premium_customer IS NOT NULL ")
     scan_content3 = [{"Seek": None,
                       "Filter": [{"Low": False, "High": True}]}]
     scan_contents = {query_definition1: scan_content1, query_definition2: scan_content2, query_definition3: scan_content3}
     desc_values = [[True], [False]]
     for bucket in self.buckets:
         for query_definition, scan_content in scan_contents.items():
             for desc_value in desc_values:
                 id_map = self.create_index_using_rest(bucket, query_definition, desc=desc_value)
                 multiscan_content = self._update_multiscan_content(index_fields=1)
                 for inclusion in range(4):
                     scan_content[0]["Filter"][0]["Inclusion"] = \
                         inclusion
                     multiscan_content["scans"] = json.dumps(scan_content)
                     multiscan_result = \
                         self.rest.multiscan_for_gsi_index_with_rest(
                             id_map["id"], json.dumps(multiscan_content))
                     multiscan_count_result = self.rest.multiscan_count_for_gsi_index_with_rest(
                         id_map["id"], json.dumps(multiscan_content))
                     print(multiscan_result)
                     check = self._verify_items_indexed_for_two_field_index(
                         bucket, id_map["id"],
                         ["name"], scan_content, multiscan_result, desc_value, multiscan_count_result, )
                     if not check:
                         failed_scans.append(copy.deepcopy(scan_content))
                 self.drop_index(bucket, query_definition)
     msg = "Failed Scans: {0}".format(failed_scans)
     self.assertEqual(len(failed_scans), 0, msg)
 def test_failure_create_index_existing_index(self):
     self.indexes = self.input.param("indexes", "").split(":")
     query_definition = QueryDefinition(index_name="test_failure_create_index_existing_index",
                                        index_fields=self.indexes, query_template="", groups=[])
     self.query = query_definition.generate_index_create_query(namespace=self.buckets[0].name,
                                                               gsi_type=self.gsi_type)
     try:
         # create index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
         # create same index again
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         self.assertTrue("index test_failure_create_index_existing_index already exist" in str(ex),
                         " 5000 error not received as expected {0}".format(ex))
     finally:
         self.query = query_definition.generate_index_drop_query(namespace=self.buckets[0].name)
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
 def test_failure_create_index_non_existing_bucket(self):
     self.indexes = self.input.param("indexes", "").split(":")
     query_definition = QueryDefinition(
         index_name="test_failure_create_index_existing_index",
         index_fields=self.indexes,
         query_template="",
         groups=[])
     self.query = query_definition.generate_index_create_query(
         bucket="not_present_bucket", gsi_type=self.gsi_type)
     try:
         # create index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         msg = "Keyspace not found in CB datastore keyspace not_present_bucket - cause: No bucket named not_present_bucket"
         self.assertTrue(
             msg in str(ex),
             " 12003 error not received as expected {0}".format(ex))
 def generate_query_definition_for_aggr_data(self):
     query_definitions = []
     query_definition = QueryDefinition(
         index_name="agg_func_int_arr",
         index_fields=["int_num", "ALL ARRAY t FOR t in int_arr END"],
         query_template="SELECT {0} int_num){1} FROM %s where any t in int_arr satisfies t > 50 end")
     query_definitions.append(query_definition)
     query_definition = QueryDefinition(
         index_name="agg_func_float_arr",
         index_fields=["float_num", "ALL ARRAY t FOR t in float_arr END"],
         query_template="SELECT {0} float_num){1} FROM %s where any t in float_arr satisfies t > 50.0 end")
     query_definitions.append(query_definition)
     query_definition = QueryDefinition(
         index_name="agg_func_str_arr",
         index_fields=["name", "ALL ARRAY t FOR t in str_arr END"],
         query_template="SELECT {0} name){1} FROM %s where any t in str_arr satisfies t = \"India\" end")
     query_definitions.append(query_definition)
     return query_definitions
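A note on how these aggregate templates are presumably consumed: {0} receives the opening of an aggregate call (e.g. "SUM("), {1} an optional alias, and %s is substituted with the keyspace later. This is inferred from the "int_num){1}" shape of the templates above, not taken from testrunner's actual driver code:

# Hedged sketch of the template expansion assumed above.
template = "SELECT {0} int_num){1} FROM %s where any t in int_arr satisfies t > 50 end"
query = template.format("SUM(", " AS total") % "`default`"
print(query)
# SELECT SUM( int_num) AS total FROM `default` where any t in int_arr satisfies t > 50 end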
 def test_create_gsi_index_without_primary_index(self):
     self.indexes = self.input.param("indexes", "").split(":")
     query_definition = QueryDefinition(
         index_name="test_failure_create_index_existing_index",
         index_fields=self.indexes,
         query_template="",
         groups=[])
     self.query = query_definition.generate_index_create_query(
         bucket=self.buckets[0].name, gsi_type=self.gsi_type)
     try:
         # create index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         msg = "Keyspace not_present_bucket name not found - cause: Bucket not_present_bucket not found"
         self.assertTrue(
             msg in str(ex),
             " 5000 error not received as expected {0}".format(ex))
 def test_delete_deleted_bsc(self):
     num_of_docs_per_collection = 10**5
     self.prepare_collection_for_indexing(
         num_of_docs_per_collection=num_of_docs_per_collection)
     collection_namespace = self.namespaces[0]
     index_gen = QueryDefinition(index_name='idx',
                                 index_fields=['age', 'city'])
     query = index_gen.generate_index_create_query(
         namespace=collection_namespace)
     self.run_cbq_query(query=query)
     self.wait_until_indexes_online()
     query = f'select count(age) from {collection_namespace} where age > 0 and city like "A%"'
     result = self.run_cbq_query(query=query)['results'][0]['$1']
     self.assertTrue(result > 0)
     _, keyspace = collection_namespace.split(':')
     bucket, scope, collection = keyspace.split('.')
     try:
         with ThreadPoolExecutor() as executor:
             task1 = executor.submit(self.delete_bucket_scope_collection,
                                     server=self.servers[0],
                                     delete_item=self.item_to_delete,
                                     bucket=bucket,
                                     scope=scope,
                                     collection=collection)
             # retrying delete of deleted BSC
             self.sleep(1)
             task2 = executor.submit(self.delete_bucket_scope_collection,
                                     server=self.servers[0],
                                     delete_item=self.item_to_delete,
                                     bucket=bucket,
                                     scope=scope,
                                     collection=collection,
                                     timeout=5)
             result = task1.result()
             self.assertTrue(result,
                             f"Failed to Delete {self.item_to_delete}")
             result = task2.result()
             self.assertFalse(result,
                              "Got second success for delete operation")
             self.sleep(10)
             index_status = self.rest.get_index_status()
             self.assertFalse(index_status)
     except Exception as err:
         self.fail(str(err))
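The test above races two deletes of the same keyspace and expects exactly one success. Here is that concurrency shape reduced to a self-contained sketch; delete_once and its backing set are stand-ins for the framework's delete_bucket_scope_collection, and only the ThreadPoolExecutor pattern matches the test:

# Self-contained sketch of the delete race in test_delete_deleted_bsc.
import threading
from concurrent.futures import ThreadPoolExecutor

_lock = threading.Lock()
_buckets = {"test_bucket"}

def delete_once(bucket):
    # Stand-in for delete_bucket_scope_collection: only the first delete of a
    # given bucket succeeds; a retry on the deleted bucket fails.
    with _lock:
        if bucket in _buckets:
            _buckets.remove(bucket)
            return True
        return False

with ThreadPoolExecutor() as executor:
    task1 = executor.submit(delete_once, "test_bucket")
    task2 = executor.submit(delete_once, "test_bucket")
    results = sorted([task1.result(), task2.result()])

assert results == [False, True], "exactly one delete should succeed"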
    def test_index_status_with_node_disconnect_during_flush(self):
        data_nodes = self.get_kv_nodes()
        self.assertTrue(len(data_nodes) >= 2)
        num_of_docs_per_collection = 10**5
        self.prepare_collection_for_indexing(
            num_of_docs_per_collection=num_of_docs_per_collection)
        collection_namespace = self.namespaces[0]
        index_gen = QueryDefinition(index_name='idx',
                                    index_fields=['age', 'city', 'country'])
        query = index_gen.generate_index_create_query(
            namespace=collection_namespace, defer_build=False)
        self.run_cbq_query(query=query)
        self.wait_until_indexes_online()

        select_query = f'select count(age) from {collection_namespace} where age >= 0'
        result = self.run_cbq_query(query=select_query)['results'][0]['$1']
        self.assertEqual(result, num_of_docs_per_collection,
                         "Doc count not matching")

        try:
            with ThreadPoolExecutor() as executor:
                task1 = executor.submit(self.cluster.async_bucket_flush,
                                        server=self.master,
                                        bucket=self.test_bucket)
                self.sleep(1)
                task2 = executor.submit(self.stop_server, data_nodes[1])
                out2 = task2.result()
                self.log.info(out2)
                out1 = task1.result()
                self.log.info(out1)

            self.sleep(5, "Wait for few secs before bringing node back on")
            self.start_server(data_nodes[1])
            result = self.run_cbq_query(query=select_query)['results'][0]['$1']
            self.assertEqual(result, 0, "Doc count not matching")
        except Exception as err:
            self.log.info(err)
            self.start_server(data_nodes[1])
        self.sleep(10)
        result = self.run_cbq_query(query=select_query)['results'][0]['$1']
        self.log.info(
            f"Doc count in collection with flush failed due to node disconnect: {result}"
        )
        self.assertTrue(result > 0, "Doc count not matching")
    def test_index_status_with_multiple_collection_with_bucket_flush(self):
        num_of_docs_per_collection = 10**5
        self.prepare_collection_for_indexing(
            num_of_docs_per_collection=num_of_docs_per_collection,
            num_collections=2)

        index_gen_list = []
        for collection_namespace in self.namespaces:
            index_gen = QueryDefinition(
                index_name='idx', index_fields=['age', 'city', 'country'])
            index_gen_list.append(index_gen)
            query = index_gen.generate_index_create_query(
                namespace=collection_namespace, defer_build=False)
            self.run_cbq_query(query=query)
            self.wait_until_indexes_online()

            select_query = f'select count(age) from {collection_namespace} where age >= 0'
            result = self.run_cbq_query(query=select_query)['results'][0]['$1']
            self.assertEqual(result, num_of_docs_per_collection,
                             "Doc count not matching")

        # Checking indexer status after bucket flush
        try:
            num_rollback = self.rest.get_num_rollback_stat(
                bucket=self.test_bucket)
            self.log.info(f"num_rollback before flush:{num_rollback}")
            task = self.cluster.async_bucket_flush(server=self.master,
                                                   bucket=self.test_bucket)
            result = task.result(timeout=200)
            self.log.info(result)
            self.sleep(
                15,
                "Giving some time to indexer to update indexes after flush")
            rollback = self.rest.get_num_rollback_stat(bucket=self.test_bucket)
            self.log.info(f"num_rollback after flush:{rollback}")
            # self.assertEqual(rollback, num_rollback+1)
            for collection_namespace in self.namespaces:
                select_query = f'select count(age) from {collection_namespace} where age >= 0'
                result = self.run_cbq_query(
                    query=select_query)['results'][0]['$1']
                self.assertEqual(result, 0, "Doc count not matching")
        except Exception as err:
            self.fail(err)
    def test_delete_bsc_with_flush_running(self):
        num_of_docs_per_collection = 10**6
        self.prepare_collection_for_indexing(
            num_of_docs_per_collection=num_of_docs_per_collection,
            batch_size=5 * 10**4)
        collection_namespace = self.namespaces[0]
        index_gen = QueryDefinition(index_name='idx',
                                    index_fields=['age', 'city', 'country'])
        query = index_gen.generate_primary_index_create_query(
            namespace=collection_namespace, defer_build=False)
        self.run_cbq_query(query=query)
        self.wait_until_indexes_online()
        _, keyspace = collection_namespace.split(':')
        bucket, scope, collection = keyspace.split('.')
        try:
            # running a select query
            query = f'select count(*) from {collection_namespace} where age >= 0'
            result = self.run_cbq_query(query=query)['results'][0]['$1']
            self.assertEqual(
                result, num_of_docs_per_collection,
                f"Result not matching. Expected: {num_of_docs_per_collection}, Actual: {result}"
            )

            # deleting BSC while Flushing
            with ThreadPoolExecutor() as executor:
                task1 = executor.submit(self.cluster.async_bucket_flush,
                                        self.master, self.test_bucket)
                task2 = executor.submit(self.delete_bucket_scope_collection,
                                        server=self.servers[0],
                                        delete_item=self.item_to_delete,
                                        bucket=bucket,
                                        scope=scope,
                                        collection=collection)
                result = task2.result()
                self.assertTrue(result,
                                f"Failed to Delete {self.item_to_delete}")
                flush_result = task1.result()
                self.log.info(flush_result)
                self.sleep(10)
                index_status = self.rest.get_index_status()
                self.assertFalse(index_status)
        except Exception as err:
            self.fail(str(err))
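The namespace parsing repeated throughout these collection tests can be read as one small helper; it assumes the "default:bucket.scope.collection" shape the tests above and below use:

# Helper equivalent to the split(':') / split('.') pairs in these tests.
def split_namespace(collection_namespace):
    _, keyspace = collection_namespace.split(':')
    bucket, scope, collection = keyspace.split('.')
    return bucket, scope, collection

bucket, scope, collection = split_namespace('default:test_bucket._default._default')
print(bucket, scope, collection)  # test_bucket _default _default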
 def test_create_gsi_index_existing_view_index(self):
     self.indexes = self.input.param("indexes", "").split(":")
     query_definition = QueryDefinition(
         index_name="test_failure_create_index_existing_index",
         index_fields=self.indexes,
         query_template="",
         groups=[])
     self.query = query_definition.generate_index_create_query(
         bucket=self.buckets[0].name, use_gsi_for_secondary=False)
     try:
         # create index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
         # create same index again
         self.query += " USING GSI "
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         self.log.info(ex)
         raise
 def test_failure_create_index_big_fields(self):
     field_name = ""
     field_name += ",".join([str(a)
                             for a in range(1, 100)]).replace(",", "_")
     query_definition = QueryDefinition(
         index_name="test_failure_create_index_existing_index",
         index_fields=field_name,
         query_template="",
         groups=[])
     self.query = query_definition.generate_index_create_query(
         bucket=self.buckets[0], gsi_type=self.gsi_type)
     try:
         # create index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         msg = "Expression not indexable"
         self.assertTrue(
             msg in str(ex),
             " 5000 error not received as expected {0}".format(ex))
 def test_composite_indexes_mutation(self):
     definitions_list = []
     if self.dataset == "array":
         query_definition = QueryDefinition(index_name="index_name_travel_history_leading",
                                             index_fields=["ALL `travel_history` END", "name", "age"],
                                             query_template="SELECT {0} FROM %s WHERE `travel_history` IS NOT NULL",
                                             groups=["array"], index_where_clause=" `travel_history` IS NOT NULL ")
         definitions_list.append(query_definition)
         query_definition = QueryDefinition(index_name="index_name_travel_history_non_leading_end",
                                             index_fields=["name", "age", "ALL `travel_history` END"],
                                             query_template="SELECT {0} FROM %s WHERE `travel_history` IS NOT NULL",
                                             groups=["array"], index_where_clause=" `travel_history` IS NOT NULL ")
         definitions_list.append(query_definition)
         query_definition = QueryDefinition(index_name="index_name_travel_history_non_leading_middle",
                                             index_fields=["name", "ALL `travel_history` END", "age"],
                                             query_template="SELECT {0} FROM %s WHERE `travel_history` IS NOT NULL",
                                             groups=["array"], index_where_clause=" `travel_history` IS NOT NULL ")
         definitions_list.append(query_definition)
         self.multi_create_index_using_rest(buckets=self.buckets, query_definitions=definitions_list)
         self.sleep(20)
         index_map = self.rest.get_index_id_map()
         for query_definition in definitions_list:
             for bucket in self.buckets:
                 doc_list = self.full_docs_list[:len(self.full_docs_list) // 2]
                 index_id = str(index_map[bucket.name][query_definition.index_name]["id"])
                 for data in DATATYPES:
                     self.change_index_field_type(bucket.name, "travel_history",
                                                  doc_list, data, query_definition)
                     actual_result = self.rest.full_table_scan_gsi_index_with_rest(
                         index_id, body={"stale": "false"})
                     expected_result = self._get_expected_results_for_full_table_scan(
                         query_definition)
                     msg = "Results don't match for index {0}. Actual number: {1}, Expected number: {2}"
                     self.assertEqual(sorted(actual_result), sorted(expected_result),
                                      msg.format(query_definition.index_name,
                                                 actual_result, expected_result))
                     self.full_docs_list = self.generate_full_docs_list(self.gens_load)
         self.multi_drop_index_using_rest(buckets=self.buckets, query_definitions=definitions_list)
    def create_gsi_indexes(self, bucket):
        self.log.info("Create indexes on 'default' bucket")
        self.n1ql_helper.run_cbq_query(
            "CREATE PRIMARY INDEX ON default USING VIEW")
        query_def = QueryDefinition(
            index_name="durable_add_aborts",
            index_fields=["age", "first_name"],
            query_template=FULL_SCAN_TEMPLATE.format("*", "name IS NOT NULL"),
            groups=[SIMPLE_INDEX, FULL_SCAN, "isnotnull", NO_ORDERBY_GROUPBY])
        query = query_def.generate_index_create_query(
            bucket.name,
            use_gsi_for_secondary=True,
            index_where_clause="mutation_type='ADD'")
        self.n1ql_helper.run_cbq_query(query)

        query_def = QueryDefinition(
            index_name="durable_set_aborts",
            index_fields=["age", "first_name"],
            query_template=FULL_SCAN_TEMPLATE.format("*", "name IS NOT NULL"),
            groups=[SIMPLE_INDEX, FULL_SCAN, "isnotnull", NO_ORDERBY_GROUPBY])
        query = query_def.generate_index_create_query(
            bucket.name,
            use_gsi_for_secondary=True,
            index_where_clause="mutation_type='SET'")
        self.n1ql_helper.run_cbq_query(query)
 def test_create_primary_using_gsi_with_existing_primary_index_views(self):
     query_definition = QueryDefinition(
         index_name="test_failure_create_primary_using_gsi_with_existing_primary_index_views", index_fields="crap",
         query_template="", groups=[])
     check = False
     self.query = "CREATE PRIMARY INDEX ON {0} USING GSI".format(self.buckets[0].name)
     try:
         # create index
         server = self.get_nodes_from_services_map(service_type="n1ql")
         self.n1ql_helper.run_cbq_query(query=self.query, server=server)
     except Exception as ex:
         self.log.info(ex)
         raise
    def test_gsi_on_ephemeral_bucket(self):
        self.prepare_collection_for_indexing(num_of_docs_per_collection=self.num_of_docs_per_collection)
        collection_namespace = self.namespaces[0]
        _, keyspace = collection_namespace.split(':')
        bucket, scope, collection = keyspace.split('.')
        index_gen = QueryDefinition(index_name='idx', index_fields=['age', 'country', 'city'])
        meta_index_gen = QueryDefinition(index_name='meta_idx', index_fields=['meta().id'])

        bucket_params = self._create_bucket_params(server=self.master, size=self.bucket_size,
                                                   replicas=self.num_replicas, bucket_type='membase',
                                                   enable_replica_index=self.enable_replica_index,
                                                   eviction_policy='valueOnly', lww=self.lww)
        self.cluster.create_standard_bucket(name='default', port=11222,
                                            bucket_params=bucket_params)

        query = index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=self.defer_build)
        self.run_cbq_query(query)
        if self.defer_build:
            build_query = index_gen.generate_build_query(namespace=collection_namespace)
            self.run_cbq_query(build_query)
        self.wait_until_indexes_online()
        query = meta_index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=self.defer_build)
        self.run_cbq_query(query)
        if self.defer_build:
            build_query = meta_index_gen.generate_build_query(namespace=collection_namespace)
            self.run_cbq_query(build_query)
        self.wait_until_indexes_online()

        index_storage_mode = self.index_rest.get_index_storage_mode()
        self.assertEqual(index_storage_mode, self.gsi_type)

        select_query = f'Select * from {collection_namespace} where age >10 and country like "A%";'
        select_meta_id_query = f'Select * from {collection} where meta().id like "doc_%";'
        count_query = f'Select count(*) from {collection_namespace} where age >= 0;'
        named_collection_query_context = f'default:{bucket}.{scope}'

        with ThreadPoolExecutor() as executor:
            select_task = executor.submit(self.run_cbq_query, query=select_query)
            meta_task = executor.submit(self.run_cbq_query, query=select_meta_id_query,
                                        query_context=named_collection_query_context)
            count_task = executor.submit(self.run_cbq_query, query=count_query)
            result = select_task.result()['results']
            meta_id_result = meta_task.result()['results']
            count_result = count_task.result()['results'][0]['$1']

        self.assertTrue(len(result) > 0)
        self.assertEqual(len(meta_id_result), self.num_of_docs_per_collection)
        self.assertEqual(count_result, self.num_of_docs_per_collection)
 def test_index_creation_with_keyspace_delete(self):
     """
     summary: This test validates the process of index creation with delete/drop of Bucket/Scope/Collection
     """
     self.prepare_collection_for_indexing(num_of_docs_per_collection=10**6)
     collection_namespace = self.namespaces[0]
     index_gen = QueryDefinition(index_name='idx',
                                 index_fields=['age', 'city'])
     query = index_gen.generate_index_create_query(
         namespace=collection_namespace)
     _, keyspace = collection_namespace.split(':')
     bucket, scope, collection = keyspace.split('.')
     try:
         with ThreadPoolExecutor() as executor:
             task1 = executor.submit(self.run_cbq_query,
                                     query=query,
                                     rest_timeout=30)
             self.sleep(5)
             task2 = executor.submit(self.delete_bucket_scope_collection,
                                     server=self.servers[0],
                                     delete_item=self.item_to_delete,
                                     bucket=bucket,
                                     scope=scope,
                                     collection=collection)
             result = task2.result()
             self.assertTrue(result,
                             f"Failed to Delete {self.item_to_delete}")
             result = task1.result()
             self.assertTrue(
                 result,
                 f"Index was successfully created with deletion of {self.item_to_delete}"
             )
     except Exception as err:
         self.log.info(str(err))
         self.sleep(30)
         index_status = self.rest.get_index_status()
         self.assertFalse(index_status)
    def test_gsi_on_ephemeral_with_server_restart(self):
        self.prepare_collection_for_indexing(num_of_docs_per_collection=self.num_of_docs_per_collection)
        collection_namespace = self.namespaces[0]
        _, keyspace = collection_namespace.split(':')
        bucket, scope, collection = keyspace.split('.')
        index_gen = QueryDefinition(index_name='idx', index_fields=['age', 'country', 'city'])
        meta_index_gen = QueryDefinition(index_name='meta_idx', index_fields=['meta().id'])

        query = index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=self.defer_build)
        self.run_cbq_query(query)
        if self.defer_build:
            build_query = index_gen.generate_build_query(namespace=collection_namespace)
            self.run_cbq_query(build_query)
        self.wait_until_indexes_online()
        query = meta_index_gen.generate_index_create_query(namespace=collection_namespace, defer_build=self.defer_build)
        self.run_cbq_query(query)
        if self.defer_build:
            build_query = meta_index_gen.generate_build_query(namespace=collection_namespace)
            self.run_cbq_query(build_query)
        self.wait_until_indexes_online()

        select_query = f'Select * from {collection_namespace} where age >10 and country like "A%";'
        select_meta_id_query = f'Select * from {collection} where meta().id like "doc_%";'
        count_query = f'Select count(*) from {collection_namespace} where age >= 0;'
        named_collection_query_context = f'default:{bucket}.{scope}'

        select_result = self.run_cbq_query(query=select_query)['results']
        meta_result = self.run_cbq_query(query=select_meta_id_query,
                                         query_context=named_collection_query_context)['results']
        count_result = self.run_cbq_query(query=count_query)['results'][0]['$1']
        self.assertTrue(len(select_result) > 0)
        self.assertEqual(len(meta_result), self.num_of_docs_per_collection)
        self.assertEqual(count_result, self.num_of_docs_per_collection)

        # restarting couchbase services
        shell = RemoteMachineShellConnection(self.master)
        shell.restart_couchbase()
        shell.disconnect()

        self.sleep(30, "Waiting for server to be up again after restart")
        select_result = self.run_cbq_query(query=select_query)['results']
        meta_result = self.run_cbq_query(query=select_meta_id_query,
                                         query_context=named_collection_query_context)['results']
        count_result = self.run_cbq_query(query=count_query)['results'][0]['$1']
        self.assertEqual(len(select_result), 0)
        self.assertEqual(len(meta_result), 0)
        self.assertEqual(count_result, 0)
 def test_deployment_plan_with_nodes_only_plan_create_drop_index_for_secondary_index(
         self):
     query_definitions = []
     tasks = []
     verification_map = {}
     query_definition_map = {}
     servers = self.get_nodes_from_services_map(service_type="index",
                                                get_all_nodes=True)
     try:
         servers.reverse()
         for bucket in self.buckets:
             query_definition_map[bucket.name] = []
             for server in servers:
                 index_name = "index_name_ip_{0}_port_{1}_{2}".format(
                     server.ip.replace(".", "_"), server.port, bucket.name)
                 query_definition = QueryDefinition(
                     index_name=index_name, index_fields=["join_yr"],
                     query_template="", groups=[])
                 query_definition_map[bucket.name].append(query_definition)
                 query_definitions.append(query_definition)
                 node_key = "{0}:{1}".format(server.ip, server.port)
                 deploy_node_info = [node_key]
                 if node_key not in list(verification_map.keys()):
                     verification_map[node_key] = {}
                 verification_map[node_key][bucket.name] = index_name
                 tasks.append(
                     self.async_create_index(
                         bucket.name,
                         query_definition,
                         deploy_node_info=deploy_node_info))
             for task in tasks:
                 task.result()
         index_map = self.get_index_stats(perNode=True)
         self.log.info(index_map)
         for bucket in self.buckets:
             for node in list(index_map.keys()):
                 self.log.info(" verifying node {0}".format(node))
                 self.assertTrue(
                     verification_map[node][bucket.name] in list(index_map[node][bucket.name].keys()),
                     "for bucket {0} and node {1}, could not find key {2} in {3}".format(
                         bucket.name, node, verification_map[node][bucket.name], index_map))
     except Exception as ex:
         self.log.info(ex)
         raise
     finally:
         for bucket in self.buckets:
             self.log.info("<<<<<<<<<<<< drop index {0} >>>>>>>>>>>".format(
                 bucket.name))
             self.run_multi_operations(
                 buckets=[bucket],
                 query_definitions=query_definition_map[bucket.name],
                 drop_index=True)
 def setUp(self):
     super(SecondaryIndexDGMTests, self).setUp()
     self.num_plasma_buckets = self.input.param("standard_buckets", 1)
     self.indexMemQuota = self.input.param("indexMemQuota", 256)
     self.in_mem_comp = self.input.param("in_mem_comp", None)
     self.sweep_interval = self.input.param("sweep_interval", 120)
     self.dgmServer = self.get_nodes_from_services_map(service_type="index")
     self.rest = RestConnection(self.dgmServer)
     if self.indexMemQuota > 256:
         log.info("Setting indexer memory quota to {0} MB...".format(
             self.indexMemQuota))
         self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                           memoryQuota=self.indexMemQuota)
         self.sleep(30)
     if self.in_mem_comp:
         self.rest.set_index_settings({
             "indexer.plasma.mainIndex.evictSweepInterval":
             self.sweep_interval
         })
         self.rest.set_index_settings({
             "indexer.plasma.backIndex.evictSweepInterval":
             self.sweep_interval
         })
         self.rest.set_index_settings(
             {"indexer.plasma.backIndex.enableInMemoryCompression": True})
         self.rest.set_index_settings(
             {"indexer.plasma.mainIndex.enableInMemoryCompression": True})
         # add if condition
         self.rest.set_index_settings(
             {"indexer.settings.persisted_snapshot.moi.interval": 60000})
     self.deploy_node_info = [
         "{0}:{1}".format(self.dgmServer.ip, self.node_port)
     ]
     self.load_query_definitions = []
     self.initial_index_number = self.input.param("initial_index_number", 5)
     for x in range(self.initial_index_number):
         index_name = "index_name_" + str(x)
         query_definition = QueryDefinition(
             index_name=index_name,
             index_fields=["VMs"],
             query_template="SELECT * FROM %s ",
             groups=["simple"],
             index_where_clause=" VMs IS NOT NULL ")
         self.load_query_definitions.append(query_definition)
     if self.load_query_definitions:
         self.multi_create_index(
             buckets=self.buckets,
             query_definitions=self.load_query_definitions,
             deploy_node_info=self.deploy_node_info)
 def setUp(self):
     self.use_replica = True
     super(SecondaryIndexingPlasmaDGMRecoveryTests, self).setUp()
     self.initial_index_number = self.input.param("initial_index_number", 10)
     self.load_query_definitions = []
     for x in range(self.initial_index_number):
         index_name = "index_name_" + str(x)
         query_definition = QueryDefinition(
             index_name=index_name, index_fields=["VMs"],
             query_template="SELECT * FROM %s ", groups=["simple"],
             index_where_clause = " VMs IS NOT NULL ")
         self.load_query_definitions.append(query_definition)
     if self.load_query_definitions:
         self.multi_create_index(buckets=self.buckets,
                                 query_definitions=self.load_query_definitions)
    def test_new_index_placement_by_greedy_planner(self):
        index_nodes = self.get_nodes_from_services_map(service_type="index",
                                                       get_all_nodes=True)
        if len(index_nodes) < 3:
            self.fail("Need at least 3 index nodes")
        self.prepare_collection_for_indexing(
            num_of_docs_per_collection=self.num_of_docs_per_collection)
        collection_namespace = self.namespaces[0]

        index_gen = QueryDefinition(index_name='idx',
                                    index_fields=['age', 'country', 'city'])
        query = index_gen.generate_index_create_query(
            namespace=collection_namespace,
            defer_build=self.defer_build,
            num_replica=self.num_replicas)
        self.run_cbq_query(query)
        if self.defer_build:
            build_query = index_gen.generate_build_query(
                namespace=collection_namespace)
            self.run_cbq_query(build_query)
        self.wait_until_indexes_online()
        least_loaded_node = self._find_least_loaded_index_node()[0]

        # creating new index and checking where does planner place it
        idx_new = 'idx_new'
        new_index_gen = QueryDefinition(index_name=idx_new,
                                        index_fields=['age'])
        query = new_index_gen.generate_index_create_query(
            namespace=collection_namespace, defer_build=self.defer_build)
        self.run_cbq_query(query=query)
        if self.defer_build:
            build_query = index_gen.generate_build_query(
                namespace=collection_namespace)
            self.run_cbq_query(build_query)
        self.wait_until_indexes_online()

        index_metadata = self.index_rest.get_indexer_metadata()['status']
        for index in index_metadata:
            if index['indexName'] == idx_new:
                host = index['hosts'][0].split(':')[0]
                self.assertEqual(
                    host, least_loaded_node,
                    "Index not created on Least Loaded Index node")
                break
        else:
            self.fail("new Index stats not available in index_metadata")
 def test_change_key_size(self):
     self.iterations = self.input.param("num_iterations", 5)
     buckets = self._create_plasma_buckets()
     if self.plasma_dgm:
         self.get_dgm_for_plasma(indexer_nodes=[self.dgmServer])
     query_definition = QueryDefinition(
         index_name="index_name_big_values",
         index_fields=["bigValues"],
         query_template="SELECT * FROM %s WHERE bigValues IS NOT NULL",
         groups=["simple"], index_where_clause=" bigValues IS NOT NULL ")
     self.multi_create_index(buckets=buckets,
                             query_definitions=[query_definition])
     template = '{{"name":"{0}", "age":{1}, "bigValues": "{2}" }}'
     generators = []
     for j in range(self.iterations):
         for i in range(10):
             name = FIRST_NAMES[random.choice(range(len(FIRST_NAMES)))]
             id_size = random.choice(range(5, 10))
             short_str = "".join(random.choice(lowercase) for k in range(id_size))
             id = "{0}-{1}".format(name, short_str)
             age = random.choice(range(4, 19))
             bigValues = "".join(random.choice(lowercase) for k in range(5))
             generators.append(DocumentGenerator(
                 id, template, [name], [age], [bigValues], start=0, end=10))
         self.load(generators, flag=self.item_flag, verify_data=False,
                   batch_size=self.batch_size)
         self.full_docs_list = self.generate_full_docs_list(generators)
         self.gen_results = TuqGenerators(self.log, self.full_docs_list)
         self.multi_query_using_index(buckets=buckets,
                                      query_definitions=[query_definition])
         for i in range(10):
             name = FIRST_NAMES[random.choice(range(len(FIRST_NAMES)))]
             id_size = random.choice(range(100, 200))
             long_str = "".join(random.choice(lowercase) for k in range(id_size))
             id = "{0}-{1}".format(name, long_str)
             age = random.choice(range(4, 19))
             bigValues = "".join(random.choice(lowercase) for k in range(5))
             generators.append(DocumentGenerator(
                 id, template, [name], [age], [bigValues], start=0, end=10))
         self.load(generators, flag=self.item_flag, verify_data=False,
                   batch_size=self.batch_size)
         self.full_docs_list = self.generate_full_docs_list(generators)
         self.gen_results = TuqGenerators(self.log, self.full_docs_list)
         self.multi_query_using_index(buckets=buckets,
                                      query_definitions=[query_definition])
     self.sleep(30)
     self.multi_drop_index(buckets=buckets,
                           query_definitions=[query_definition])
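The bare `lowercase` name this test keeps reaching for is Python 2's string.lowercase; under Python 3 the equivalent is string.ascii_lowercase. A Python 3 form of the random-id recipe, with random_id as an illustrative helper name:

# Python 3 version of the id generation used in test_change_key_size.
import random
import string

def random_id(name, low=5, high=10):
    size = random.randint(low, high)
    suffix = "".join(random.choice(string.ascii_lowercase) for _ in range(size))
    return "{0}-{1}".format(name, suffix)

print(random_id("James"))  # e.g. James-qkzvt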
    def test_alter_index_with_num_partition(self):
        index_nodes = self.get_nodes_from_services_map(service_type="index",
                                                       get_all_nodes=True)
        if len(index_nodes) < 3:
            self.skipTest(
                "Can't run Alter index tests with fewer than 3 index nodes")
        new_replica = 1
        self.prepare_collection_for_indexing()
        collection_namespace = self.namespaces[0]
        idx1, idx2 = 'idx1', 'idx2'
        index_gen1 = QueryDefinition(index_name=idx1,
                                     index_fields=['age'],
                                     partition_by_fields=['meta().id'])
        index_gen2 = QueryDefinition(index_name=idx2,
                                     index_fields=['city'],
                                     partition_by_fields=['meta().id'])
        query = index_gen1.generate_index_create_query(
            namespace=collection_namespace)
        self.run_cbq_query(query=query)
        query = index_gen2.generate_index_create_query(
            namespace=collection_namespace)
        self.run_cbq_query(query=query)
        self.wait_until_indexes_online()
        self.sleep(5)

        index_metadata = self.rest.get_indexer_metadata()['status']
        self.assertEqual(len(index_metadata), 2,
                         "No. of indexes are not matching.")
        for index in index_metadata:
            self.assertTrue(index['partitioned'],
                            f"{index} is not a partitioned index")
            self.assertEqual(index['numPartition'], 8,
                             "No. of partitions are not matching")
            self.assertEqual(index['numReplica'], 0,
                             "No. of replicas are not matching")

        self.alter_index_replicas(namespace=collection_namespace,
                                  index_name=idx1,
                                  num_replicas=new_replica)
        self.alter_index_replicas(namespace=collection_namespace,
                                  index_name=idx2,
                                  num_replicas=new_replica)
        self.sleep(10)
        self.wait_until_indexes_online()

        index_metadata = self.rest.get_indexer_metadata()['status']
        self.assertEqual(len(index_metadata), 4,
                         "No. of indexes are not matching.")
        for index in index_metadata:
            self.assertTrue(index['partitioned'],
                            f"{index} is not a partitioned index")
            self.assertEqual(index['numPartition'], 8,
                             "No. of partitions are not matching")
            self.assertEqual(index['numReplica'], new_replica,
                             "No. of replicas are not matching")
    def test_index_placement_for_equivalent_indexes(self):
        index_nodes = self.get_nodes_from_services_map(service_type="index",
                                                       get_all_nodes=True)
        if len(index_nodes) != 3:
            self.fail("Need 3 index nodes")
        self.prepare_collection_for_indexing(
            num_of_docs_per_collection=self.num_of_docs_per_collection)
        collection_namespace = self.namespaces[0]

        for item in range(self.initial_index_num):
            index_fields = list(
                self.index_field_set[item % len(self.index_field_set)])
            index_gen = QueryDefinition(index_name=f'idx_{item}',
                                        index_fields=index_fields)
            query = index_gen.generate_index_create_query(
                namespace=collection_namespace, defer_build=self.defer_build)
            self.run_cbq_query(query)
            if self.defer_build:
                build_query = index_gen.generate_build_query(
                    namespace=collection_namespace)
                self.run_cbq_query(build_query)
            self.wait_until_indexes_online()
        self.sleep(10)

        least_loaded_node = self._find_least_loaded_index_node()
        indexer_meta_data = self.index_rest.get_indexer_metadata()['status']
        equivalent_index_field = None
        for index in indexer_meta_data:
            host = index['hosts'][0].split(':')[0]
            if host in least_loaded_node:
                equivalent_index_field = index['secExprs'][0]
                break

        # creating an equivalent index with one replica so that its replicas land on nodes other than the least loaded node
        new_idx = "new_idx"
        new_index_gen = QueryDefinition(index_name=new_idx,
                                        index_fields=[equivalent_index_field])
        query = new_index_gen.generate_index_create_query(
            namespace=collection_namespace, num_replica=self.num_replicas)
        self.run_cbq_query(query=query)
        self.wait_until_indexes_online()

        indexer_meta_data = self.index_rest.get_indexer_metadata()['status']
        for index in indexer_meta_data:
            if index['indexName'] == new_idx:
                host = index['hosts'][0].split(':')[0]
                self.assertTrue(
                    host not in least_loaded_node,
                    "Equivalent index replica created on least loaded node, not maintaining HA"
                )
 def test_oom_create_build_index(self):
     """
     Create an
     :return:
     """
     self.assertTrue(self._push_indexer_off_the_cliff(),
                     "OOM Can't be achieved")
     index_name = "oom_index"
     query_definition = QueryDefinition(index_name=index_name,
                                        index_fields=["join_mo"],
                                        query_template="",
                                        groups=["simple"])
     try:
         self.create_index(self.buckets[0].name, query_definition,
                           self.deploy_node_info)
     except Exception as ex:
         self.log.info("{0}".format(str(ex)))
 def test_sorted_removed_items_indexed(self):
     if self.plasma_dgm:
         self.get_dgm_for_plasma(indexer_nodes=[self.dgmServer])
     generators = self._upload_documents_in_sorted()
     self.full_docs_list = self.generate_full_docs_list(generators)
     self.gen_results = TuqGenerators(self.log, self.full_docs_list)
     query_definition = QueryDefinition(
         index_name="index_range_shrink_name",
         index_fields=["name"],
         query_template="SELECT * FROM %s WHERE name IS NOT NULL",
         groups=["simple"],
         index_where_clause=" name IS NOT NULL ")
     buckets = []
     for bucket in self.buckets:
         if bucket.name.startswith("plasma_dgm"):
             buckets.append(bucket)
     self.multi_create_index(buckets=buckets,
                             query_definitions=[query_definition])
     self.sleep(30)
     intervals = [["d", "e", "f"], ["j", "k", "l", "m"],
                  ["p", "q", "r", "s"]]
     temp_list = []
     for doc_gen in self.full_docs_list:
         for interval in intervals:
             for character in interval:
                 if doc_gen["name"].lower().startswith(character):
                     for bucket in buckets:
                         url = "couchbase://{0}/{1}".format(
                             self.master.ip, bucket.name)
                         cb = Bucket(url,
                                     username=bucket.name,
                                     password="******")
                         cb.remove(doc_gen["_id"])
                         temp_list.append(doc_gen)
             if not self.multi_intervals:
                 break
     self.full_docs_list = [
         doc for doc in self.full_docs_list if doc not in temp_list
     ]
     self.gen_results = TuqGenerators(self.log, self.full_docs_list)
     self.multi_query_using_index(buckets=buckets,
                                  query_definitions=[query_definition])
     self.sleep(30)
     self.multi_drop_index(buckets=buckets,
                           query_definitions=[query_definition])