    def test_rbac_flex(self):
        self._load_test_buckets(create_index=False)
        user = self.input.param("user", '')
        if user == '':
            raise Exception(
                "Invalid test configuration! User name should not be empty.")

        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_index = self.create_fts_index(name="idx_beer_sample_fts",
                                          doc_count=7303,
                                          source_name='beer-sample')

        self.wait_for_fts_indexing_complete(fts_index, 7303)

        self._create_user(user, 'beer-sample')

        username = self.users[user]['username']
        password = self.users[user]['password']
        query = "select meta().id from `beer-sample` use index (using fts, using gsi) where state = \"California\""

        master_result = self.run_cbq_query(query=query,
                                           server=self.master,
                                           username=username,
                                           password=password)
        self.assertEqual(master_result['status'], 'success',
                         username + " query run failed on non-fts node")

        self.cbcluster.delete_all_fts_indexes()

    def test_rbac_flex_not_granted_n1ql(self):
        self._load_test_buckets()
        user = self.input.param("user", '')
        if user == '':
            raise Exception("Invalid test configuration! User name should not be empty.")

        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        self.create_fts_index(name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
        self._create_user(user, 'beer-sample')

        username = self.users[user]['username']
        password = self.users[user]['password']
        query = "select meta().id from `beer-sample` use index (using fts, using gsi) where state = \"California\""

        try:
            self.run_cbq_query(query=query, server=self.master, username=username, password=password)
            self.fail("Could able to run query without n1ql permissions")
        except CBQError as e:
            self.log.info(str(e))
            if not "User does not have credentials to run SELECT queries" in str(e):
                self.fail("Failed to run query with other CBQ issues: {0}".format(str(e)))
        except Exception as e:
            self.fail("Failed to run query with other issues: {0}".format(str(e)))

        self.cbcluster.delete_all_fts_indexes()

    def test_partitioning(self):
        partitions_number = self.input.param("partitions_num")
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts",
                                         doc_count=7303,
                                         source_name='beer-sample')
        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        default_results = self.run_cbq_query(n1ql_query)
        self._update_partiotions_for_fts_index(fts_idx, partitions_number)
        self.sleep(60)
        new_partitioning_result = self.run_cbq_query(n1ql_query)

        n1ql_doc_ids_before_partitioning = []
        for result in default_results['results']:
            n1ql_doc_ids_before_partitioning.append(result['id'])

        n1ql_doc_ids_after_partitioning = []
        for result in new_partitioning_result['results']:
            n1ql_doc_ids_after_partitioning.append(result['id'])

        self.assertEqual(sorted(n1ql_doc_ids_before_partitioning),
                         sorted(n1ql_doc_ids_after_partitioning),
                         "Results after partitioning do not match.")
    def test_cluster_config_stable(self):
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)

        self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
        n1ql_node = self.find_child_node_with_service("n1ql")
        if n1ql_node is None:
            self.log("Cannot find n1ql child node!")
        fts_node = self.find_child_node_with_service("fts")
        if fts_node is None:
            self.log("Cannot find fts child node!")

        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":1000})"
        fts_request = {"query":{"field":"state", "match":"California"}, "size":1000}
        n1ql_results = self.run_cbq_query(n1ql_query, server=n1ql_node)['results']
        n1ql_doc_ids = []
        for result in n1ql_results:
            n1ql_doc_ids.append(result['id'])

        total_hits, hits, took, status = \
            rest.run_fts_query(index_name="idx_beer_sample_fts",
                               query_json=fts_request)

        fts_doc_ids = []
        for hit in hits:
            fts_doc_ids.append(hit['id'])

        self.assertEqual(len(n1ql_doc_ids), len(fts_doc_ids),
                         "Result counts do not match: FTS - " + str(len(fts_doc_ids)) +
                         ", N1QL - " + str(len(n1ql_doc_ids)))
        self.assertEqual(sorted(fts_doc_ids), sorted(n1ql_doc_ids),
                         "Found mismatch between FTS and N1QL results.")

        self.remove_all_fts_indexes()

    def test_cluster_add_new_fts_node(self):
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts",
                                         doc_count=7303,
                                         source_name='beer-sample')
        number_of_replicas = self.input.param("num_replicas", 0)
        self._update_replica_for_fts_index(fts_idx, number_of_replicas)
        self.sleep(60)
        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_before_rebalance = []
        for result in n1ql_results:
            n1ql_doc_ids_before_rebalance.append(result['id'])

        self.cluster.rebalance(self.servers, [self.servers[4]], [],
                               services=["fts"])

        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_after_rebalance = []
        for result in n1ql_results:
            n1ql_doc_ids_after_rebalance.append(result['id'])

        self.assertEqual(sorted(n1ql_doc_ids_before_rebalance),
                         sorted(n1ql_doc_ids_after_rebalance),
                         "Results after rebalance does not match.")
    def test_fts_node_failover_partial_results(self):
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts",
                                         doc_count=7303,
                                         source_name='beer-sample')
        self.run_cbq_query("drop index `beer-sample`.beer_primary")

        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        n1ql_results_before_failover = self.run_cbq_query(
            n1ql_query)['results']
        n1ql_doc_ids_before_failover = []
        for result in n1ql_results_before_failover:
            n1ql_doc_ids_before_failover.append(result['id'])

        self.cluster.failover(servers=self.servers,
                              failover_nodes=[self.servers[2]],
                              graceful=False)
        error_found = False
        try:
            self.run_cbq_query(n1ql_query)
        except CBQError as err:
            self.assertTrue("pindex not available" in str(err),
                            "Partial results error message is not graceful.")
            error_found = True
        self.assertTrue(error_found,
                        "Partial result set is not allowed for SEARCH() queries.")
    def test_cluster_replicas_failover_rebalance(self):
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts",
                                         doc_count=7303,
                                         source_name=self.sample_bucket)
        number_of_replicas = self.input.param("num_replicas", 0)
        self._update_replica_for_fts_index(fts_idx, number_of_replicas)
        self.sleep(60)
        n1ql_query = "select meta().id from `{0}` where search(`{0}`, {{\"query\":{{\"field\":\"state\"," \
                     " \"match\":\"California\"}}, \"size\":10000}})".format(self.sample_bucket)
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_before_failover = []
        for result in n1ql_results:
            n1ql_doc_ids_before_failover.append(result['id'])

        self.cluster.failover(servers=self.servers,
                              failover_nodes=[self.servers[2]],
                              graceful=False)
        rebalance = self.cluster.rebalance(self.servers, [], [self.servers[2]])
        self.assertTrue(rebalance, "Rebalance failed.")
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_after_rebalance = []
        for result in n1ql_results:
            n1ql_doc_ids_after_rebalance.append(result['id'])

        self.assertEqual(sorted(n1ql_doc_ids_before_failover),
                         sorted(n1ql_doc_ids_after_rebalance),
                         "Results after rebalance does not match.")
    def setUp(self):
        super(N1qlFTSIntegrationPhase2ClusteropsTest, self).setUp()
        self.sample_bucket = 'beer-sample'
        self.query_buckets = self.get_query_buckets(sample_buckets=[self.sample_bucket])
        self.query_bucket = self.query_buckets[1]
        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest setup has started ==============")
        self.log_config_info()
        cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        fts_nodes = cbcluster.get_fts_nodes()
        RestConnection(fts_nodes[0]).set_fts_ram_quota(900)

        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest setup has completed ==============")
    def test_n1ql_syntax_select_from_let(self):
        query_bucket = self.get_collection_name(self.default_bucket_name)
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        rest = self.get_rest_client(self.servers[0].rest_username,
                                    self.servers[0].rest_password)
        self._create_fts_index(index_name="idx_default_fts",
                               doc_count=98784,
                               source_name=query_bucket)
        self.drop_index_safe(bucket_name=self.default_bucket_name,
                             index_name="#primary",
                             is_primary=True)

        self.scan_consistency = "NOT_BOUNDED"
        n1ql_query = "select meta().id from {0} let res=true where search(default, {{\"query\": {{\"field\": " \
                     "\"email\", \"match\":\"'9'\"}}, \"size\":10000}})=res".format(query_bucket)
        fts_request = {
            "query": {
                "field": "email",
                "match": "'9'"
            },
            "size": 10000
        }
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        total_hits, hits, took, status = rest.run_fts_query(
            index_name="idx_default_fts", query_json=fts_request)
        comparison_results = self._compare_n1ql_results_against_fts(
            n1ql_results, hits)
        self.assertEqual(comparison_results, "OK", comparison_results)
        self.log.info(
            "n1ql+fts integration sanity test passed: n1ql query results match the fts service call results."
        )
        self.log.info("n1ql results: " + str(n1ql_results))

        explain_result = self.run_cbq_query("explain " + n1ql_query)
        self.assertTrue("idx_default_fts" in str(explain_result),
                        "FTS index is not used!")
        self.log.info(
            "n1ql+fts integration sanity test passed: fts index usage found in the execution plan."
        )
        self._remove_all_fts_indexes()
        self.scan_consistency = "REQUEST_PLUS"


class FlexIndexTests(QueryTests):

    users = {}

    def suite_setUp(self):
        super(FlexIndexTests, self).suite_setUp()

    def init_flex_object(self, test_object):
        self.log = test_object.log
        self.master = test_object.master
        self.input = test_object.input
        self.buckets = test_object.buckets
        self.testrunner_client = test_object.testrunner_client
        self.use_rest = test_object.use_rest
        self.scan_consistency = test_object.scan_consistency
        self.hint_index = test_object.hint_index
        self.n1ql_port = test_object.n1ql_port
        self.analytics = test_object.analytics
        self.named_prepare = test_object.named_prepare
        self.servers = test_object.servers

    def setUp(self):
        super(FlexIndexTests, self).setUp()
        #self._load_test_buckets()
        self.log.info(
            "==============  FlexIndexTests setuAp has started ==============")
        self.query_node = self.get_nodes_from_services_map(service_type="n1ql")
        if self.query_node:
            self.log_config_info(self.query_node)
        self.dataset = self.input.param("flex_dataset", "emp")
        self.use_index_name_in_query = bool(
            self.input.param("use_index_name_in_query", True))
        self.expected_gsi_index_map = {}
        self.expected_fts_index_map = {}
        self.gsi_fields = []
        self.custom_map = self.input.param("custom_map", False)
        self.bucket_name = self.input.param("bucket_name", 'default')
        self.flex_query_option = self.input.param("flex_query_option",
                                                  "flex_use_fts_query")
        self.rebalance_in = self.input.param("rebalance_in", False)
        self.failover_fts = self.input.param("failover_fts", False)
        self.use_fts_query_param = self.input.param("use_fts_query_param",
                                                    None)
        self.log.info(
            "==============  FlexIndexTests setup has completed =============="
        )

    def tearDown(self):
        self.log.info(
            "==============  FlexIndexTests tearDown has started =============="
        )
        self.query_node = self.get_nodes_from_services_map(service_type="n1ql")
        if self.query_node:
            self.log_config_info(self.query_node)
        self.log.info(
            "==============  FlexIndexTests tearDown has completed =============="
        )
        super(FlexIndexTests, self).tearDown()

    def suite_tearDown(self):
        self.log.info(
            "==============  FlexIndexTests suite_tearDown has started =============="
        )
        self.log_config_info()
        self.log.info(
            "==============  FlexIndexTests suite_tearDown has completed =============="
        )
        super(FlexIndexTests, self).suite_tearDown()

# ============================ # Utils ===========================================

    def compare_results_with_gsi(self,
                                 flex_query,
                                 gsi_query,
                                 use_fts_query_param=None):
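        """Run the flex query and its GSI-only twin, then deep-compare the
        result sets ignoring order; returns True only when both queries run
        and produce identical results."""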
        try:
            flex_result = self.run_cbq_query(
                flex_query,
                query_params={},
                use_fts_query_param=use_fts_query_param,
                server=self.query_node)["results"]
            self.log.info(
                "Number of results from flex query: {0} is {1}".format(
                    flex_query, len(flex_result)))
        except Exception as e:
            self.log.info("Failed to run flex query: {0}".format(flex_query))
            self.log.error(e)
            return False

        try:
            gsi_result = self.run_cbq_query(gsi_query,
                                            query_params={},
                                            use_fts_query_param=None,
                                            server=self.query_node)["results"]
            self.log.info(
                "Number of results from gsi query: {0} is {1}".format(
                    gsi_query, len(gsi_result)))
        except Exception as e:
            self.log.info("Failed to run gsi query: {0}".format(gsi_query))
            self.log.error(e)
            return False

        if len(flex_result) != len(gsi_result):
            self.log.info(
                "Number of results not matching b/w flex and GSI results")
            return False

        diffs = DeepDiff(flex_result, gsi_result, ignore_order=True)

        if diffs:
            self.log.info(
                "There are differences in the results b/w flex and GSI results"
            )
            return False

        return True

    def get_gsi_fields_partial_sargability(self):
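        """Collect the dataset fields not covered by any FTS smart query
        field; these become candidates for GSI indexes so that queries are
        only partially sargable by FTS."""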
        fts_fields = self.query_gen.fields
        available_fields = DATASET.CONSOLIDATED_FIELDS
        for field in available_fields:
            field_found = False
            for k, v in fts_fields.items():
                if field in v:
                    field_found = True
                    break
            if not field_found:
                self.gsi_fields.append(field)
        self.gsi_fields = list(set(self.gsi_fields))

    def create_gsi_indexes(self):
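        """Create one secondary GSI index per collected field (array fields
        get an ALL ARRAY expression) and record the field -> index name
        mapping in expected_gsi_index_map."""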
        count = 0
        self.expected_gsi_index_map = {}
        for field in self.gsi_fields:
            field = self.query_gen.replace_underscores(field)
            field_proxy = field
            #handling array fields
            if field == "languages_known":
                field = "ALL ARRAY v for v in languages_known END"
            if field == "manages.reports":
                field = "ALL ARRAY v for v in manages.reports END"
            if field_proxy not in self.expected_gsi_index_map and field_proxy != "type":
                gsi_index_name = "gsi_index_" + str(count)
                self.run_cbq_query("create index {0} on `{2}`({1})".format(
                    gsi_index_name, field, self.bucket_name))
                self.expected_gsi_index_map[field_proxy] = [gsi_index_name]
                count += 1
        self.log.info("expected_gsi_index_map {0}".format(
            self.expected_gsi_index_map))

    def check_if_predicate_has_gsi_field(self, query):
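        """Return True when the query text references any of the collected
        GSI-only fields."""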
        for field in self.gsi_fields:
            field = self.query_gen.replace_underscores(field)
            if field in query:
                return True
        return False

    def get_all_indexnames_from_response(self, response_json):
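        """Breadth-first walk over a nested query response, collecting every
        value stored under an "index" key. For a hypothetical EXPLAIN-like
        payload {"plan": {"~children": [{"index": "idx_a"}]}} this yields
        ["idx_a"]."""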
        queue = deque([response_json])
        index_names = []
        while queue:
            node = queue.popleft()
            nodekey = None
            nodevalue = node
            if isinstance(node, tuple):
                nodekey = node[0]
                nodevalue = node[1]

            if isinstance(nodevalue, Mapping):
                for k, v in nodevalue.items():
                    queue.extend([(k, v)])
            elif (isinstance(nodevalue, (Sequence, Set))
                  and not isinstance(nodevalue, str)):
                queue.extend(nodevalue)
            else:
                if nodekey == "index" and nodevalue not in index_names:
                    index_names.append(nodevalue)
        return index_names

    def check_if_expected_index_exist(self, result, expected_indexes):
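        """Return True only if every index name found in the response appears
        in the expected list."""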
        actual_indexes = self.get_all_indexnames_from_response(result)
        found = True
        self.log.info(
            "Actual indexes present: {0}, Expected Indexes: {1}".format(
                sorted(actual_indexes), sorted(expected_indexes)))
        for index in actual_indexes:
            if index not in expected_indexes:
                found = False
        return found

    def get_expected_indexes(self, flex_query, expected_index_map):
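        """Map the fields referenced in a flex query to the index names the
        planner is expected to pick, using the given field -> index map."""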
        available_fields = DATASET.CONSOLIDATED_FIELDS
        expected_indexes = []
        for field in available_fields:
            field = self.query_gen.replace_underscores(field)
            if " {0}".format(
                    field) in flex_query and field in expected_index_map.keys(
                    ):
                for index in expected_index_map[field]:
                    expected_indexes.append(index)

        return list(set(expected_indexes))

    def run_query_and_validate(self, query_list):
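        """For each query template, EXPLAIN the flex variant (with the USE
        INDEX hint filled in), check that the expected index appears in the
        plan, then compare flex results against a plain GSI run; returns the
        query numbers that failed each stage."""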
        failed_to_run_query = []
        not_found_index_in_response = []
        result_mismatch = []
        iteration = 1
        if not hasattr(self, "query_node"):
            self.query_node = self.get_nodes_from_services_map(
                service_type="n1ql")

        for query in query_list:
            query_num = iteration
            iteration += 1
            self.log.info(
                "======== Running Query # {0} =======".format(query_num))
            flex_query = query.format("USE INDEX (USING FTS, USING GSI)")
            gsi_query = query.format("")
            explain_query = "explain " + flex_query
            self.log.info("Query : {0}".format(explain_query))
            try:
                result = self.run_cbq_query(explain_query,
                                            server=self.query_node)
            except Exception as e:
                self.log.info("Failed to run query")
                self.log.error(e)
                failed_to_run_query.append(query_num)
                continue
            try:
                self.assertTrue(
                    self.check_if_expected_index_exist(result,
                                                       ["default_index"]))
            except Exception as e:
                self.log.info("Failed to find fts index name in plan query")
                self.log.error(e)
                not_found_index_in_response.append(query_num)
                continue

            if not self.compare_results_with_gsi(flex_query, gsi_query):
                self.log.error("Result mismatch found")
                result_mismatch.append(query_num)

            self.log.info("======== Done =======")

        return failed_to_run_query, not_found_index_in_response, result_mismatch

    def run_queries_and_validate(self, partial_sargability=None):
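        """Same validation loop as run_query_and_validate, but driven by the
        generated fts_flex_queries/gsi_queries pairs and the expected FTS and
        GSI index maps."""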
        iteration = 1
        failed_to_run_query = []
        not_found_index_in_response = []
        result_mismatch = []
        for flex_query_ph, gsi_query in zip(self.query_gen.fts_flex_queries,
                                            self.query_gen.gsi_queries):
            query_num = iteration
            iteration += 1
            self.log.info(
                "======== Running Query # {0} =======".format(query_num))
            expected_fts_index = []
            expected_gsi_index = []
            if self.flex_query_option != "flex_use_gsi_query":
                expected_fts_index = self.get_expected_indexes(
                    flex_query_ph, self.expected_fts_index_map)
            expected_gsi_index = self.get_expected_indexes(
                flex_query_ph, self.expected_gsi_index_map)
            if self.use_fts_query_param:
                flex_query = gsi_query
            else:
                flex_query = self.get_runnable_flex_query(
                    flex_query_ph, expected_fts_index, expected_gsi_index)
            if self.flex_query_option == "flex_use_gsi_query":
                expected_gsi_index.append("primary_gsi_index")
            # MB-39493: with multiple ORs (or an OR over a GSI-only field),
            # the planner may fall back to the primary GSI index as well
            if (partial_sargability and self.flex_query_option == "flex_use_fts_query" and flex_query.count("OR") > 1) \
                    or (self.check_if_predicate_has_gsi_field(flex_query) and
                        self.flex_query_option == "flex_use_fts_query" and flex_query.count("OR") >= 1):
                expected_gsi_index.append("primary_gsi_index")
            explain_query = "explain " + flex_query
            self.log.info("Query : {0}".format(explain_query))
            try:
                result = self.run_cbq_query(
                    explain_query,
                    query_params={},
                    use_fts_query_param=self.use_fts_query_param)
            except Exception as e:
                self.log.info("Failed to run query")
                self.log.error(e)
                failed_to_run_query.append(query_num)
                continue
            try:
                self.assertTrue(
                    self.check_if_expected_index_exist(
                        result, expected_fts_index + expected_gsi_index))
            except Exception as e:
                self.log.info("Failed to find fts index name in plan query")
                self.log.error(e)
                not_found_index_in_response.append(query_num)
                continue

            if not self.compare_results_with_gsi(flex_query, gsi_query,
                                                 self.use_fts_query_param):
                self.log.error("Result mismatch found")
                result_mismatch.append(query_num)

            self.log.info("======== Done =======")

        return failed_to_run_query, not_found_index_in_response, result_mismatch

    def merge_smart_fields(self, smart_fields1, smart_fields2):
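        """Union two smart-field maps, de-duplicating the merged field lists;
        e.g. {"str": ["a"]} and {"str": ["b"]} combine to {"str": ["a", "b"]}."""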
        combined_fields = {}
        for key in smart_fields1.keys():
            if key in smart_fields2.keys():
                combined_fields[key] = list(
                    set(smart_fields1[key] + smart_fields2[key]))
            else:
                combined_fields[key] = smart_fields1[key]

        for key in smart_fields2.keys():
            if key not in smart_fields1.keys():
                combined_fields[key] = smart_fields2[key]
        return combined_fields

    def get_runnable_flex_query(self, flex_query_ph, expected_fts_index,
                                expected_gsi_index):
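        """Build the USE INDEX hint string (named FTS and/or GSI indexes,
        depending on flex_query_option) and substitute it into the query
        placeholder."""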
        use_fts_hint = "USING FTS"
        use_gsi_hint = "USING GSI"
        final_hint = ""
        if self.flex_query_option == "flex_use_fts_query" or self.flex_query_option == "flex_use_fts_gsi_query":
            if self.use_index_name_in_query:
                for index in expected_fts_index:
                    if final_hint == "":
                        final_hint = "{0} {1}".format(index, use_fts_hint)
                    else:
                        final_hint = "{0}, {1} {2}".format(
                            final_hint, index, use_fts_hint)
            else:
                final_hint = use_fts_hint

        if self.flex_query_option == "flex_use_gsi_query" or self.flex_query_option == "flex_use_fts_gsi_query":
            if self.use_index_name_in_query and expected_gsi_index:
                for index in expected_gsi_index:
                    if final_hint == "":
                        final_hint = "{0} {1}".format(index, use_gsi_hint)
                    else:
                        final_hint = "{0}, {1} {2}".format(
                            final_hint, index, use_gsi_hint)
            elif final_hint != "":
                final_hint = "{0}, {1}".format(final_hint, use_gsi_hint)
            else:
                final_hint = use_gsi_hint

        flex_query = flex_query_ph.format(flex_hint=final_hint)

        return flex_query

    def run_queries_and_validate_clusterops(self):
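        """Validation wrapper run in a parallel thread during cluster
        operations; only unrunnable queries and missing plan indexes fail the
        test here, result mismatches are tolerated."""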
        failed_to_run_query, not_found_index_in_response, result_mismatch = self.run_queries_and_validate(
        )

        if failed_to_run_query or not_found_index_in_response:
            self.fail(
                "Found queries not runnable: {0} or required index not found in the query resonse: {1} "
                .format(failed_to_run_query, not_found_index_in_response))
        else:
            self.log.info("All {0} queries passed".format(
                len(self.query_gen.fts_flex_queries)))

# ======================== tests =====================================================

    def test_flex_single_typemapping(self):

        self._load_emp_dataset(end=self.num_items)

        fts_index = self.create_fts_index(name="custom_index",
                                          source_name=self.bucket_name)
        self.generate_random_queries(fts_index.smart_query_fields)
        self.update_expected_fts_index_map(fts_index)
        if not self.is_index_present("default", "primary_gsi_index"):
            self.run_cbq_query(
                "create primary index primary_gsi_index on default")

        self.wait_for_fts_indexing_complete(fts_index, self.num_items)

        failed_to_run_query, not_found_index_in_response, result_mismatch = self.run_queries_and_validate(
        )
        self.cbcluster.delete_all_fts_indexes()

        if failed_to_run_query or not_found_index_in_response or result_mismatch:
            self.fail(
                "Found queries not runnable: {0} or required index not found in the query resonse: {1} "
                "or flex query and gsi query results not matching: {2}".format(
                    failed_to_run_query, not_found_index_in_response,
                    result_mismatch))
        else:
            self.log.info("All {0} queries passed".format(
                len(self.query_gen.fts_flex_queries)))

    def test_flex_multi_typemapping(self):

        self._load_emp_dataset(end=(self.num_items // 2))
        self._load_wiki_dataset(end=(self.num_items // 2))

        fts_index = self.create_fts_index(name="custom_index",
                                          source_name=self.bucket_name)
        # make sure mutated is indexed in both the type mappings to test the bug: MB-39517
        mutated_field_def = {
            'dynamic':
            False,
            'enabled':
            True,
            'properties': {},
            'fields': [{
                'include_in_all': True,
                'include_term_vectors': False,
                'index': True,
                'name': 'mutated',
                'store': False,
                'type': 'number',
                'analyzer': ''
            }]
        }
        fts_index.index_definition['params']['mapping']['types']['emp'][
            'properties']['mutated'] = mutated_field_def
        fts_index.index_definition['params']['mapping']['types']['wiki'][
            'properties']['mutated'] = mutated_field_def
        fts_index.index_definition['uuid'] = fts_index.get_uuid()
        fts_index.update()
        self.generate_random_queries(fts_index.smart_query_fields)
        self.update_expected_fts_index_map(fts_index)
        if not self.is_index_present("default", "primary_gsi_index"):
            self.run_cbq_query(
                "create primary index primary_gsi_index on default")

        self.wait_for_fts_indexing_complete(fts_index, self.num_items)

        failed_to_run_query, not_found_index_in_response, result_mismatch = self.run_queries_and_validate(
            partial_sargability=True)
        self.cbcluster.delete_all_fts_indexes()

        if failed_to_run_query or not_found_index_in_response or result_mismatch:
            self.fail(
                "Found queries not runnable: {0} or required index not found in the query resonse: {1} "
                "or flex query and gsi query results not matching: {2}".format(
                    failed_to_run_query, not_found_index_in_response,
                    result_mismatch))
        else:
            self.log.info("All {0} queries passed".format(
                len(self.query_gen.fts_flex_queries)))

    def test_flex_default_typemapping(self):

        self._load_emp_dataset(end=self.num_items // 2)
        self._load_wiki_dataset(end=(self.num_items // 2))

        fts_index = self.create_fts_index(name="default_index",
                                          source_name=self.bucket_name)
        if not self.is_index_present("default", "primary_gsi_index"):
            self.run_cbq_query(
                "create primary index primary_gsi_index on default")
        self.generate_random_queries()
        fts_index.smart_query_fields = self.query_gen.fields
        self.update_expected_fts_index_map(fts_index)

        self.wait_for_fts_indexing_complete(fts_index, self.num_items)

        failed_to_run_query, not_found_index_in_response, result_mismatch = self.run_queries_and_validate(
        )
        self.cbcluster.delete_all_fts_indexes()

        if failed_to_run_query or not_found_index_in_response or result_mismatch:
            self.fail(
                "Found queries not runnable: {0} or required index not found in the query resonse: {1} "
                "or flex query and gsi query results not matching: {2}".format(
                    failed_to_run_query, not_found_index_in_response,
                    result_mismatch))
        else:
            self.log.info("All {0} queries passed".format(
                len(self.query_gen.fts_flex_queries)))

    def test_flex_single_typemapping_partial_sargability(self):

        self._load_emp_dataset(end=self.num_items)

        fts_index = self.create_fts_index(name="custom_index",
                                          source_name=self.bucket_name)
        self.generate_random_queries(fts_index.smart_query_fields)
        self.update_expected_fts_index_map(fts_index)
        if not self.is_index_present("default", "primary_gsi_index"):
            self.run_cbq_query(
                "create primary index primary_gsi_index on default")
        self.get_gsi_fields_partial_sargability()
        self.create_gsi_indexes()
        self.generate_random_queries()

        self.wait_for_fts_indexing_complete(fts_index, self.num_items)

        failed_to_run_query, not_found_index_in_response, result_mismatch = self.run_queries_and_validate(
            partial_sargability=True)
        self.cbcluster.delete_all_fts_indexes()

        if failed_to_run_query or not_found_index_in_response or result_mismatch:
            self.fail(
                "Found queries not runnable: {0} or required index not found in the query resonse: {1} "
                "or flex query and gsi query results not matching: {2}".format(
                    failed_to_run_query, not_found_index_in_response,
                    result_mismatch))
        else:
            self.log.info("All {0} queries passed".format(
                len(self.query_gen.fts_flex_queries)))

    def test_flex_multi_typemapping_partial_sargability(self):

        self._load_emp_dataset(end=(self.num_items // 2))
        self._load_wiki_dataset(end=(self.num_items // 2))

        fts_index = self.create_fts_index(name="custom_index")
        self.update_expected_fts_index_map(fts_index)
        if not self.is_index_present("default", "primary_gsi_index"):
            self.run_cbq_query(
                "create primary index primary_gsi_index on default")
        self.generate_random_queries(fts_index.smart_query_fields)
        self.get_gsi_fields_partial_sargability()
        self.create_gsi_indexes()
        self.generate_random_queries()

        self.wait_for_fts_indexing_complete(fts_index, self.num_items)

        failed_to_run_query, not_found_index_in_response, result_mismatch = self.run_queries_and_validate(
            partial_sargability=True)
        self.cbcluster.delete_all_fts_indexes()

        if failed_to_run_query or not_found_index_in_response or result_mismatch:
            self.fail(
                "Found queries not runnable: {0} or required index not found in the query resonse: {1} "
                "or flex query and gsi query results not matching: {2}".format(
                    failed_to_run_query, not_found_index_in_response,
                    result_mismatch))
        else:
            self.log.info("All {0} queries passed".format(
                len(self.query_gen.fts_flex_queries)))

    def test_flex_single_typemapping_2_fts_indexes(self):
        self._load_emp_dataset(end=self.num_items)

        fts_index_1 = self.create_fts_index(name="custom_index_1",
                                            source_name=self.bucket_name)
        fts_index_2 = self.create_fts_index(name="custom_index_2",
                                            source_name=self.bucket_name)
        self.log.info("Editing custom index with new map...")
        fts_index_2.generate_new_custom_map(seed=fts_index_2.cm_id + 10)
        fts_index_2.index_definition['uuid'] = fts_index_2.get_uuid()
        fts_index_2.update()
        smart_fields = self.merge_smart_fields(fts_index_1.smart_query_fields,
                                               fts_index_2.smart_query_fields)
        self.generate_random_queries(smart_fields)

        self.update_expected_fts_index_map(fts_index_1)
        self.update_expected_fts_index_map(fts_index_2)
        if not self.is_index_present("default", "primary_gsi_index"):
            self.run_cbq_query(
                "create primary index primary_gsi_index on default")

        self.get_gsi_fields_partial_sargability()
        self.create_gsi_indexes()
        self.generate_random_queries()

        self.wait_for_fts_indexing_complete(fts_index_1, self.num_items)
        self.wait_for_fts_indexing_complete(fts_index_2, self.num_items)

        failed_to_run_query, not_found_index_in_response, result_mismatch = self.run_queries_and_validate(
        )
        self.cbcluster.delete_all_fts_indexes()

        if failed_to_run_query or not_found_index_in_response or result_mismatch:
            self.fail(
                "Found queries not runnable: {0} or required index not found in the query resonse: {1} "
                "or flex query and gsi query results not matching: {2}".format(
                    failed_to_run_query, not_found_index_in_response,
                    result_mismatch))
        else:
            self.log.info("All {0} queries passed".format(
                len(self.query_gen.fts_flex_queries)))

    def test_flex_with_napa_dataset(self):

        self._load_napa_dataset(end=self.num_items)

        fts_index = self.create_fts_index(name="default_index",
                                          source_name=self.bucket_name,
                                          doc_count=self.num_items)
        if not self.is_index_present(
                "default", "primary_gsi_index", server=self.query_node):
            self.run_cbq_query(
                "create primary index primary_gsi_index on default",
                server=self.query_node)

        query_list = [
            'SELECT META().id FROM `default` {0} WHERE address.ecpdId="1" ORDER BY META().id LIMIT 100',
            'SELECT META().id, email FROM `default` {0} WHERE address.ecpdId="3" OR address.applicationId=300'
            ' ORDER BY email,META().id LIMIT 10',
            'SELECT META().id, address.applicationId FROM `default` {0} WHERE address.ecpdId="3" AND'
            ' address.deviceTypeId=9 ORDER BY address.applicationId,META().id LIMIT 100',
            'SELECT META().id FROM `default` {0} WHERE (address.ecpdId="3" OR address.applicationId=300) AND '
            '(address.deviceTypeId=9 OR address.deviceStatus=0) ORDER BY META().id LIMIT 100',
            'SELECT META().id, address.applicationId FROM `default` {0} WHERE address.applicationId = 1 AND '
            'address.deviceTypeId=9 AND address.deviceStatus=0 ORDER BY address.applicationId LIMIT 100',
            'SELECT META().id FROM `default` {0} WHERE address.deviceTypeId=9 AND address.deviceStatus=0'
            ' ORDER BY address.applicationId,META().id LIMIT 100',
            'SELECT META().id FROM `default` {0} WHERE address.deviceStatus=0 ORDER BY META().id LIMIT 100',
            'SELECT META().id, address.activationDate FROM `default` {0} WHERE address.ecpdId="1" ORDER BY'
            ' address.activationDate,META().id LIMIT 100',
            'SELECT META().id FROM default {0} WHERE ( ( ( ( ( email LIKE "A%") OR '
            '( ANY v IN devices SATISFIES v LIKE "2%" END)) OR '
            '( first_name > "Karianne" AND first_name <= "Qarianne")) OR ( routing_number = 12160)) OR '
            '( address.activationDate BETWEEN "1995-10-10T21:22:00" AND "2020-05-09T20:08:02.462692")) '
            'ORDER BY address.city,META().id LIMIT 100',
            'SELECT META().id, company_name FROM default {0} WHERE (( email = "*****@*****.**") '
            'AND ( ANY v IN children SATISFIES v.first_name = "Raven" END)) OR '
            '(( company_code > "IMWW" AND company_code <= "D3IHO") AND ( routing_number = 67473) OR '
            '( address.activationDate BETWEEN "2019-10-10T21:22:00" AND "2020-05-09T20:08:02.462692")) '
            'ORDER BY address.activationDate,META().id OFFSET 500 LIMIT 100',
            'SELECT first_name, last_name, email FROM default {0} WHERE '
            '( ( SOME v IN children SATISFIES v.first_name LIKE "R%" END) AND '
            '( age > 20 AND age < 40)) OR '
            '(( dob BETWEEN "1994-12-08T01:19:00" AND "2020-05-09T20:08:02.469127") AND isActive = FALSE) '
            'OR (address.deviceTypeId > 3 AND ISNUMBER(address.deviceTypeId)) '
            'ORDER BY address.activationDate,META().id OFFSET 500 LIMIT 100',
            'SELECT first_name, last_name, email, address.country FROM default {0} WHERE '
            'ANY c IN children SATISFIES c.gender = "F" AND (c.age > 5 AND c.age <15) '
            'OR c.first_name LIKE "a%" END ORDER BY address.country,META().id OFFSET 500 LIMIT 100'
        ]

        self.wait_for_fts_indexing_complete(fts_index, self.num_items)

        failed_to_run_query, not_found_index_in_response, result_mismatch = self.run_query_and_validate(
            query_list)
        if failed_to_run_query or not_found_index_in_response or result_mismatch:
            self.fail(
                "Found queries not runnable: {0} or required index not found in the query resonse: {1} "
                "or flex query and gsi query results not matching: {2}".format(
                    failed_to_run_query, not_found_index_in_response,
                    result_mismatch))
        else:
            self.log.info("All {0} queries passed".format(len(query_list)))

    def test_clusterops_flex_fts_node(self):
        self._load_emp_dataset(end=self.num_items // 2)
        self._load_wiki_dataset(end=(self.num_items // 2))

        fts_index = self.create_fts_index(name="default_index",
                                          source_name=self.bucket_name)
        if not self.is_index_present("default", "primary_gsi_index"):
            self.run_cbq_query(
                "create primary index primary_gsi_index on default")
        self.generate_random_queries()
        fts_index.smart_query_fields = self.query_gen.fields
        self.update_expected_fts_index_map(fts_index)
        fts_index.update_num_replicas(1)

        self.wait_for_fts_indexing_complete(fts_index, self.num_items)

        failed_to_run_query, not_found_index_in_response, result_mismatch = self.run_queries_and_validate(
        )

        if failed_to_run_query or not_found_index_in_response or result_mismatch:
            self.fail(
                "Found queries not runnable: {0} or required index not found in the query resonse: {1} "
                "or flex query and gsi query results not matching: {2}".format(
                    failed_to_run_query, not_found_index_in_response,
                    result_mismatch))
        else:
            self.log.info("All {0} queries passed".format(
                len(self.query_gen.fts_flex_queries)))

        thread1 = threading.Thread(
            name='run_query', target=self.run_queries_and_validate_clusterops)
        thread1.start()
        if self.rebalance_in:

            self.log.info(
                "Now rebalancing in fts node while running queries parallely")

            self.assertTrue(
                len(self.servers) >= self.nodes_in + 1,
                "Servers are not enough")

            try:
                self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                             [self.servers[self.nodes_init]],
                                             [],
                                             services=['fts'])
            except Exception as e:
                self.fail("Rebalance in failed with {0}".format(str(e)))

        elif self.failover_fts:
            self.log.info(
                "Now failover fts node while running queries parallely")
            try:
                self.cluster.failover(self.servers[:self.nodes_init],
                                      [self.servers[self.nodes_init - 1]])
            except Exception as e:
                self.fail("node failover failed with {0}".format(str(e)))
        else:
            self.log.info(
                "Now rebalancing out fts node while running queries parallely")
            try:
                self.cluster.async_rebalance(
                    self.servers[:self.nodes_init], [],
                    [self.servers[self.nodes_init - 1]],
                    services=['fts'])
            except Exception as e:
                self.fail("Rebalance out failed with {0}".format(str(e)))

        thread1.join()
        self.cbcluster.delete_all_fts_indexes()

    def test_rbac_flex_not_granted_n1ql(self):
        self._load_test_buckets(create_index=False)
        user = self.input.param("user", '')
        if user == '':
            raise Exception(
                "Invalid test configuration! User name should not be empty.")

        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_index = self.create_fts_index(name="idx_beer_sample_fts",
                                          doc_count=7303,
                                          source_name='beer-sample')
        self.wait_for_fts_indexing_complete(fts_index, 7303)

        self._create_user(user, 'beer-sample')

        username = self.users[user]['username']
        password = self.users[user]['password']
        query = "select meta().id from `beer-sample` use index (using fts, using gsi) where state = \"California\""

        try:
            self.run_cbq_query(query=query,
                               server=self.master,
                               username=username,
                               password=password)
            self.fail("Could able to run query without n1ql permissions")
        except CBQError as e:
            self.log.info(str(e))
            if not "User does not have credentials to run SELECT queries" in str(
                    e):
                self.fail(
                    "Failed to run query with other CBQ issues: {0}".format(
                        str(e)))
        except Exception as e:
            self.fail("Failed to run query with other issues: {0}".format(
                str(e)))

        self.cbcluster.delete_all_fts_indexes()


class ClusterOpsLargeMetaKV(QueryTests):

    users = {}

    def suite_setUp(self):
        super(ClusterOpsLargeMetaKV, self).suite_setUp()

    def runTest(self):
        pass

    def setUp(self):
        super(ClusterOpsLargeMetaKV, self).setUp()

        self.log.info(
            "==============  ClusterOpsLargeMetaKV setuAp has started =============="
        )
        self.log_config_info()
        self.dataset = self.input.param("dataset", "emp")
        self.custom_map = self.input.param("custom_map", False)
        self.bucket_name = self.input.param("bucket_name", 'default')
        self.rebalance_in = self.input.param("rebalance_in", False)
        self.failover = self.input.param("failover", False)
        self.rebalance_out = self.input.param("rebalance_out", False)
        self.swap_rebalance = self.input.param("swap_rebalance", False)
        self.node_service_in = self.input.param("node_service_in", None)
        self.node_service_out = self.input.param("node_service_out", None)
        self.graceful_failover = self.input.param("graceful_failover", True)
        self.num_fts_partitions = self.input.param("num_fts_partitions", 6)
        self.num_fts_replica = self.input.param("num_fts_replica", 0)
        self.num_gsi_indexes = self.input.param("num_gsi_indexes", 200)
        self.num_fts_indexes = self.input.param("num_fts_indexes", 30)
        self.index_ram = self.input.param('index_ram', 512)
        self.index_nodes = self.get_nodes_from_services_map(
            service_type="index", get_all_nodes=True)
        self.n1ql_nodes = self.get_nodes_from_services_map(service_type="n1ql",
                                                           get_all_nodes=True)
        for index_node in self.index_nodes:
            rest = RestConnection(index_node)
            rest.set_index_settings({"queryport.client.usePlanner": False})
            rest.set_service_memoryQuota(service='indexMemoryQuota',
                                         memoryQuota=self.index_ram)
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        self.log.info(
            "==============  ClusterOpsLargeMetaKV setup has completed =============="
        )

    def tearDown(self):
        self.log.info(
            "==============  ClusterOpsLargeMetaKV tearDown has started =============="
        )
        self.log_config_info()
        self.log.info(
            "==============  ClusterOpsLargeMetaKV tearDown has completed =============="
        )
        super(ClusterOpsLargeMetaKV, self).tearDown()

    def suite_tearDown(self):
        self.log.info(
            "==============  ClusterOpsLargeMetaKV suite_tearDown has started =============="
        )
        self.log_config_info()
        self.log.info(
            "==============  ClusterOpsLargeMetaKV suite_tearDown has completed =============="
        )
        super(ClusterOpsLargeMetaKV, self).suite_tearDown()

    def create_drop_simple_gsi_replica_indexes(self,
                                               num_gsi_indexes,
                                               index_prefix_num=0,
                                               drop_indexes=False):
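        """Spread num_gsi_indexes deferred, single-replica GSI indexes across
        all buckets (named gsi_index<N> starting at index_prefix_num) and
        optionally drop them again to churn metakv."""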
        num_indexes_per_bucket = num_gsi_indexes // len(self.buckets)
        for bucket in self.buckets:
            for i in range(index_prefix_num,
                           num_indexes_per_bucket + index_prefix_num):
                self.run_cbq_query(
                    "create index gsi_index{0} on {1}(name) "
                    "with {{'defer_build': true,'num_replica': 1}}".format(
                        i, bucket))
            if drop_indexes:
                for i in range(index_prefix_num,
                               num_indexes_per_bucket + index_prefix_num):
                    self.run_cbq_query("drop index {1}.gsi_index{0}".format(
                        i, bucket))

    def build_indexes(self):
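        """Build all deferred GSI indexes of each bucket with a single BUILD
        INDEX statement driven by a system:indexes subquery."""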
        for bucket in self.buckets:
            query = "build index on {0} (( select raw name from system:indexes where `keyspace_id` = '{0}' and state = 'deferred'))".format(
                bucket)

            try:
                self.run_cbq_query(query=query, server=self.n1ql_nodes[0])
            except Exception as err:
                self.fail('{0} failed with {1}'.format(str(query), str(err)))

    def create_fts_indexes(self,
                           num_fts_indexes,
                           index_prefix_num=0,
                           drop_indexes=False):
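        """Spread num_fts_indexes FTS indexes across all buckets with the
        configured partition and replica counts, optionally dropping them
        again afterwards."""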
        plan_params = {}
        if self.num_fts_partitions:
            plan_params["indexPartitions"] = self.num_fts_partitions
        if self.num_fts_replica:
            plan_params["numReplicas"] = self.num_fts_replica

        sourceParams = {"feedAllotment": "1:n"}

        num_indexes_per_bucket = num_fts_indexes // len(self.buckets)
        for bucket in self.buckets:
            for i in range(index_prefix_num,
                           num_indexes_per_bucket + index_prefix_num):
                try:
                    index_name = bucket.name + "_custom_index" + str(i)
                    self.create_fts_index(name=index_name,
                                          source_name=bucket.name,
                                          plan_params=plan_params,
                                          doc_count=self.num_items,
                                          cbcluster=self.cbcluster,
                                          source_params=sourceParams)
                except AssertionError as err:
                    self.log.info(str(err))

            if drop_indexes:
                for i in range(index_prefix_num,
                               num_indexes_per_bucket + index_prefix_num):
                    self.cbcluster.delete_fts_index(bucket.name +
                                                    "_custom_index" + str(i))
                self.sleep(300)

    # ======================== tests =====================================================

    def test_clusterops_large_metakv(self):
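        """Grow metakv with a large number of GSI and FTS index
        definitions, churn index create/drop cycles, then run the
        configured cluster operation (rebalance in/out, failover or
        swap rebalance)."""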

        self.sleep(10)
        self._load_emp_dataset_on_all_buckets(end=self.num_items)

        self.create_drop_simple_gsi_replica_indexes(self.num_gsi_indexes)

        self.build_indexes()
        self.create_fts_indexes(self.num_fts_indexes)

        self.create_drop_simple_gsi_replica_indexes(400,
                                                    index_prefix_num=100,
                                                    drop_indexes=True)

        self.create_fts_indexes(50, index_prefix_num=100, drop_indexes=True)

        if self.rebalance_in:

            self.log.info("Now rebalancing in")

            try:
                self.cluster.rebalance(self.servers[:self.nodes_init],
                                       [self.servers[self.nodes_init]], [],
                                       services=[self.node_service_in])
            except Exception as e:
                self.fail("Rebalance in failed with {0}".format(str(e)))

        elif self.failover:
            self.log.info("Now failover node")
            self.node_out = self.get_nodes_from_services_map(
                service_type=self.node_service_out, get_all_nodes=True)
            try:
                self.cluster.failover(self.servers[:self.nodes_init],
                                      [self.node_out[0]],
                                      graceful=self.graceful_failover)
            except Exception as e:
                self.fail("node failover failed with {0}".format(str(e)))
        elif self.rebalance_out:
            self.node_out = self.get_nodes_from_services_map(
                service_type=self.node_service_out, get_all_nodes=True)
            self.log.info("Now rebalancing out")
            try:
                self.cluster.rebalance(self.servers[:self.nodes_init], [],
                                       [self.node_out[0]])
            except Exception as e:
                self.fail("Rebalance out failed with {0}".format(str(e)))

        elif self.swap_rebalance:
            self.node_out = self.get_nodes_from_services_map(
                service_type=self.node_service_out, get_all_nodes=True)
            self.log.info("Now swap rebalance")

            try:
                self.cluster.rebalance(self.servers[:self.nodes_init],
                                       [self.servers[self.nodes_init]],
                                       [self.node_out[0]],
                                       services=[self.node_service_in])
            except Exception as e:
                self.fail("Swap rebalance failed with {0}".format(str(e)))


class N1qlFTSSanityTest(QueryTests):

    def suite_setUp(self):
        super(N1qlFTSSanityTest, self).suite_setUp()


    def setUp(self):
        super(N1qlFTSSanityTest, self).setUp()

        self.log.info("==============  N1qlFTSSanityTest setup has started ==============")
        self.log_config_info()
        self.log.info("==============  N1qlFTSSanityTest setup has completed ==============")

    def tearDown(self):
        self.log.info("==============  N1qlFTSSanityTest tearDown has started ==============")
        self.log_config_info()
        self.log.info("==============  N1qlFTSSanityTest tearDown has completed ==============")
        super(N1qlFTSSanityTest, self).tearDown()

    def suite_tearDown(self):
        self.log.info("==============  N1qlFTSSanityTest suite_tearDown has started ==============")
        self.log_config_info()
        self.log.info("==============  N1qlFTSSanityTest suite_tearDown has completed ==============")
        super(N1qlFTSSanityTest, self).suite_tearDown()


    def test_n1ql_syntax_select_from_let(self):
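        """Sanity check for the SEARCH() N1QL syntax: results of a
        SELECT ... LET ... WHERE SEARCH(...) query must match the same
        query issued directly against the FTS service."""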
        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
        self._create_fts_index(index_name="idx_default_fts", doc_count=98784, source_name='default')
        self.drop_index_safe(bucket_name="default", index_name="#primary", is_primary=True)

        self.scan_consistency = "NOT_BOUNDED"
        n1ql_query = "select meta().id from default let res=true where search(default, {\"query\": {\"field\": \"email\", \"match\":\"'9'\"}, \"size\":10000})=res"
        fts_request = {"query": {"field": "email", "match": "'9'"}, "size": 10000}
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        total_hits, hits, took, status = rest.run_fts_query(index_name="idx_default_fts",
                                                            query_json=fts_request)
        comparison_results = self._compare_n1ql_results_against_fts(n1ql_results, hits)
        self.assertEqual(comparison_results, "OK", comparison_results)
        self.log.info("n1ql+fts integration sanity test is passed. Results against n1ql query equal to fts service call results.")
        self.log.info("n1ql results: "+str(n1ql_results))

        explain_result = self.run_cbq_query("explain " + n1ql_query)
        self.assertTrue("idx_default_fts" in str(explain_result), "FTS index is not used!")
        self.log.info("n1ql+fts integration sanity test passed: FTS index usage found in the execution plan.")
        self._remove_all_fts_indexes()
        self.scan_consistency = "REQUEST_PLUS"

    def _create_fts_index(self, index_name='', doc_count=0, source_name=''):
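        """Create an FTS index (scorch or upside_down, per test param)
        and block until the indexed doc count reaches doc_count."""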
        fts_index_type = self.input.param("fts_index_type", "scorch")

        fts_index = self.cbcluster.create_fts_index(name=index_name, source_name=source_name)
        if fts_index_type == 'upside_down':
            fts_index.update_index_to_upside_down()
        else:
            fts_index.update_index_to_scorch()
        indexed_doc_count = 0
        while indexed_doc_count < doc_count:
            try:
                indexed_doc_count = fts_index.get_indexed_doc_count()
            except KeyError:
                # stats may not expose doc_count immediately after index creation
                pass
            self.sleep(1)

        return fts_index

    def _compare_n1ql_results_against_fts(self, n1ql_results, fts_results):
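        """Compare doc ids returned by N1QL against FTS hits; return
        "OK" on a match, otherwise a description of the mismatch."""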
        n1ql_doc_ids = []
        for result in n1ql_results:
            n1ql_doc_ids.append(result['id'])
        hits = fts_results
        fts_doc_ids = []
        for hit in hits:
            fts_doc_ids.append(hit['id'])

        if len(n1ql_doc_ids) != len(fts_doc_ids):
            return "Results count does not match: FTS - " + str(len(fts_doc_ids)) + ", N1QL - " + str(len(n1ql_doc_ids))
        if sorted(fts_doc_ids) != sorted(n1ql_doc_ids):
            return "Found mismatch in result sets between FTS and N1QL."
        return "OK"

    def _remove_all_fts_indexes(self):
        indexes = self.cbcluster.get_indexes()
        rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
        for index in indexes:
            rest.delete_fts_index(index.name)

    def get_rest_client(self, user, password):
        rest = RestConnection(self.cbcluster.get_random_fts_node())
        rest.username = user
        rest.password = password
        return rest


class N1qlFTSIntegrationPhase2ClusteropsTest(QueryTests):

    def suite_setUp(self):
        super(N1qlFTSIntegrationPhase2ClusteropsTest, self).suite_setUp()


    def setUp(self):
        super(N1qlFTSIntegrationPhase2ClusteropsTest, self).setUp()

        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest setup has started ==============")
        self.log_config_info()
        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest setup has completed ==============")

    def tearDown(self):
        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest tearDown has started ==============")
        self.log_config_info()
        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest tearDown has completed ==============")
        super(N1qlFTSIntegrationPhase2ClusteropsTest, self).tearDown()


    def suite_tearDown(self):
        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest suite_tearDown has started ==============")
        self.log_config_info()
        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest suite_tearDown has completed ==============")
        super(N1qlFTSIntegrationPhase2ClusteropsTest, self).suite_tearDown()


    def get_rest_client(self, user, password):
        rest = RestConnection(self.cbcluster.get_random_fts_node())
        rest.username = user
        rest.password = password
        return rest

    def test_cluster_config_stable(self):
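        """With a stable cluster config, SEARCH() results served through
        a non-fts n1ql node must match a direct FTS query."""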
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)

        self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
        n1ql_node = self.find_child_node_with_service("n1ql")
        if n1ql_node is None:
            self.fail("Cannot find n1ql child node!")
        fts_node = self.find_child_node_with_service("fts")
        if fts_node is None:
            self.fail("Cannot find fts child node!")

        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":1000})"
        fts_request = {"query":{"field":"state", "match":"California"}, "size":1000}
        n1ql_results = self.run_cbq_query(n1ql_query, server=n1ql_node)['results']
        n1ql_doc_ids = []
        for result in n1ql_results:
            n1ql_doc_ids.append(result['id'])

        total_hits, hits, took, status = \
            rest.run_fts_query(index_name="idx_beer_sample_fts",
                               query_json=fts_request)

        fts_doc_ids = []
        for hit in hits:
            fts_doc_ids.append(hit['id'])

        self.assertEqual(len(n1ql_doc_ids), len(fts_doc_ids),
                         "Results count does not match: FTS - " + str(
                             len(fts_doc_ids)) + ", N1QL - " + str(len(n1ql_doc_ids)))
        self.assertEqual(sorted(fts_doc_ids), sorted(n1ql_doc_ids),
                         "Found mismatch in result sets between FTS and N1QL.")

        self.remove_all_fts_indexes()

    def test_cluster_replicas_failover_rebalance(self):
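        """SEARCH() results must stay the same after an FTS node is
        hard-failed over and rebalanced out, with index replicas
        configured per the num_replicas test param."""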
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
        number_of_replicas = self.input.param("num_replicas", 0)
        self._update_replica_for_fts_index(fts_idx, number_of_replicas)
        self.sleep(60)
        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_before_failover = []
        for result in n1ql_results:
            n1ql_doc_ids_before_failover.append(result['id'])

        self.cluster.failover(servers=self.servers, failover_nodes=[self.servers[2]], graceful=False)
        rebalance = self.cluster.rebalance(self.servers, [], [self.servers[2]])
        self.assertTrue(rebalance, "Rebalance failed.")
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_after_rebalance = []
        for result in n1ql_results:
            n1ql_doc_ids_after_rebalance.append(result['id'])

        self.assertEqual(sorted(n1ql_doc_ids_before_failover), sorted(n1ql_doc_ids_after_rebalance), "Results after rebalance do not match.")

    def test_fts_node_failover_partial_results(self):
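        """With no index replicas, failing over an FTS node is expected
        to make SEARCH() queries fail gracefully ("pindex not available")
        rather than silently return partial results."""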
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')

        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        n1ql_results_before_failover = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_before_failover = []
        for result in n1ql_results_before_failover:
            n1ql_doc_ids_before_failover.append(result['id'])

        self.cluster.failover(servers=self.servers, failover_nodes=[self.servers[2]], graceful=False)
        error_found = False
        try:
            self.run_cbq_query(n1ql_query)
        except CBQError as err:
            self.assertTrue("pindex not available" in str(err), "Partial results error message is not graceful.")
            error_found = True
        self.assertTrue(error_found, "Query succeeded unexpectedly: partial result sets must not be allowed for SEARCH() queries.")


    def test_cluster_add_new_fts_node(self):
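        """SEARCH() results must stay the same after a new fts node is
        rebalanced into the cluster."""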
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
        number_of_replicas = self.input.param("num_replicas", 0)
        self._update_replica_for_fts_index(fts_idx, number_of_replicas)
        self.sleep(60)
        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_before_rebalance = []
        for result in n1ql_results:
            n1ql_doc_ids_before_rebalance.append(result['id'])

        self.cluster.rebalance(self.servers, [self.servers[4]], [], services=["fts"])

        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_after_rebalance = []
        for result in n1ql_results:
            n1ql_doc_ids_after_rebalance.append(result['id'])

        self.assertEqual(sorted(n1ql_doc_ids_before_rebalance), sorted(n1ql_doc_ids_after_rebalance), "Results after rebalance do not match.")

    def test_partitioning(self):
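        """SEARCH() results must stay the same after the number of index
        partitions is changed."""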
        partitions_number = self.input.param("partitions_num")
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        default_results = self.run_cbq_query(n1ql_query)
        self._update_partitions_for_fts_index(fts_idx, partitions_number)
        self.sleep(60)
        new_partitioning_result = self.run_cbq_query(n1ql_query)

        n1ql_doc_ids_before_partitioning = []
        for result in default_results['results']:
            n1ql_doc_ids_before_partitioning.append(result['id'])

        n1ql_doc_ids_after_partitioning = []
        for result in new_partitioning_result['results']:
            n1ql_doc_ids_after_partitioning.append(result['id'])

        self.assertEqual(sorted(n1ql_doc_ids_before_partitioning), sorted(n1ql_doc_ids_after_partitioning), "Results after partitioning do not match.")


    def _update_replica_for_fts_index(self, idx, replicas):
        idx.update_num_replicas(replicas)

    def _update_partitions_for_fts_index(self, idx, partitions_num):
        idx.update_num_pindexes(partitions_num)

    def remove_all_fts_indexes(self):
        indexes = self.cbcluster.get_indexes()
        rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
        for index in indexes:
            rest.delete_fts_index(index.name)

    def find_child_node_with_service(self, service=""):
        services_map = self._get_services_map()
        for node in list(services_map.keys()):
            if node == (str(self.servers[0].ip)+":"+str(self.servers[0].port)):
                continue
            if service in services_map[node]:
                for server in self.servers:
                    if (str(server.ip)+":"+str(server.port)) == node:
                        return server
        return None

    def load_test_buckets(self):
        self.rest.load_sample("beer-sample")
        self.wait_for_buckets_status({"beer-sample": "healthy"}, 5, 120)
        self.wait_for_bucket_docs({"beer-sample": 7303}, 5, 120)

    def _create_fts_index(self, index_name='', doc_count=0, source_name=''):
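        """Create an FTS index and block until the indexed doc count
        reaches doc_count."""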
        fts_index = self.cbcluster.create_fts_index(name=index_name, source_name=source_name)
        rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
        indexed_doc_count = 0
        while indexed_doc_count < doc_count:
            try:
                indexed_doc_count = fts_index.get_indexed_doc_count(rest)
            except KeyError:
                # stats may not expose doc_count immediately after index creation
                pass
            self.sleep(1)

        return fts_index

    def _get_services_map(self):
        rest = RestConnection(self.servers[0])
        return rest.get_nodes_services()