def test_rbac_flex(self):
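        # Verify that a user granted access to beer-sample can run a flex
        # (USE INDEX ... USING FTS, USING GSI) query through a non-fts node.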
        self._load_test_buckets(create_index=False)
        user = self.input.param("user", '')
        if user == '':
            raise Exception(
                "Invalid test configuration! User name should not be empty.")

        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_index = self.create_fts_index(name="idx_beer_sample_fts",
                                          doc_count=7303,
                                          source_name='beer-sample')

        self.wait_for_fts_indexing_complete(fts_index, 7303)

        self._create_user(user, 'beer-sample')

        username = self.users[user]['username']
        password = self.users[user]['password']
        query = "select meta().id from `beer-sample` use index (using fts, using gsi) where state = \"California\""

        master_result = self.run_cbq_query(query=query,
                                           server=self.master,
                                           username=username,
                                           password=password)
        self.assertEqual(master_result['status'], 'success',
                         username + " query run failed on non-fts node")

        self.cbcluster.delete_all_fts_indexes()
    def test_rbac_flex_not_granted_n1ql(self):
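        # Verify that the flex query is rejected with a credentials error when
        # the user has not been granted n1ql SELECT permissions on beer-sample.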
        self._load_test_buckets()
        user = self.input.param("user", '')
        if user == '':
            raise Exception("Invalid test configuration! User name should not be empty.")

        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        self.create_fts_index(name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
        self._create_user(user, 'beer-sample')

        username = self.users[user]['username']
        password = self.users[user]['password']
        query = "select meta().id from `beer-sample` use index (using fts, using gsi) where state = \"California\""

        try:
            self.run_cbq_query(query=query, server=self.master, username=username, password=password)
            self.fail("Could able to run query without n1ql permissions")
        except CBQError as e:
            self.log.info(str(e))
            if not "User does not have credentials to run SELECT queries" in str(e):
                self.fail("Failed to run query with other CBQ issues: {0}".format(str(e)))
        except Exception as e:
            self.fail("Failed to run query with other issues: {0}".format(str(e)))

        self.cbcluster.delete_all_fts_indexes()
    def test_cluster_config_stable(self):
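        # Compare doc ids returned by a SEARCH() N1QL query run on a n1ql child
        # node against a direct FTS REST query on the same index.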
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)

        self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
        n1ql_node = self.find_child_node_with_service("n1ql")
        if n1ql_node is None:
            self.log("Cannot find n1ql child node!")
        fts_node = self.find_child_node_with_service("fts")
        if fts_node is None:
            self.log("Cannot find fts child node!")

        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":1000})"
        fts_request = {"query":{"field":"state", "match":"California"}, "size":1000}
        n1ql_results = self.run_cbq_query(n1ql_query, server=n1ql_node)['results']
        n1ql_doc_ids = []
        for result in n1ql_results:
            n1ql_doc_ids.append(result['id'])

        total_hits, hits, took, status = \
            rest.run_fts_query(index_name="idx_beer_sample_fts",
                               query_json = fts_request)

        fts_doc_ids = []
        for hit in hits:
            fts_doc_ids.append(hit['id'])

        self.assertEqual(len(n1ql_doc_ids), len(fts_doc_ids),
                         "Result counts do not match: FTS - " + str(
                             len(fts_doc_ids)) + ", N1QL - " + str(len(n1ql_doc_ids)))
        self.assertEqual(sorted(fts_doc_ids), sorted(n1ql_doc_ids),
                         "Found a mismatch in results.")

        self.remove_all_fts_indexes()
    def test_partitioning(self):
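        # Run the same SEARCH() query before and after changing the number of
        # index partitions and verify the result sets are identical.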
        partitions_number = self.input.param("partitions_num")
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts",
                                         doc_count=7303,
                                         source_name='beer-sample')
        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        default_results = self.run_cbq_query(n1ql_query)
        self._update_partiotions_for_fts_index(fts_idx, partitions_number)
        self.sleep(60)
        new_partitioning_result = self.run_cbq_query(n1ql_query)

        n1ql_doc_ids_before_partitioning = []
        for result in default_results['results']:
            n1ql_doc_ids_before_partitioning.append(result['id'])

        n1ql_doc_ids_after_partitioning = []
        for result in new_partitioning_result['results']:
            n1ql_doc_ids_after_partitioning.append(result['id'])

        self.assertEqual(sorted(n1ql_doc_ids_before_partitioning),
                         sorted(n1ql_doc_ids_after_partitioning),
                         "Results after partitioning do not match.")
    def test_cluster_add_new_fts_node(self):
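        # Verify SEARCH() results stay the same after rebalancing in an
        # additional fts node (self.servers[4]).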
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts",
                                         doc_count=7303,
                                         source_name='beer-sample')
        number_of_replicas = self.input.param("num_replicas", 0)
        self._update_replica_for_fts_index(fts_idx, number_of_replicas)
        self.sleep(60)
        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_before_rebalance = []
        for result in n1ql_results:
            n1ql_doc_ids_before_rebalance.append(result['id'])

        self.cluster.rebalance(self.servers, [self.servers[4]], [],
                               services=["fts"])

        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_after_rebalance = []
        for result in n1ql_results:
            n1ql_doc_ids_after_rebalance.append(result['id'])

        self.assertEqual(sorted(n1ql_doc_ids_before_rebalance),
                         sorted(n1ql_doc_ids_after_rebalance),
                         "Results after rebalance does not match.")
    def test_fts_node_failover_partial_results(self):
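        # After a hard failover of an fts node, a SEARCH() query should fail
        # with a "pindex not available" error instead of returning partial results.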
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts",
                                         doc_count=7303,
                                         source_name='beer-sample')
        self.run_cbq_query("drop index `beer-sample`.beer_primary")

        n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\":\"state\", \"match\":\"California\"}, \"size\":10000})"
        n1ql_results_before_failover = self.run_cbq_query(
            n1ql_query)['results']
        n1ql_doc_ids_before_failover = []
        for result in n1ql_results_before_failover:
            n1ql_doc_ids_before_failover.append(result['id'])

        self.cluster.failover(servers=self.servers,
                              failover_nodes=[self.servers[2]],
                              graceful=False)
        error_found = False
        try:
            self.run_cbq_query(n1ql_query)
        except CBQError as err:
            self.assertTrue("pindex not available" in str(err),
                            "Partial results error message is not graceful.")
            error_found = True
        self.assertTrue(
            error_found,
            "Query succeeded after failover; a partial result set should not "
            "be allowed for SEARCH() queries.")
    def test_cluster_replicas_failover_rebalance(self):
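        # With index replicas configured, failing over an fts node and
        # rebalancing it out should not change the SEARCH() result set.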
        self.load_test_buckets()
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        fts_idx = self._create_fts_index(index_name="idx_beer_sample_fts",
                                         doc_count=7303,
                                         source_name=self.sample_bucket)
        number_of_replicas = self.input.param("num_replicas", 0)
        self._update_replica_for_fts_index(fts_idx, number_of_replicas)
        self.sleep(60)
        n1ql_query = "select meta().id from `{0}` where search(`{0}`, {{\"query\":{{\"field\":\"state\"," \
                     " \"match\":\"California\"}}, \"size\":10000}})".format(self.sample_bucket)
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_before_failover = []
        for result in n1ql_results:
            n1ql_doc_ids_before_failover.append(result['id'])

        self.cluster.failover(servers=self.servers,
                              failover_nodes=[self.servers[2]],
                              graceful=False)
        rebalance = self.cluster.rebalance(self.servers, [], [self.servers[2]])
        self.assertEqual(rebalance, True, "Rebalance failed.")
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        n1ql_doc_ids_after_rebalance = []
        for result in n1ql_results:
            n1ql_doc_ids_after_rebalance.append(result['id'])

        self.assertEqual(sorted(n1ql_doc_ids_before_failover),
                         sorted(n1ql_doc_ids_after_rebalance),
                         "Results after rebalance does not match.")
    def setUp(self):
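        # Load the beer-sample bucket and raise the fts RAM quota on the first
        # fts node before each test.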
        super(N1qlFTSIntegrationPhase2ClusteropsTest, self).setUp()
        self.sample_bucket = 'beer-sample'
        self.query_buckets = self.get_query_buckets(sample_buckets=[self.sample_bucket])
        self.query_bucket = self.query_buckets[1]
        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest setup has started ==============")
        self.log_config_info()
        cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
        fts_node = cbcluster.get_fts_nodes()
        RestConnection(fts_node[0]).set_fts_ram_quota(900)

        self.log.info("==============  N1qlFTSIntegrationPhase2ClusteropsTest setup has completed ==============")
    def test_n1ql_syntax_select_from_let(self):
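        # Verify that SEARCH() works inside a LET binding: compare the N1QL
        # results against a direct FTS query and check that the fts index
        # appears in the EXPLAIN plan.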
        query_bucket = self.get_collection_name(self.default_bucket_name)
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        rest = self.get_rest_client(self.servers[0].rest_username,
                                    self.servers[0].rest_password)
        self._create_fts_index(index_name="idx_default_fts",
                               doc_count=98784,
                               source_name=query_bucket)
        self.drop_index_safe(bucket_name=self.default_bucket_name,
                             index_name="#primary",
                             is_primary=True)

        self.scan_consistency = "NOT_BOUNDED"
        n1ql_query = "select meta().id from {0} let res=true where search(default, {{\"query\": {{\"field\": " \
                     "\"email\", \"match\":\"'9'\"}}, \"size\":10000}})=res".format(query_bucket)
        fts_request = {
            "query": {
                "field": "email",
                "match": "'9'"
            },
            "size": 10000
        }
        n1ql_results = self.run_cbq_query(n1ql_query)['results']
        total_hits, hits, took, status = rest.run_fts_query(
            index_name="idx_default_fts", query_json=fts_request)
        comparison_results = self._compare_n1ql_results_against_fts(
            n1ql_results, hits)
        self.assertEqual(comparison_results, "OK", comparison_results)
        self.log.info(
            "n1ql+fts integration sanity test passed: N1QL query results match "
            "the FTS service call results.")
        self.log.info("n1ql results: " + str(n1ql_results))

        explain_result = self.run_cbq_query("explain " + n1ql_query)
        self.assertTrue("idx_default_fts" in str(explain_result),
                        "FTS index is not used!")
        self.log.info(
            "n1ql+fts integration sanity test passed: FTS index usage found in "
            "the execution plan.")
        self._remove_all_fts_indexes()
        self.scan_consistency = "REQUEST_PLUS"
    def setUp(self):
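        # Read test parameters for cluster topology changes (rebalance,
        # failover, swap rebalance) and index sizing, then set the indexer
        # memory quota and disable the GSI query planner on all index nodes.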
        super(ClusterOpsLargeMetaKV, self).setUp()

        self.log.info(
            "==============  ClusterOpsLargeMetaKV setup has started =============="
        )
        self.log_config_info()
        self.dataset = self.input.param("dataset", "emp")
        self.custom_map = self.input.param("custom_map", False)
        self.bucket_name = self.input.param("bucket_name", 'default')
        self.rebalance_in = self.input.param("rebalance_in", False)
        self.failover = self.input.param("failover", False)
        self.rebalance_out = self.input.param("rebalance_out", False)
        self.swap_rebalance = self.input.param("swap_rebalance", False)
        self.node_service_in = self.input.param("node_service_in", None)
        self.node_service_out = self.input.param("node_service_out", None)
        self.graceful_failover = self.input.param("graceful_failover", True)
        self.num_fts_partitions = self.input.param("num_fts_partitions", 6)
        self.num_fts_replica = self.input.param("num_fts_replica", 0)
        self.num_gsi_indexes = self.input.param("num_gsi_indexes", 200)
        self.num_fts_indexes = self.input.param("num_fts_indexes", 30)
        self.index_ram = self.input.param('index_ram', 512)
        self.index_nodes = self.get_nodes_from_services_map(
            service_type="index", get_all_nodes=True)
        self.n1ql_nodes = self.get_nodes_from_services_map(service_type="n1ql",
                                                           get_all_nodes=True)
        for index_node in self.index_nodes:
            rest = RestConnection(index_node)
            rest.set_index_settings({"queryport.client.usePlanner": False})
            rest.set_service_memoryQuota(service='indexMemoryQuota',
                                         memoryQuota=self.index_ram)
        self.cbcluster = CouchbaseCluster(name='cluster',
                                          nodes=self.servers,
                                          log=self.log)
        self.log.info(
            "==============  ClusterOpsLargeMetaKV setup has completed =============="
        )