Example #1
    def setUp(self):
        super(TravelSampleApp, self).setUp()

        self.log_setup_status("TravelSampleApp", "started")
        self.step_num = 1
        self.app_iteration = self.input.param("iteration", 1)

        self.rbac_util = RbacUtil()
        self.sdk_clients = global_vars.sdk_clients

        if not self.skip_setup_cleanup:
            # Rebalance_in required nodes
            nodes_init = self.cluster.servers[1:self.nodes_init] \
                if self.nodes_init != 1 else []
            self.task.rebalance([self.cluster.master], nodes_init, [])
            self.cluster.nodes_in_cluster.extend([self.cluster.master] +
                                                 nodes_init)

            # Load travel_sample bucket
            status = self.bucket_util.load_sample_bucket(TravelSample())
            if status is False:
                self.fail("Failed to load sample bucket")

            self.bucket = self.bucket_util.buckets[0]
            self.bucket_util.update_bucket_property(
                self.bucket, ram_quota_mb=self.bucket_size)

            # Create required scope/collections
            self.create_scope_collections()
            self.bucket.scopes[CbServer.default_scope].collections[
                CbServer.default_collection] \
                .num_items = TravelSample().stats.expected_item_count
            self.sleep(20, "Wait for num_items to get updated")
            self.bucket_util.validate_docs_per_collections_all_buckets()

            # Create RBAC users
            self.create_rbac_users()

            #  Opening required clients
            self.create_sdk_clients()

            # Loading initial data into created collections
            self.load_initial_collection_data()
            self.bucket_util.validate_docs_per_collections_all_buckets()

            # Create required indexes
            self.create_indexes()
        else:
            self.bucket = self.bucket_util.buckets[0]
            self.map_collection_data()

            #  Opening required clients
            self.create_sdk_clients()

        global_vars.app_current_date = query_util.CommonUtil.get_current_date()
        self.log_setup_status("TravelSampleApp", "complete")
Example #2
    def setUp(self, add_default_cbas_node=True):
        self.input = TestInputSingleton.input

        if "default_bucket" not in self.input.test_params:
            self.input.test_params.update({"default_bucket": False})

        if "set_cbas_memory_from_available_free_memory" not in \
                self.input.test_params:
            self.input.test_params.update(
                {"set_cbas_memory_from_available_free_memory": True})

        super(CBASDDLTests, self).setUp(add_default_cbas_node)

        self.validate_error = False
        if self.expected_error:
            self.validate_error = True
        ''' Considering all the scenarios where:
        1. There can be 1 KV and multiple cbas nodes
           (and the test wants to add all cbas nodes into the cluster).
        2. There can be 1 KV and multiple cbas nodes
           (and the test wants only 1 cbas node).
        3. There can be only 1 node running both KV and CBAS services.
        NOTE: Cases where some nodes run only cbas are still pending;
              covering them requires a service check on each node.
        '''
        self.sample_bucket = TravelSample()
        result = self.bucket_util.load_sample_bucket(self.sample_bucket)
        self.assertTrue(result, msg="Load sample bucket failed")
Example #3
    def test_change_global_scrape_timeout(self):
        """
        Change the global scrape timeout and verify the prometheus config
        by querying Prometheus Federation.
        (Positive test case, since a valid scrape_timeout is always less
        than the scrape_interval)
        """
        scrape_timeout = self.input.param("scrape_timeout", 5)
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())

        self.log.info("Changing scrape interval to {0}".format(scrape_timeout))
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "scrape_timeout", scrape_timeout)

        self.log.info("Validating by querying prometheus")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "prometheus_auth_enabled", "false")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "listen_addr_type", "any")
        self.sleep(10, "Waiting for prometheus federation")
        query = "status/config"
        yaml = YAML()
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).query_prometheus_federation(query)
            yaml_code = yaml.load(content["data"]["yaml"])
            global_scrape_timeout = yaml_code["global"]["scrape_timeout"]
            if str(global_scrape_timeout) != (str(scrape_timeout) + "s"):
                self.fail("Expected scrape timeout {0}, but Actual {1}".format(
                    scrape_timeout, global_scrape_timeout))
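
A minimal sketch of the YAML check this test performs, pulled out on its own. It assumes, as the snippet suggests, that YAML() is ruamel.yaml's round-trip loader and that the federation response carries the Prometheus config string under ["data"]["yaml"]:

    from ruamel.yaml import YAML

    def scrape_timeout_matches(config_yaml_str, expected_seconds):
        # Prometheus reports durations like "5s", so compare against the
        # expected number of seconds with an "s" suffix appended
        yaml_code = YAML().load(config_yaml_str)
        return str(yaml_code["global"]["scrape_timeout"]) == \
            "%ss" % expected_seconds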
Example #4
    def setUp(self):
        super(EnforceTls, self).setUp()
        self.sample_urls_map = \
            {"http://%s:8091/nodes/self": "https://%s:18091/nodes/self",
             "http://%s:9102/api/v1/stats": "https://%s:19102/api/v1/stats",
             "http://%s:8093/admin/clusters": "https://%s:18093/admin/clusters",
             "http://%s:8094/api/cfg": "https://%s:18094/api/cfg",
             "http://%s:8096/api/v1/functions": "https://%s:18096/api/v1/functions",
             "http://%s:8095/analytics/node/agg/stats/remaining":
                 "https://%s:18095/analytics/node/agg/stats/remaining",
             "http://%s:8097/api/v1/config": "https://%s:18097/api/v1/config"}

        self.log.info("Disabling AF on all nodes before beginning the test")
        for node in self.cluster.servers:
            status = RestConnection(node)\
                .update_autofailover_settings(False, 120)
            self.assertTrue(status)
        self.log.info("Changing security settings to trust all CAs")
        trust_all_certs()
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        shell = RemoteMachineShellConnection(self.cluster.master)
        self.curl_path = "/opt/couchbase/bin/curl"
        if shell.extract_remote_info().distribution_type == "windows":
            self.curl_path = "C:/Program Files/Couchbase/Server/bin/curl"
        shell.disconnect()
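
A hedged sketch of how a map like sample_urls_map can be consumed to verify TLS enforcement. The helper below is illustrative and not part of the snippet above; verify=False mirrors the trust-all-CAs setting applied in setUp:

    import requests

    def assert_tls_enforced(node_ip, sample_urls_map):
        for http_url, https_url in sample_urls_map.items():
            # Plain-HTTP ports should refuse connections once TLS is
            # enforced, while the corresponding TLS ports keep serving
            try:
                requests.get(http_url % node_ip, timeout=5)
                raise AssertionError("HTTP port still open: %s" % http_url)
            except requests.exceptions.ConnectionError:
                pass
            requests.get(https_url % node_ip, timeout=5, verify=False)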
Example #5
    def test_check_get_all_metrics(self):
        """
        Test the /metrics endpoint. Validate for duplicates and prefixes
        """
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).get_all_metrics()
            StatsHelper(server)._validate_metrics(content)
        # Debug dump of the last node's payload
        for line in content:
            print(line.strip("\n"))
Example #6
    def test_check_high_cardinality_metrics(self):
        """
        Check if _prometheusMetrics returns high-cardinality metrics by
        default, i.e., high-cardinality metrics are collected by default.
        Also serves as a check that prometheus is running on all nodes.
        """
        component = self.input.param("component", "kv")
        parse = self.input.param("parse", False)

        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).get_prometheus_metrics_high(
                component=component, parse=parse)
            if not parse:
                StatsHelper(server)._validate_metrics(content)
        # Debug dump of the last node's payload
        for line in content:
            print(line.strip("\n"))
Example #7
    def test_disable_external_prometheus_high_cardinality_metrics(self):
        """
        Disable exposition of high-cardinality metrics by ns_server's
        /metrics endpoint. Validate by checking that no high-cardinality
        metrics are returned at the /metrics endpoint, i.e., check that the
        total low-cardinality metric count equals the total metric count
        at /metrics.
        """
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        self.log.info(
            "Disabling external prometheus high cardinality metrics of all services"
        )
        metrics_data = '{"statsExport":{"analytics":{"highCardEnabled":false}, "clusterManager":{"highCardEnabled":false},\
                "data":{"highCardEnabled":false}, "eventing":{"highCardEnabled":false}, \
                "fullTextSearch":{"highCardEnabled":false}, "index":{"highCardEnabled":false}}}'

        StatsHelper(self.cluster.master).configure_stats_settings_from_api(
            metrics_data)
        for server in self.cluster.servers[:self.nodes_init]:
            len_low_cardinality_metrics = 0
            content = StatsHelper(server).get_prometheus_metrics(
                component="ns_server", parse=False)
            self.log.info("lc count of ns_server on {0} is {1}".format(
                server.ip, len(content)))
            len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                content)
            server_services = self.get_services_from_node(server)
            for component in server_services:
                content = StatsHelper(server).get_prometheus_metrics(
                    component=component, parse=False)
                self.log.info("lc count of {2} on {0} is {1}".format(
                    server.ip, len(content), component))
                len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                    content)
            self.sleep(20, "Wait before fetching metrics")
            content = StatsHelper(server).get_all_metrics()
            len_metrics = len(content)
            if len_metrics != len_low_cardinality_metrics:
                self.fail(
                    "Number mismatch on node {0} , Total lc metrics count {1}, Total metrics count {2}"
                    .format(server.ip, len_low_cardinality_metrics,
                            len_metrics))
Example #8
    def test_disable_external_prometheus_high_cardinality_metrics(self):
        """
        Disable exposition of high-cardinality metrics by ns_server's
        /metrics endpoint. Validate by checking that no high-cardinality
        metrics are returned at the /metrics endpoint, i.e., check that the
        total low-cardinality metric count equals the total metric count
        at /metrics.
        """
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())

        self.log.info(
            "Disabling external prometheus high cardinality metrics of all services"
        )
        value = "[{index,[{high_cardinality_enabled,false}]}, {fts,[{high_cardinality_enabled,false}]},\
                         {kv,[{high_cardinality_enabled,false}]}, {cbas,[{high_cardinality_enabled,false}]}, \
                         {eventing,[{high_cardinality_enabled,false}]}]"

        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "external_prometheus_services", value)
        for server in self.cluster.servers[:self.nodes_init]:
            len_low_cardinality_metrics = 0
            content = StatsHelper(server).get_prometheus_metrics(
                component="ns_server", parse=False)
            self.log.info("lc count of ns_server on {0} is {1}".format(
                server.ip, len(content)))
            len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                content)
            server_services = self.get_services_from_node(server)
            for component in server_services:
                content = StatsHelper(server).get_prometheus_metrics(
                    component=component, parse=False)
                self.log.info("lc count of {2} on {0} is {1}".format(
                    server.ip, len(content), component))
                len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                    content)
            content = StatsHelper(server).get_all_metrics()
            len_metrics = len(content)
            if len_metrics != len_low_cardinality_metrics:
                self.fail(
                    "Number mismatch on node {0} , Total lc metrics count {1}, Total metrics count {2}"
                    .format(server.ip, len_low_cardinality_metrics,
                            len_metrics))
Example #9
    def test_change_global_scrape_interval(self):
        """
        1. Change global scrape interval via diag eval
        2. verify the prometheus config by querying Prometheus Federation
        3. Reset the global scrape interval back to default via rest api
        4. verify the prometheus config by querying Prometheus Federation
        """
        def verify_prometheus_config(expected_scrape_interval):
            self.log.info("Validating by querying prometheus")
            StatsHelper(
                self.cluster.master).configure_stats_settings_from_diag_eval(
                    "prometheus_auth_enabled", "false")
            StatsHelper(
                self.cluster.master).configure_stats_settings_from_diag_eval(
                    "listen_addr_type", "any")
            self.sleep(10, "Waiting for prometheus federation")
            for server in self.cluster.servers[:self.nodes_init]:
                content = StatsHelper(server).query_prometheus_federation(
                    query)
                yaml_code = yaml.load(content["data"]["yaml"])
                global_scrape_interval = yaml_code["global"]["scrape_interval"]
                if str(global_scrape_interval) != (
                        str(expected_scrape_interval) + "s"):
                    self.fail(
                        "Expected scrape interval {0}, but Actual {1}".format(
                            expected_scrape_interval, global_scrape_interval))

        scrape_interval = self.input.param("scrape_interval", 15)
        query = "status/config"
        yaml = YAML()
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        self.log.info("Changing scrape interval to {0} via diag_eval".format(
            scrape_interval))
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "scrape_interval", scrape_interval)
        verify_prometheus_config(expected_scrape_interval=scrape_interval)

        self.log.info("Changing scrape interval to 10s via rest api")
        settings = StatsHelper(self.cluster.master).change_scrape_interval(10)
        verify_prometheus_config(expected_scrape_interval=10)
Example #10
    def test_prometheus_and_ns_server_stats_after_failure_scenarios(self):
        """
        Run all metrics before and after failure scenarios and validate
        both ns_server and prometheus stats
        """
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        target_node = self.servers[0]
        remote = RemoteMachineShellConnection(target_node)
        error_sim = CouchbaseError(self.log, remote)
        self.log.info("Before failure")
        self.get_all_metrics(self.components, self.parse, self.metric_name)
        try:
            # Induce the error condition
            error_sim.create(self.simulate_error)
            self.sleep(20, "Wait before reverting the error condition")
        finally:
            # Revert the simulated error condition and close the ssh session
            error_sim.revert(self.simulate_error)
            remote.disconnect()
        self.log.info("After failure")
        self.get_all_metrics(self.components, self.parse, self.metric_name)
Example #11
    def test_prometheus_and_ns_server_stats_after_crash_scenarios(self):
        """
        Run all metrics before and after a crash and validate
        both ns_server and prometheus stats
        """
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        target_node = self.servers[0]
        remote = RemoteMachineShellConnection(target_node)
        error_sim = CouchbaseError(self.log, remote)
        self.log.info("Before failure")
        self.get_all_metrics(self.components, self.parse, self.metric_name)
        try:
            self.log.info("Killing {0} on node {1}".format(
                self.process_name, target_node.ip))
            remote.kill_process(self.process_name,
                                self.service_name,
                                signum=signum[self.sig_type])
            self.sleep(20, "Wait for the process to come back up")
        finally:
            remote.disconnect()
        self.log.info("After failure")
        self.get_all_metrics(self.components, self.parse, self.metric_name)
Example #12
    def setUp(self, add_default_cbas_node=True):
        super(CBASFunctionalTests, self).setUp(add_default_cbas_node)
        sample_bucket = TravelSample()

        if "default_bucket" not in self.input.test_params:
            self.input.test_params.update({"default_bucket": False})

        self.validate_error = False
        if self.expected_error:
            self.validate_error = True

        ''' Considering all the scenarios where:
        1. There can be 1 KV and multiple cbas nodes
           (and the test wants to add all cbas nodes into the cluster).
        2. There can be 1 KV and multiple cbas nodes
           (and the test wants only 1 cbas node).
        3. There can be only 1 node running both KV and CBAS services.
        NOTE: Cases where some nodes run only cbas are still pending;
              covering them requires a service check on each node.
        '''
        result = self.bucket_util.load_sample_bucket(sample_bucket)
        self.assertTrue(result, msg="Failed to load '%s'" % sample_bucket)
Example #13
    def test_disable_high_cardinality_metrics(self):
        """
        Disable Prometheus from scraping high cardinality metrics
        Validate by querying Prometheus directly for its active targets
        """
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())

        self.log.info("Disabling high cardinality metrics of all services")
        value = "[{index,[{high_cardinality_enabled,false}]}, {fts,[{high_cardinality_enabled,false}]},\
                 {kv,[{high_cardinality_enabled,false}]}, {cbas,[{high_cardinality_enabled,false}]}, \
                 {eventing,[{high_cardinality_enabled,false}]}]"

        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "services", value)

        self.log.info("Validating by querying prometheus")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "prometheus_auth_enabled", "false")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "listen_addr_type", "any")
        self.sleep(10, "Waiting for prometheus federation")
        query = "targets?state=active"
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).query_prometheus_federation(query)
            active_targets = content["data"]["activeTargets"]
            if len(active_targets) == 0:
                self.fail("Prometheus did not return any active targets")
            for active_targets_dict in active_targets:
                job = active_targets_dict["labels"]["job"]
                self.log.info("Job name {0}".format(job))
                if "high_cardinality" in job:
                    self.fail(
                        "Prometheus is still scraping target with job name {0} on {1}"
                        .format(job, server.ip))
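
The same validation restated as a small helper. The response shape — "data" / "activeTargets" / "labels" / "job" — is taken from the parsing above; the helper name itself is illustrative:

    def high_cardinality_jobs(federation_response):
        # Return the job names Prometheus is still scraping that belong to
        # high-cardinality targets; an empty list means the disable worked
        return [target["labels"]["job"]
                for target in federation_response["data"]["activeTargets"]
                if "high_cardinality" in target["labels"]["job"]]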
Example #14
    def test_disable_high_cardinality_metrics(self):
        """
        Disable Prometheus from scraping high cardinality metrics
        Validate by querying Prometheus directly for its active targets
        """
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        self.log.info("Disabling high cardinality metrics of all services")
        metrics_data = '{"services":{"analytics":{"highCardEnabled":false}, "clusterManager":{"highCardEnabled":false},\
        "data":{"highCardEnabled":false}, "eventing":{"highCardEnabled":false}, \
        "fullTextSearch":{"highCardEnabled":false}, "index":{"highCardEnabled":false}}}'

        StatsHelper(self.cluster.master).configure_stats_settings_from_api(
            metrics_data)

        self.log.info("Validating by querying prometheus")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "prometheus_auth_enabled", "false")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "listen_addr_type", "any")
        self.sleep(10, "Waiting for prometheus federation")
        query = "targets?state=active"
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).query_prometheus_federation(query)
            active_targets = content["data"]["activeTargets"]
            if len(active_targets) == 0:
                self.fail("Prometheus did not return any active targets")
            for active_targets_dict in active_targets:
                job = active_targets_dict["labels"]["job"]
                self.log.info("Job name {0}".format(job))
                if "high_cardinality" in job:
                    self.fail(
                        "Prometheus is still scraping target with job name {0} on {1}"
                        .format(job, server.ip))
Example #15
    def setUp(self, add_default_cbas_node=True):
        super(CBASBaseTest, self).setUp()
        if self._testMethodDoc:
            self.log.info("Starting Test: %s - %s" %
                          (self._testMethodName, self._testMethodDoc))
        else:
            self.log.info("Starting Test: %s" % self._testMethodName)
        invalid_ip = '10.111.151.109'
        self.cb_bucket_name = self.input.param('cb_bucket_name',
                                               'travel-sample')
        self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
        self.cb_bucket_password = self.input.param('cb_bucket_password', None)
        self.cb_server_ip = self.input.param("cb_server_ip", None)
        self.cb_server_ip = \
            self.cb_server_ip.replace('INVALID_IP', invalid_ip) \
            if self.cb_server_ip is not None else None
        self.cbas_dataset_name = self.input.param("cbas_dataset_name",
                                                  'travel_ds')
        self.cbas_bucket_name_invalid = \
            self.input.param('cbas_bucket_name_invalid', self.cbas_bucket_name)
        self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
        self.skip_create_dataset = self.input.param('skip_create_dataset',
                                                    False)
        self.disconnect_if_connected = \
            self.input.param('disconnect_if_connected', False)
        self.cbas_dataset_name_invalid = \
            self.input.param('cbas_dataset_name_invalid',
                             self.cbas_dataset_name)
        self.skip_drop_connection = self.input.param('skip_drop_connection',
                                                     False)
        self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)
        self.query_id = self.input.param('query_id', None)
        self.mode = self.input.param('mode', None)
        self.num_concurrent_queries = self.input.param('num_queries', 5000)
        self.concurrent_batch_size = self.input.param('concurrent_batch_size',
                                                      100)
        self.compiler_param = self.input.param('compiler_param', None)
        self.compiler_param_val = self.input.param('compiler_param_val', None)
        self.expect_reject = self.input.param('expect_reject', False)
        self.expect_failure = self.input.param('expect_failure', False)
        self.compress_dataset = self.input.param('compress_dataset', False)
        self.index_name = self.input.param('index_name', "NoName")
        self.index_fields = self.input.param('index_fields', None)
        if self.index_fields:
            self.index_fields = self.index_fields.split("-")
        self.retry_time = self.input.param("retry_time", 300)
        self.num_retries = self.input.param("num_retries", 1)
        self.sample_bucket_dict = {
            TravelSample().name: TravelSample(),
            BeerSample().name: BeerSample()
        }
        self.sample_bucket = None
        self.flush_enabled = Bucket.FlushBucket.ENABLED
        self.test_abort_snapshot = self.input.param("test_abort_snapshot",
                                                    False)
        self.cbas_spec_name = self.input.param("cbas_spec", None)

        self._cb_cluster = self.get_clusters()

        self.expected_error = self.input.param("error", None)

        self.bucket_spec = self.input.param("bucket_spec", None)
        self.doc_spec_name = self.input.param("doc_spec_name", "initial_load")
        self.set_cbas_memory_from_available_free_memory = self.input.param(
            'set_cbas_memory_from_available_free_memory', False)
        self.parallel_load_percent = int(
            self.input.param("parallel_load_percent", 0))
        self.cbas_kill_count = self.input.param("cbas_kill_count", 0)
        self.memcached_kill_count = self.input.param("memcached_kill_count", 0)
        self.tamper_links_count = self.input.param("tamper_links_count", 0)
        self.cbas_node = None
        self.cbas_memory_quota_percent = int(
            self.input.param("cbas_memory_quota_percent", 100))
        self.bucket_size = self.input.param("bucket_size", 100)
        services = None
        nodes_init = None
        # Single cluster support
        if len(self._cb_cluster) == 1:
            self._cb_cluster = self._cb_cluster[0]
            self.cluster.nodes_in_cluster.extend([self.cluster.master])
            if self.services_init and self.nodes_init >= 3:
                if len(self.cluster.servers) < self.nodes_init or \
                        len(self.services_init.split("-")) != self.nodes_init:
                    self.fail("Configuration error. Re-check nodes_init, "
                              "services_init in .conf file and servers "
                              "available in .ini "
                              "file")
                services = list()
                for service in self.services_init.split(
                        "-")[1:self.nodes_init]:
                    services.append(service.replace(":", ","))
                # Initialize cluster using given nodes
                nodes_init = list(
                    filter(lambda node: node.ip != self.cluster.master.ip,
                           self.cluster.servers[1:self.nodes_init]))
                for node, services_init in zip_longest(nodes_init, services):
                    # itertools.zip_longest replaces the Py2-only
                    # map(None, ...) pairing; nodes without an explicit
                    # entry default to the kv service
                    if services_init is None:
                        services_init = "kv"
                        services.append("kv")
                    if not self.cbas_node and "cbas" in services_init:
                        self.cbas_node = node
                        self.cbas_node.services = services_init
                    idx = self.cluster.servers.index(node)
                    self.cluster.servers[idx].services = services_init
            for server in self.cluster.servers:
                if "cbas" in server.services:
                    self.cluster.cbas_nodes.append(server)
                if "kv" in server.services:
                    self.cluster.kv_nodes.append(server)
                rest = RestConnection(server)
                rest.set_data_path(data_path=server.data_path,
                                   index_path=server.index_path,
                                   cbas_path=server.cbas_path)
            if self.expected_error:
                self.expected_error = \
                    self.expected_error.replace("INVALID_IP", invalid_ip)
                self.expected_error = \
                    self.expected_error.replace("PORT",
                                                self.cluster.master.port)
            self.otpNodes = []
            self.cbas_path = server.cbas_path
            self.rest = RestConnection(self.cluster.master)
            if not self.set_cbas_memory_from_available_free_memory:
                self.log.info(
                    "Setting the min possible memory quota so that adding "
                    "more nodes to the cluster wouldn't be a problem.")
                self.rest.set_service_mem_quota({
                    CbServer.Settings.KV_MEM_QUOTA:
                    MIN_KV_QUOTA,
                    CbServer.Settings.FTS_MEM_QUOTA:
                    FTS_QUOTA,
                    CbServer.Settings.INDEX_MEM_QUOTA:
                    INDEX_QUOTA
                })
                self.set_cbas_memory_from_available_free_memory = \
                    self.input.param(
                        'set_cbas_memory_from_available_free_memory', False)

                self.log.info("Setting %d memory quota for CBAS" % CBAS_QUOTA)
                self.cbas_memory_quota = CBAS_QUOTA
                self.rest.set_service_mem_quota(
                    {CbServer.Settings.CBAS_MEM_QUOTA: CBAS_QUOTA})
            self.cbas_util = None
            if self.cluster.cbas_nodes:
                if not self.cbas_node:
                    available_cbas_nodes = list(
                        filter(lambda node: node.ip != self.cluster.master.ip,
                               self.cluster.cbas_nodes))
                    self.cbas_node = available_cbas_nodes[0]
                if self.set_cbas_memory_from_available_free_memory:
                    self.set_memory_for_services(self.rest, self.cluster_util,
                                                 self.cbas_node,
                                                 self.cbas_node.services)
                self.cbas_util = CbasUtil(self.cluster.master, self.cbas_node)
                self.cbas_util_v2 = CbasUtilV2(self.cluster.master,
                                               self.cbas_node, self.task)
                if "cbas" in self.cluster.master.services:
                    self.cleanup_cbas()
                if add_default_cbas_node:
                    if self.cluster.master.ip != self.cbas_node.ip:
                        self.otpNodes.append(
                            self.cluster_util.add_node(self.cbas_node))
                        self.cluster.nodes_in_cluster.append(self.cbas_node)
                        if nodes_init:
                            idx = nodes_init.index(self.cbas_node)
                            services.pop(idx)
                            nodes_init.remove(self.cbas_node)
                    else:
                        self.otpNodes = self.rest.node_statuses()
                    ''' This cbas cleanup is actually not needed.
                        When a node is added to the cluster, 
                        it is automatically cleaned up.'''
                    self.cleanup_cbas()
                    self.cluster.cbas_nodes.remove(self.cbas_node)
            if nodes_init:
                self.task.rebalance([self.cluster.master],
                                    nodes_init, [],
                                    services=services)
                self.cluster.nodes_in_cluster.extend(nodes_init)
            if self.bucket_spec is not None:
                try:
                    self.collectionSetUp(self.cluster, self.bucket_util,
                                         self.cluster_util)
                except Java_base_exception as exception:
                    self.handle_collection_setup_exception(exception)
                except Exception as exception:
                    self.handle_collection_setup_exception(exception)
            else:
                if self.default_bucket:
                    self.bucket_util.create_default_bucket(
                        self.cluster,
                        bucket_type=self.bucket_type,
                        ram_quota=self.bucket_size,
                        replica=self.num_replicas,
                        conflict_resolution=self.
                        bucket_conflict_resolution_type,
                        replica_index=self.bucket_replica_index,
                        storage=self.bucket_storage,
                        eviction_policy=self.bucket_eviction_policy,
                        flush_enabled=self.flush_enabled)
                elif self.cb_bucket_name in self.sample_bucket_dict.keys():
                    self.sample_bucket = \
                        self.sample_bucket_dict[self.cb_bucket_name]

        elif len(self._cb_cluster) > 1:
            # Multi Cluster Support
            for cluster in self._cb_cluster:
                for server in cluster.servers:
                    if CbServer.Services.CBAS in server.services:
                        cluster.cbas_nodes.append(server)
                    if CbServer.Services.KV in server.services:
                        cluster.kv_nodes.append(server)
                    rest = RestConnection(server)
                    rest.set_data_path(data_path=server.data_path,
                                       index_path=server.index_path,
                                       cbas_path=server.cbas_path)

                if self.expected_error:
                    cluster.expected_error = \
                        self.expected_error.replace("INVALID_IP", invalid_ip)
                    cluster.expected_error = \
                        self.expected_error.replace("PORT",
                                                    cluster.master.port)

                cluster.otpNodes = list()
                cluster.cbas_path = server.cbas_path

                cluster.rest = RestConnection(cluster.master)

                if not self.set_cbas_memory_from_available_free_memory:
                    self.log.info(
                        "Setting the min possible memory quota so that adding "
                        "more nodes to the cluster wouldn't be a problem.")
                    cluster.rest.set_service_mem_quota({
                        CbServer.Settings.KV_MEM_QUOTA:
                        MIN_KV_QUOTA,
                        CbServer.Settings.FTS_MEM_QUOTA:
                        FTS_QUOTA,
                        CbServer.Settings.INDEX_MEM_QUOTA:
                        INDEX_QUOTA
                    })
                    cluster.set_cbas_memory_from_available_free_memory = \
                        self.input.param(
                            'set_cbas_memory_from_available_free_memory', False)

                    self.log.info("Setting %d memory quota for CBAS" %
                                  CBAS_QUOTA)
                    cluster.cbas_memory_quota = CBAS_QUOTA
                    cluster.rest.set_service_mem_quota(
                        {CbServer.Settings.CBAS_MEM_QUOTA: CBAS_QUOTA})

                cluster.cbas_util = None
                # Drop any existing buckets and datasets
                if cluster.cbas_nodes:
                    cluster.cbas_node = cluster.cbas_nodes[0]
                    if self.set_cbas_memory_from_available_free_memory:
                        self.set_memory_for_services(
                            cluster.rest, cluster.cluster_util,
                            cluster.cbas_node, cluster.cbas_node.services)
                    cluster.cbas_util = CbasUtil(cluster.master,
                                                 cluster.cbas_node, self.task)
                    cluster.cbas_util_v2 = CbasUtilV2(cluster.master,
                                                      cluster.cbas_node)
                    if "cbas" in cluster.master.services:
                        self.cleanup_cbas(cluster.cbas_util)
                    if add_default_cbas_node:
                        if cluster.master.ip != cluster.cbas_node.ip:
                            cluster.otpNodes.append(
                                cluster.cluster_util.add_node(
                                    cluster, cluster.cbas_node))
                        else:
                            cluster.otpNodes = cluster.rest.node_statuses()
                        """
                        This cbas cleanup is actually not needed.
                        When a node is added to the cluster,
                        it is automatically cleaned up.
                        """
                        self.cleanup_cbas(cluster.cbas_util)
                        cluster.cbas_nodes.remove(cluster.cbas_node)
                if self.bucket_spec is not None:
                    try:
                        self.collectionSetUp(cluster, cluster.bucket_util,
                                             cluster.cluster_util)
                    except Java_base_exception as exception:
                        self.handle_collection_setup_exception(exception)
                    except Exception as exception:
                        self.handle_collection_setup_exception(exception)
                else:
                    if self.default_bucket:
                        cluster.bucket_util.create_default_bucket(
                            self.cluster,
                            bucket_type=self.bucket_type,
                            ram_quota=self.bucket_size,
                            replica=self.num_replicas,
                            conflict_resolution=self.
                            bucket_conflict_resolution_type,
                            replica_index=self.bucket_replica_index,
                            storage=self.bucket_storage,
                            eviction_policy=self.bucket_eviction_policy,
                            flush_enabled=self.flush_enabled)
                    elif self.cb_bucket_name in self.sample_bucket_dict.keys():
                        self.sample_bucket = self.sample_bucket_dict[
                            self.cb_bucket_name]

                cluster.bucket_util.add_rbac_user(self.cluster.master)

        else:
            self.fail("No cluster is available")
        self.log.info(
            "=== CBAS_BASE setup was finished for test #{0} {1} ===".format(
                self.case_number, self._testMethodName))
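
The services_init parsing near the top of this setUp turns a dash-separated spec into per-node service lists: split("-") yields one entry per node, and ":" within an entry separates multiple services on that node, rewritten as "," for the REST call. A small illustration (the spec string is hypothetical):

    # hypothetical spec for nodes_init = 3; the first entry is the master's
    services_init = "kv:n1ql-cbas-kv:index"
    services = [s.replace(":", ",")
                for s in services_init.split("-")[1:3]]
    # -> ["cbas", "kv,index"]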
Example #16
    def __setup_buckets(self):
        self.cluster.buckets = self.bucket_util.get_all_buckets(self.cluster)
        for bucket in self.bucket_conf["buckets"]:
            bucket_obj = None
            # Skip bucket creation if already exists in cluster
            # Note: Useful while running instances for multi-tenant case
            for existing_bucket in self.cluster.buckets:
                if existing_bucket.name == bucket["name"]:
                    bucket_obj = existing_bucket
                    break
            if bucket_obj is None:
                if bucket["sample_bucket"] is True:
                    if bucket["name"] == "travel-sample":
                        s_bucket = TravelSample()
                    elif bucket["name"] == "beer-sample":
                        s_bucket = BeerSample()
                    elif bucket["name"] == "gamesim-sample":
                        s_bucket = GamesimSample()
                    else:
                        self.fail("Invalid sample bucket '%s'" %
                                  bucket["name"])

                    if self.bucket_util.load_sample_bucket(
                            self.cluster, s_bucket) is False:
                        self.fail("Failed to load sample bucket")
                    if Bucket.ramQuotaMB in bucket:
                        BucketHelper(self.cluster.master).change_bucket_props(
                            self.cluster.buckets[-1],
                            ramQuotaMB=bucket[Bucket.ramQuotaMB])
                else:
                    self.bucket_util.create_default_bucket(
                        cluster=self.cluster,
                        bucket_name=bucket["name"],
                        bucket_type=bucket.get(Bucket.bucketType,
                                               Bucket.Type.MEMBASE),
                        ram_quota=bucket.get(Bucket.ramQuotaMB, None),
                        replica=bucket.get(Bucket.replicaNumber,
                                           Bucket.ReplicaNum.ONE),
                        maxTTL=bucket.get(Bucket.maxTTL, 0),
                        storage=bucket.get(Bucket.storageBackend,
                                           Bucket.StorageBackend.couchstore),
                        eviction_policy=bucket.get(
                            Bucket.evictionPolicy,
                            Bucket.EvictionPolicy.VALUE_ONLY),
                        bucket_durability=bucket.get(
                            Bucket.durabilityMinLevel,
                            Bucket.DurabilityLevel.NONE))

                bucket_obj = self.cluster.buckets[-1]

            self.map_collection_data(bucket_obj)
            self.__print_step("Creating required scope/collections")
            for scope in bucket["scopes"]:
                if scope["name"] in bucket_obj.scopes.keys():
                    self.log.debug("Scope %s already exists for bucket %s" %
                                   (scope["name"], bucket_obj.name))
                else:
                    self.bucket_util.create_scope(self.cluster.master,
                                                  bucket_obj, scope)
                    bucket_obj.stats.increment_manifest_uid()
                for collection in scope["collections"]:
                    if collection["name"] in \
                            bucket_obj.scopes[scope["name"]].collections:
                        self.log.debug("Collection %s :: %s exists" %
                                       (scope["name"], collection["name"]))
                    else:
                        self.bucket_util.create_collection(
                            self.cluster.master, bucket_obj, scope["name"],
                            collection)
                        bucket_obj.stats.increment_manifest_uid()

            # Create RBAC users
            for t_bucket in self.rbac_conf["rbac_roles"]:
                if t_bucket["bucket"] == bucket["name"]:
                    self.create_rbac_users("rbac_admin", "rbac_admin",
                                           t_bucket["roles"])
                    break
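
The loop above is driven entirely by self.bucket_conf. A plausible shape for one entry, inferred purely from the keys the code reads (bucket["name"], bucket["sample_bucket"], Bucket.ramQuotaMB, and the nested scopes/collections lists); the exact schema is an assumption:

    bucket_conf = {
        "buckets": [
            {"name": "travel-sample",
             "sample_bucket": True,
             "ramQuotaMB": 256,  # assuming Bucket.ramQuotaMB == "ramQuotaMB"
             "scopes": [
                 {"name": "flights",
                  "collections": [{"name": "bookings"},
                                  {"name": "passengers"}]}
             ]}
        ]
    }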
Example #17
    def setUp(self):
        super(TravelSampleApp, self).setUp()

        self.cluster_conf = self.input.param("cluster_conf", None)
        self.bucket_conf = self.input.param("bucket_conf", None)
        self.service_conf = self.input.param("service_conf", None)

        self.log_setup_status("TravelSampleApp", "started")
        self.step_num = 1
        self.app_iteration = self.input.param("iteration", 1)

        self.rbac_util = RbacUtil()
        self.sdk_clients = global_vars.sdk_clients
        self.config_path = "pytests/bucket_collections/app/config/"

        with open(self.config_path + "cluster.yaml", "r") as fp:
            self.cluster_config = YAML().load(fp.read())

        # Override nodes_init, services_init from yaml data
        self.nodes_init = self.cluster_config["cb_cluster"]["nodes_init"]
        self.services_init = self.cluster_config["cb_cluster"]["services"]

        if not self.skip_setup_cleanup:
            # Rebalance_in required nodes
            nodes_init = self.cluster.servers[1:self.nodes_init] \
                if self.nodes_init != 1 else []
            self.task.rebalance([self.cluster.master], nodes_init, [],
                                services=self.services_init[1:])
            self.cluster.nodes_in_cluster.extend(
                [self.cluster.master] + nodes_init)

            # Load travel_sample bucket
            ts_bucket = TravelSample()
            if self.bucket_util.load_sample_bucket(ts_bucket) is False:
                self.fail("Failed to load sample bucket")

            self.bucket = self.bucket_util.buckets[0]
            self.bucket_util.update_bucket_property(
                self.bucket, ram_quota_mb=self.bucket_size)

            # Create required scope/collections
            self.create_scope_collections()
            self.bucket.scopes[
                CbServer.default_scope].collections[
                CbServer.default_collection].num_items \
                = ts_bucket.scopes[CbServer.default_scope].collections[
                    CbServer.default_collection].num_items
            self.sleep(20, "Wait for num_items to get updated")
            self.bucket_util.validate_docs_per_collections_all_buckets()

            # Create RBAC users
            self.create_rbac_users()

            #  Opening required clients
            self.create_sdk_clients()

            # Loading initial data into created collections
            self.load_initial_collection_data()
            self.bucket_util.validate_docs_per_collections_all_buckets()

            # Configure backup settings
            self.configure_bucket_backups()

            # Create required GSIs
            self.create_indexes()

            # Create required CBAS datasets/indexes
            self.create_cbas_indexes()
        else:
            self.bucket = self.bucket_util.buckets[0]
            self.map_collection_data()

            #  Opening required clients
            self.create_sdk_clients()

        global_vars.app_current_date = query_util.CommonUtil.get_current_date()
        self.log_setup_status("TravelSampleApp", "complete")
Example #18
    def test_rest_api_authorization_cbas_cluster_info_api(self):
        validation_failed = False

        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        api_authentication = [
            {
                "api_url":
                "http://{0}:8095/analytics/cluster".format(self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/cluster/cc".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/diagnostics".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/node/diagnostics".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/cc/config".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 401
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/node/config".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 401
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:9110/analytics/node/agg/stats/remaining".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "analytics_manager[*]",
                    "expected_status": 200
                }, {
                    "role": "analytics_reader",
                    "expected_status": 200
                }],
            },
            {
                "api_url":
                "http://{0}:8095/analytics/backup?bucket=travel-sample".format(
                    self.cbas_node.ip),
                "roles": [
                    {
                        "role": "admin",
                        "expected_status": 200
                    },
                    {
                        "role": "data_backup[*],analytics_reader",
                        "expected_status": 200
                    },
                    {
                        "role": "data_backup[*], analytics_manager[*]",
                        "expected_status": 200
                    },
                    {
                        "role": "data_backup[travel-sample], analytics_reader",
                        "expected_status": 200
                    },
                    {
                        "role":
                        "data_backup[travel-sample], analytics_manager[travel-sample]",
                        "expected_status": 200
                    },
                    {
                        "role": "ro_admin",
                        "expected_status": 401
                    },
                    {
                        "role": "analytics_reader",
                        "expected_status": 401
                    },
                    {
                        "role": "analytics_manager[*]",
                        "expected_status": 401
                    },
                    {
                        "role": "data_backup[beer-sample], analytics_reader",
                        "expected_status": 401
                    },
                    {
                        "role":
                        "data_backup[beer-sample], analytics_manager[*]",
                        "expected_status": 401
                    },
                    {
                        "role":
                        "data_backup[beer-sample], analytics_manager[beer-sample]",
                        "expected_status": 401
                    },
                ],
            },
            {
                "api_url":
                "http://{0}:8095/analytics/cluster/restart".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "cluster_admin",
                    "expected_status": 202
                }, {
                    "role": "admin",
                    "expected_status": 202
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }],
                "method":
                "POST"
            },
        ]

        shell = RemoteMachineShellConnection(self.cluster.master)

        for api in api_authentication:
            for role in api["roles"]:
                self.rbac_util._create_user_and_grant_role(
                    "testuser", role["role"])
                self.sleep(5)

                if "method" in api:
                    output, error = shell.execute_command(
                        """curl -i {0} -X {1} -u {2}:{3} 2>/dev/null | head -n 1 | cut -d$' ' -f2"""
                        .format(api["api_url"], api["method"], "testuser",
                                "password"))
                    self.sleep(10)
                else:
                    output, error = shell.execute_command(
                        """curl -i {0} -u {1}:{2} 2>/dev/null | head -n 1 | cut -d$' ' -f2"""
                        .format(api["api_url"], "testuser", "password"))
                response = ""
                for line in output:
                    response = response + line
                response = json.loads(str(response))
                if response != role["expected_status"]:
                    self.log.info(
                        "Error accessing {0} as user with {1} role. Response = {2}"
                        .format(api["api_url"], role["role"], response))
                    validation_failed = True
                else:
                    self.log.info(
                        "Accessing {0} as user with {1} role worked as expected"
                        .format(api["api_url"], role["role"]))

                self.rbac_util._drop_user("testuser")

        shell.disconnect()

        self.assertFalse(
            validation_failed,
            "Authentication errors with some APIs. Check the test log above.")
Example #19
    def setUp(self, add_default_cbas_node=True):
        super(CBASBaseTest, self).setUp()
        if self._testMethodDoc:
            self.log.info("Starting Test: %s - %s"
                          % (self._testMethodName, self._testMethodDoc))
        else:
            self.log.info("Starting Test: %s" % self._testMethodName)

        for server in self.cluster.servers:
            if "cbas" in server.services:
                self.cluster.cbas_nodes.append(server)
            if "kv" in server.services:
                self.cluster.kv_nodes.append(server)
            rest = RestConnection(server)
            rest.set_data_path(data_path=server.data_path,
                               index_path=server.index_path,
                               cbas_path=server.cbas_path)

        invalid_ip = '10.111.151.109'
        self._cb_cluster = self.task
        self.cb_bucket_name = self.input.param('cb_bucket_name',
                                               'travel-sample')
        self.sample_bucket_dict = {TravelSample().name: TravelSample(),
                                   BeerSample().name: BeerSample()}
        self.sample_bucket = None
        self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
        self.cb_bucket_password = self.input.param('cb_bucket_password', None)
        self.expected_error = self.input.param("error", None)
        if self.expected_error:
            self.expected_error = self.expected_error.replace("INVALID_IP",
                                                              invalid_ip)
            self.expected_error = \
                self.expected_error.replace("PORT", self.cluster.master.port)
        self.cb_server_ip = self.input.param("cb_server_ip", None)
        self.cb_server_ip = \
            self.cb_server_ip.replace('INVALID_IP', invalid_ip) \
            if self.cb_server_ip is not None else None
        self.cbas_dataset_name = self.input.param("cbas_dataset_name",
                                                  'travel_ds')
        self.cbas_bucket_name_invalid = \
            self.input.param('cbas_bucket_name_invalid', self.cbas_bucket_name)
        self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
        self.skip_create_dataset = self.input.param('skip_create_dataset',
                                                    False)
        self.disconnect_if_connected = \
            self.input.param('disconnect_if_connected', False)
        self.cbas_dataset_name_invalid = \
            self.input.param('cbas_dataset_name_invalid',
                             self.cbas_dataset_name)
        self.skip_drop_connection = self.input.param('skip_drop_connection',
                                                     False)
        self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)
        self.query_id = self.input.param('query_id', None)
        self.mode = self.input.param('mode', None)
        self.num_concurrent_queries = self.input.param('num_queries', 5000)
        self.concurrent_batch_size = self.input.param('concurrent_batch_size',
                                                      100)
        self.compiler_param = self.input.param('compiler_param', None)
        self.compiler_param_val = self.input.param('compiler_param_val', None)
        self.expect_reject = self.input.param('expect_reject', False)
        self.expect_failure = self.input.param('expect_failure', False)
        self.compress_dataset = self.input.param('compress_dataset', False)
        self.index_name = self.input.param('index_name', "NoName")
        self.index_fields = self.input.param('index_fields', None)
        self.retry_time = self.input.param("retry_time", 300)
        self.num_retries = self.input.param("num_retries", 1)
        self.flush_enabled = Bucket.FlushBucket.ENABLED
        self.test_abort_snapshot = self.input.param("test_abort_snapshot",
                                                    False)
        if self.index_fields:
            self.index_fields = self.index_fields.split("-")
        self.otpNodes = list()
        self.cbas_path = server.cbas_path

        self.rest = RestConnection(self.cluster.master)
        self.log.info("Setting the min possible memory quota so that adding "
                      "more nodes to the cluster wouldn't be a problem.")
        self.rest.set_service_memoryQuota(service='memoryQuota',
                                          memoryQuota=MIN_KV_QUOTA)
        self.rest.set_service_memoryQuota(service='ftsMemoryQuota',
                                          memoryQuota=FTS_QUOTA)
        self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                          memoryQuota=INDEX_QUOTA)

        self.set_cbas_memory_from_available_free_memory = \
            self.input.param('set_cbas_memory_from_available_free_memory',
                             False)
        if self.set_cbas_memory_from_available_free_memory:
            info = self.rest.get_nodes_self()
            self.cbas_memory_quota = int((info.memoryFree // 1024 ** 2) * 0.9)
            self.log.info("Setting %d memory quota for CBAS"
                          % self.cbas_memory_quota)
            self.rest.set_service_memoryQuota(
                service='cbasMemoryQuota',
                memoryQuota=self.cbas_memory_quota)
        else:
            self.log.info("Setting %d memory quota for CBAS" % CBAS_QUOTA)
            self.cbas_memory_quota = CBAS_QUOTA
            self.rest.set_service_memoryQuota(service='cbasMemoryQuota',
                                              memoryQuota=CBAS_QUOTA)

        self.cbas_util = None
        # Drop any existing buckets and datasets
        if self.cluster.cbas_nodes:
            self.cbas_node = self.cluster.cbas_nodes[0]
            self.cbas_util = CbasUtil(self.cluster.master, self.cbas_node,
                                      self.task)
            if "cbas" in self.cluster.master.services:
                self.cleanup_cbas()
            if add_default_cbas_node:
                if self.cluster.master.ip != self.cbas_node.ip:
                    self.otpNodes.append(
                        self.cluster_util.add_node(self.cbas_node))
                else:
                    self.otpNodes = self.rest.node_statuses()
                """
                This cbas cleanup is actually not needed.
                When a node is added to the cluster,
                it is automatically cleaned up.
                """
                self.cleanup_cbas()
                self.cluster.cbas_nodes.remove(self.cbas_node)
        if self.default_bucket:
            self.bucket_util.create_default_bucket(
                bucket_type=self.bucket_type,
                ram_quota=self.bucket_size,
                replica=self.num_replicas,
                conflict_resolution=self.bucket_conflict_resolution_type,
                replica_index=self.bucket_replica_index,
                storage=self.bucket_storage,
                eviction_policy=self.bucket_eviction_policy,
                flush_enabled=self.flush_enabled)
        elif self.cb_bucket_name in self.sample_bucket_dict.keys():
            self.sample_bucket = self.sample_bucket_dict[self.cb_bucket_name]

        self.bucket_util.add_rbac_user()
        self.log.info("=== CBAS_BASE setup was finished for test #{0} {1} ==="
                      .format(self.case_number, self._testMethodName))
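
The free-memory branch above sizes the CBAS quota with int((info.memoryFree // 1024 ** 2) * 0.9). Isolated as a worked example (memoryFree is assumed to be reported in bytes, so // 1024**2 converts to MiB, 90% of which is handed to CBAS):

    def cbas_quota_mb(memory_free_bytes, fraction=0.9):
        # e.g. 8 GiB free -> int(8192 * 0.9) == 7372 MiB for CBAS
        return int((memory_free_bytes // 1024 ** 2) * fraction)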