Example #1
    def test_change_global_scrape_timeout(self):
        """
        Change global scrape timeout and verify the prometheus config by querying Prometheus Federation
        (Positive test case as a valid scrape_timeout is always less than scrape_interval)
        """
        scrape_timeout = self.input.param("scrape_timeout", 5)
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())

        self.log.info("Changing scrape interval to {0}".format(scrape_timeout))
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "scrape_timeout", scrape_timeout)

        self.log.info("Validating by querying prometheus")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "prometheus_auth_enabled", "false")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "listen_addr_type", "any")
        self.sleep(10, "Waiting for prometheus federation")
        query = "status/config"
        yaml = YAML()
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).query_prometheus_federation(query)
            yaml_code = yaml.load(content["data"]["yaml"])
            global_scrape_timeout = yaml_code["global"]["scrape_timeout"]
            if str(global_scrape_timeout) != (str(scrape_timeout) + "s"):
                self.fail("Expected scrape timeout {0}, but Actual {1}".format(
                    scrape_timeout, global_scrape_timeout))
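For context, the validation above goes through Prometheus's HTTP API. A minimal standalone sketch of the same check, assuming the node's embedded Prometheus is reachable without auth (the test disables prometheus_auth_enabled first); host and port are placeholders, and ruamel.yaml is assumed from the YAML() usage in the test:

    import requests
    from ruamel.yaml import YAML

    def get_global_scrape_timeout(host="127.0.0.1", port=9123):
        # Prometheus serves its effective config at /api/v1/status/config
        resp = requests.get(
            "http://{0}:{1}/api/v1/status/config".format(host, port))
        resp.raise_for_status()
        yaml_code = YAML().load(resp.json()["data"]["yaml"])
        return yaml_code["global"]["scrape_timeout"]  # e.g. "5s"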
Example #2
    def test_check_get_all_metrics(self):
        """
        Test the /metrics endpoint. Validate for duplicates and expected prefixes
        """
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).get_all_metrics()
            StatsHelper(server)._validate_metrics(content)
        # Note: this prints only the content fetched from the last server
        for line in content:
            print(line.strip("\n"))
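_validate_metrics is not shown in this section; the following is a hedged sketch of the duplicate-and-prefix checks it presumably performs on Prometheus exposition lines. The prefix list is an illustrative assumption, not the helper's real list:

    def validate_metrics_lines(lines, expected_prefixes=("cm_", "kv_", "sys")):
        seen = set()
        for line in lines:
            line = line.strip("\n")
            if not line or line.startswith("#"):
                # skip blank lines and "# HELP" / "# TYPE" comments
                continue
            assert line not in seen, "Duplicate metric sample: %s" % line
            seen.add(line)
            metric_name = line.split("{")[0].split(" ")[0]
            assert metric_name.startswith(expected_prefixes), \
                "Unexpected metric prefix: %s" % metric_name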
Example #3
    def test_check_high_cardinality_metrics(self):
        """
        Check if _prometheusMetrics returns high cardinality metrics by default
        ie; High cardinality metrics are collected by default
        Also serves as a check if prometheus is running on all nodes
        """
        component = self.input.param("component", "kv")
        parse = self.input.param("parse", False)

        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).get_prometheus_metrics_high(
                component=component, parse=parse)
            if not parse:
                StatsHelper(server)._validate_metrics(content)
        # Note: this prints only the content fetched from the last server
        for line in content:
            print(line.strip("\n"))
Example #4
    def test_disable_external_prometheus_high_cardinality_metrics(self):
        """
        Disable exposition of high cardinality metrics by ns-server's /metrics endpoint
        Validate by checking that there are no high cardinality metrics returned at
        /metrics endpoint ie; check if
        total number low cardinality metrics = total number of metrics at /metrics endpoint
        """
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())

        self.log.info(
            "Disabling external prometheus high cardinality metrics of all services"
        )
        value = "[{index,[{high_cardinality_enabled,false}]}, {fts,[{high_cardinality_enabled,false}]},\
                         {kv,[{high_cardinality_enabled,false}]}, {cbas,[{high_cardinality_enabled,false}]}, \
                         {eventing,[{high_cardinality_enabled,false}]}]"

        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "external_prometheus_services", value)
        for server in self.cluster.servers[:self.nodes_init]:
            len_low_cardinality_metrics = 0
            content = StatsHelper(server).get_prometheus_metrics(
                component="ns_server", parse=False)
            self.log.info("lc count of ns_server on {0} is {1}".format(
                server.ip, len(content)))
            len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                content)
            server_services = self.get_services_from_node(server)
            for component in server_services:
                content = StatsHelper(server).get_prometheus_metrics(
                    component=component, parse=False)
                self.log.info("lc count of {2} on {0} is {1}".format(
                    server.ip, len(content), component))
                len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                    content)
            content = StatsHelper(server).get_all_metrics()
            len_metrics = len(content)
            if len_metrics != len_low_cardinality_metrics:
                self.fail(
                    "Metrics count mismatch on node {0}: total lc metrics "
                    "count {1}, total metrics count {2}".format(
                        server.ip, len_low_cardinality_metrics, len_metrics))
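configure_stats_settings_from_diag_eval presumably wraps ns_server's /diag/eval endpoint, which evaluates an Erlang expression on the node. A hedged sketch of such a call; the ns_config:set_sub expression below is an illustrative assumption, not the helper's verified implementation:

    import requests

    def set_stats_setting(host, key, value,
                          user="Administrator", password="password"):
        # /diag/eval evaluates the posted Erlang expression on the node;
        # the exact expression the helper builds is assumed, not confirmed
        expr = "ns_config:set_sub(stats_settings, [{%s, %s}])." % (key, value)
        resp = requests.post("http://{0}:8091/diag/eval".format(host),
                             auth=(user, password), data=expr)
        resp.raise_for_status()
        return resp.text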
Example #5
    def test_disable_external_prometheus_high_cardinality_metrics(self):
        """
        Disable exposition of high cardinality metrics by ns-server's /metrics endpoint
        Validate by checking that there are no high cardinality metrics returned at
        /metrics endpoint ie; check if
        total number low cardinality metrics = total number of metrics at /metrics endpoint
        """
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        self.log.info(
            "Disabling external prometheus high cardinality metrics of all services"
        )
        metrics_data = '{"statsExport":{"analytics":{"highCardEnabled":false}, "clusterManager":{"highCardEnabled":false},\
                "data":{"highCardEnabled":false}, "eventing":{"highCardEnabled":false}, \
                "fullTextSearch":{"highCardEnabled":false}, "index":{"highCardEnabled":false}}}'

        StatsHelper(self.cluster.master).configure_stats_settings_from_api(
            metrics_data)
        for server in self.cluster.servers[:self.nodes_init]:
            len_low_cardinality_metrics = 0
            content = StatsHelper(server).get_prometheus_metrics(
                component="ns_server", parse=False)
            self.log.info("lc count of ns_server on {0} is {1}".format(
                server.ip, len(content)))
            len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                content)
            server_services = self.get_services_from_node(server)
            for component in server_services:
                content = StatsHelper(server).get_prometheus_metrics(
                    component=component, parse=False)
                self.log.info("lc count of {2} on {0} is {1}".format(
                    server.ip, len(content), component))
                len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                    content)
            self.sleep(20, "Wait before fetching metrics")
            content = StatsHelper(server).get_all_metrics()
            len_metrics = len(content)
            if len_metrics != len_low_cardinality_metrics:
                self.fail(
                    "Metrics count mismatch on node {0}: total lc metrics "
                    "count {1}, total metrics count {2}".format(
                        server.ip, len_low_cardinality_metrics, len_metrics))
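This variant pushes the same settings through a JSON REST payload instead of diag_eval. A hedged sketch of what configure_stats_settings_from_api might do; the /settings/metrics path is inferred from the helper's name, not confirmed from its source:

    import requests

    def set_stats_settings_api(host, metrics_data,
                               user="Administrator", password="password"):
        # metrics_data is the JSON string built in the test above;
        # "/settings/metrics" is a placeholder for the real endpoint
        resp = requests.post("http://{0}:8091/settings/metrics".format(host),
                             auth=(user, password),
                             headers={"Content-Type": "application/json"},
                             data=metrics_data)
        resp.raise_for_status()
        return resp.text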
Example #6
    def test_create_dataset_on_connected_link(self):
        beer_sample = BeerSample()
        result = self.bucket_util.load_sample_bucket(beer_sample)
        self.assertTrue(result, "Bucket Creation Failed.")

        result = self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name=self.sample_bucket.name, cbas_dataset_name="ds2")
        self.assertTrue(result, "Dataset Creation Failed.")

        self.cbas_util.connect_link()

        result = self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name=beer_sample.name, cbas_dataset_name="ds1")
        self.assertTrue(result, "Dataset Creation Failed.")

        self.cbas_util.connect_link()
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                "ds1", beer_sample.stats.expected_item_count),
            "Data loss in CBAS.")
Example #7
    def test_change_global_scrape_interval(self):
        """
        1. Change global scrape interval via diag eval
        2. verify the prometheus config by querying Prometheus Federation
        3. Reset the global scrape interval back to default via rest api
        4. verify the prometheus config by querying Prometheus Federation
        """
        def verify_prometheus_config(expected_scrape_interval):
            self.log.info("Validating by querying prometheus")
            StatsHelper(
                self.cluster.master).configure_stats_settings_from_diag_eval(
                    "prometheus_auth_enabled", "false")
            StatsHelper(
                self.cluster.master).configure_stats_settings_from_diag_eval(
                    "listen_addr_type", "any")
            self.sleep(10, "Waiting for prometheus federation")
            for server in self.cluster.servers[:self.nodes_init]:
                content = StatsHelper(server).query_prometheus_federation(
                    query)
                yaml_code = yaml.load(content["data"]["yaml"])
                global_scrape_interval = yaml_code["global"]["scrape_interval"]
                if str(global_scrape_interval) != (
                        str(expected_scrape_interval) + "s"):
                    self.fail(
                        "Expected scrape interval {0}, but Actual {1}".format(
                            expected_scrape_interval, global_scrape_interval))

        scrape_interval = self.input.param("scrape_interval", 15)
        query = "status/config"
        yaml = YAML()
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        self.log.info("Changing scrape interval to {0} via diag_eval".format(
            scrape_interval))
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "scrape_interval", scrape_interval)
        verify_prometheus_config(expected_scrape_interval=scrape_interval)

        self.log.info("Changing scrape interval to 10s via rest api")
        settings = StatsHelper(self.cluster.master).change_scrape_interval(10)
        verify_prometheus_config(expected_scrape_interval=10)
Example #8
    def test_drop_one_bucket(self):
        beer_sample = BeerSample()
        result = self.bucket_util.load_sample_bucket(beer_sample)
        self.assertTrue(result, "Bucket Creation Failed.")

        result = self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name=self.sample_bucket.name, cbas_dataset_name="ds1")
        self.assertTrue(result, "Dataset Creation Failed.")

        result = self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name=beer_sample.name, cbas_dataset_name="ds2")
        self.assertTrue(result, "Dataset Creation Failed.")

        self.cbas_util.connect_link()
        deleted = BucketHelper(self.cluster.master).delete_bucket(
            beer_sample.name)
        self.assertTrue(deleted, "Failed to delete KV bucket")
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                "ds1", self.sample_bucket.stats.expected_item_count),
            "Data loss in CBAS.")
Example #9
    def test_query_running_into_overflow(self):

        self.log.info("Load beer-sample bucket")
        self.sample_bucket = BeerSample()
        result = self.bucket_util.load_sample_bucket(self.cluster,
                                                     self.sample_bucket)
        self.assertTrue(result, "Failed to load sample bucket")

        self.log.info("Create connection")
        self.cbas_util.createConn(self.sample_bucket.name)

        self.log.info("Create dataset beers")
        self.cbas_util.create_dataset_on_bucket(self.sample_bucket.name,
                                                "beers",
                                                where_field="type",
                                                where_value="beer")

        self.log.info("Create dataset breweries")
        self.cbas_util.create_dataset_on_bucket(self.sample_bucket.name,
                                                "breweries",
                                                where_field="type",
                                                where_value="brewery")

        self.log.info("Connect link Local")
        self.cbas_util.connect_link()

        self.log.info("Verify query doesn't result in stack overflow")
        query = '''SELECT bw.name AS brewer, (
                              SELECT br.name, br.abv 
                              FROM beers br 
                              WHERE br.brewery_id = meta(bw).id
                              ) AS beers 
                              FROM breweries bw 
                              ORDER BY array_count(beers)
                              LIMIT 2
               '''
        status, _, _, results, _ = self.cbas_util.execute_statement_on_cbas_util(
            query)
        self.assertTrue(status, msg="Failed to execute query")
Example #10
    def test_disable_high_cardinality_metrics(self):
        """
        Disable Prometheus from scraping high cardinality metrics
        Validate by querying Prometheus directly for its active targets
        """
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())

        self.log.info("Disabling high cardinality metrics of all services")
        value = "[{index,[{high_cardinality_enabled,false}]}, {fts,[{high_cardinality_enabled,false}]},\
                 {kv,[{high_cardinality_enabled,false}]}, {cbas,[{high_cardinality_enabled,false}]}, \
                 {eventing,[{high_cardinality_enabled,false}]}]"

        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "services", value)

        self.log.info("Validating by querying prometheus")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "prometheus_auth_enabled", "false")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "listen_addr_type", "any")
        self.sleep(10, "Waiting for prometheus federation")
        query = "targets?state=active"
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).query_prometheus_federation(query)
            active_targets = content["data"]["activeTargets"]
            if len(active_targets) == 0:
                self.fail("Prometheus did not return any active targets")
            for active_targets_dict in active_targets:
                job = active_targets_dict["labels"]["job"]
                self.log.info("Job name {0}".format(job))
                if "high_cardinality" in job:
                    self.fail(
                        "Prometheus is still scraping target with job name {0} on {1}"
                        .format(job, server.ip))
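A standalone sketch of the active-targets check, hitting Prometheus's /api/v1/targets API directly (host and port are placeholders; the test disables auth first so the endpoint is reachable):

    import requests

    def high_cardinality_jobs(host="127.0.0.1", port=9123):
        resp = requests.get(
            "http://{0}:{1}/api/v1/targets?state=active".format(host, port))
        resp.raise_for_status()
        targets = resp.json()["data"]["activeTargets"]
        # any surviving job with "high_cardinality" in its name is a failure
        return [t["labels"]["job"] for t in targets
                if "high_cardinality" in t["labels"]["job"]]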
Example #11
    def test_disable_high_cardinality_metrics(self):
        """
        Disable Prometheus from scraping high cardinality metrics
        Validate by querying Prometheus directly for its active targets
        """
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        self.log.info("Disabling high cardinality metrics of all services")
        metrics_data = '{"services":{"analytics":{"highCardEnabled":false}, "clusterManager":{"highCardEnabled":false},\
        "data":{"highCardEnabled":false}, "eventing":{"highCardEnabled":false}, \
        "fullTextSearch":{"highCardEnabled":false}, "index":{"highCardEnabled":false}}}'

        StatsHelper(self.cluster.master).configure_stats_settings_from_api(
            metrics_data)

        self.log.info("Validating by querying prometheus")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "prometheus_auth_enabled", "false")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "listen_addr_type", "any")
        self.sleep(10, "Waiting for prometheus federation")
        query = "targets?state=active"
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).query_prometheus_federation(query)
            active_targets = content["data"]["activeTargets"]
            if len(active_targets) == 0:
                self.fail("Prometheus did not return any active targets")
            for active_targets_dict in active_targets:
                job = active_targets_dict["labels"]["job"]
                self.log.info("Job name {0}".format(job))
                if "high_cardinality" in job:
                    self.fail(
                        "Prometheus is still scraping target with job name {0} on {1}"
                        .format(job, server.ip))
Example #12
    def setUp(self, add_default_cbas_node=True):
        super(CBASBaseTest, self).setUp()
        if self._testMethodDoc:
            self.log.info("Starting Test: %s - %s" %
                          (self._testMethodName, self._testMethodDoc))
        else:
            self.log.info("Starting Test: %s" % self._testMethodName)
        invalid_ip = '10.111.151.109'
        self.cb_bucket_name = self.input.param('cb_bucket_name',
                                               'travel-sample')
        self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
        self.cb_bucket_password = self.input.param('cb_bucket_password', None)
        self.cb_server_ip = self.input.param("cb_server_ip", None)
        self.cb_server_ip = \
            self.cb_server_ip.replace('INVALID_IP', invalid_ip) \
            if self.cb_server_ip is not None else None
        self.cbas_dataset_name = self.input.param("cbas_dataset_name",
                                                  'travel_ds')
        self.cbas_bucket_name_invalid = \
            self.input.param('cbas_bucket_name_invalid', self.cbas_bucket_name)
        self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
        self.skip_create_dataset = self.input.param('skip_create_dataset',
                                                    False)
        self.disconnect_if_connected = \
            self.input.param('disconnect_if_connected', False)
        self.cbas_dataset_name_invalid = \
            self.input.param('cbas_dataset_name_invalid',
                             self.cbas_dataset_name)
        self.skip_drop_connection = self.input.param('skip_drop_connection',
                                                     False)
        self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)
        self.query_id = self.input.param('query_id', None)
        self.mode = self.input.param('mode', None)
        self.num_concurrent_queries = self.input.param('num_queries', 5000)
        self.concurrent_batch_size = self.input.param('concurrent_batch_size',
                                                      100)
        self.compiler_param = self.input.param('compiler_param', None)
        self.compiler_param_val = self.input.param('compiler_param_val', None)
        self.expect_reject = self.input.param('expect_reject', False)
        self.expect_failure = self.input.param('expect_failure', False)
        self.compress_dataset = self.input.param('compress_dataset', False)
        self.index_name = self.input.param('index_name', "NoName")
        self.index_fields = self.input.param('index_fields', None)
        if self.index_fields:
            self.index_fields = self.index_fields.split("-")
        self.retry_time = self.input.param("retry_time", 300)
        self.num_retries = self.input.param("num_retries", 1)
        self.sample_bucket_dict = {
            TravelSample().name: TravelSample(),
            BeerSample().name: BeerSample()
        }
        self.sample_bucket = None
        self.flush_enabled = Bucket.FlushBucket.ENABLED
        self.test_abort_snapshot = self.input.param("test_abort_snapshot",
                                                    False)
        self.cbas_spec_name = self.input.param("cbas_spec", None)

        self._cb_cluster = self.get_clusters()

        self.expected_error = self.input.param("error", None)

        self.bucket_spec = self.input.param("bucket_spec", None)
        self.doc_spec_name = self.input.param("doc_spec_name", "initial_load")
        self.set_cbas_memory_from_available_free_memory = self.input.param(
            'set_cbas_memory_from_available_free_memory', False)
        self.parallel_load_percent = int(
            self.input.param("parallel_load_percent", 0))
        self.cbas_kill_count = self.input.param("cbas_kill_count", 0)
        self.memcached_kill_count = self.input.param("memcached_kill_count", 0)
        self.tamper_links_count = self.input.param("tamper_links_count", 0)
        self.cbas_node = None
        self.cbas_memory_quota_percent = int(
            self.input.param("cbas_memory_quota_percent", 100))
        self.bucket_size = self.input.param("bucket_size", 100)
        services = None
        nodes_init = None
        # Single cluster support
        if len(self._cb_cluster) == 1:
            self._cb_cluster = self._cb_cluster[0]
            self.cluster.nodes_in_cluster.extend([self.cluster.master])
            if self.services_init and self.nodes_init >= 3:
                if len(self.cluster.servers) < self.nodes_init or \
                        len(self.services_init.split("-")) != self.nodes_init:
                    self.fail("Configuration error. Re-check nodes_init, "
                              "services_init in .conf file and servers "
                              "available in .ini "
                              "file")
                services = list()
                for service in self.services_init.split(
                        "-")[1:self.nodes_init]:
                    services.append(service.replace(":", ","))
                # Initialize cluster using given nodes
                nodes_init = list(
                    filter(lambda node: node.ip != self.cluster.master.ip,
                           self.cluster.servers[1:self.nodes_init]))
                for node, services_init in map(None, nodes_init, services):
                    # map(None, ...) zips with None padding (a Python 2 idiom)
                    if services_init is None:
                        services_init = "kv"
                        services.append(services_init)
                    if not self.cbas_node and "cbas" in services_init:
                        self.cbas_node = node
                        self.cbas_node.services = services_init
                    idx = self.cluster.servers.index(node)
                    self.cluster.servers[idx].services = services_init
            for server in self.cluster.servers:
                if "cbas" in server.services:
                    self.cluster.cbas_nodes.append(server)
                if "kv" in server.services:
                    self.cluster.kv_nodes.append(server)
                rest = RestConnection(server)
                rest.set_data_path(data_path=server.data_path,
                                   index_path=server.index_path,
                                   cbas_path=server.cbas_path)
            if self.expected_error:
                self.expected_error = \
                    self.expected_error.replace("INVALID_IP", invalid_ip)
                self.expected_error = \
                    self.expected_error.replace("PORT",
                                                self.cluster.master.port)
            self.otpNodes = []
            self.cbas_path = server.cbas_path
            self.rest = RestConnection(self.cluster.master)
            if not self.set_cbas_memory_from_available_free_memory:
                self.log.info(
                    "Setting the min possible memory quota so that adding "
                    "more nodes to the cluster wouldn't be a problem.")
                self.rest.set_service_mem_quota({
                    CbServer.Settings.KV_MEM_QUOTA:
                    MIN_KV_QUOTA,
                    CbServer.Settings.FTS_MEM_QUOTA:
                    FTS_QUOTA,
                    CbServer.Settings.INDEX_MEM_QUOTA:
                    INDEX_QUOTA
                })
                self.set_cbas_memory_from_available_free_memory = \
                    self.input.param(
                        'set_cbas_memory_from_available_free_memory', False)

                self.log.info("Setting %d memory quota for CBAS" % CBAS_QUOTA)
                self.cbas_memory_quota = CBAS_QUOTA
                self.rest.set_service_mem_quota(
                    {CbServer.Settings.CBAS_MEM_QUOTA: CBAS_QUOTA})
            self.cbas_util = None
            if self.cluster.cbas_nodes:
                if not self.cbas_node:
                    available_cbas_nodes = list(
                        filter(lambda node: node.ip != self.cluster.master.ip,
                               self.cluster.cbas_nodes))
                    self.cbas_node = available_cbas_nodes[0]
                if self.set_cbas_memory_from_available_free_memory:
                    self.set_memory_for_services(self.rest, self.cluster_util,
                                                 self.cbas_node,
                                                 self.cbas_node.services)
                self.cbas_util = CbasUtil(self.cluster.master, self.cbas_node)
                self.cbas_util_v2 = CbasUtilV2(self.cluster.master,
                                               self.cbas_node, self.task)
                if "cbas" in self.cluster.master.services:
                    self.cleanup_cbas()
                if add_default_cbas_node:
                    if self.cluster.master.ip != self.cbas_node.ip:
                        self.otpNodes.append(
                            self.cluster_util.add_node(self.cbas_node))
                        self.cluster.nodes_in_cluster.append(self.cbas_node)
                        if nodes_init:
                            idx = nodes_init.index(self.cbas_node)
                            services.pop(idx)
                            nodes_init.remove(self.cbas_node)
                    else:
                        self.otpNodes = self.rest.node_statuses()
                    """
                    This cbas cleanup is actually not needed.
                    When a node is added to the cluster,
                    it is automatically cleaned-up.
                    """
                    self.cleanup_cbas()
                    self.cluster.cbas_nodes.remove(self.cbas_node)
            if nodes_init:
                self.task.rebalance([self.cluster.master],
                                    nodes_init, [],
                                    services=services)
                self.cluster.nodes_in_cluster.extend(nodes_init)
            if self.bucket_spec is not None:
                try:
                    self.collectionSetUp(self.cluster, self.bucket_util,
                                         self.cluster_util)
                except Java_base_exception as exception:
                    self.handle_collection_setup_exception(exception)
                except Exception as exception:
                    self.handle_collection_setup_exception(exception)
            else:
                if self.default_bucket:
                    self.bucket_util.create_default_bucket(
                        self.cluster,
                        bucket_type=self.bucket_type,
                        ram_quota=self.bucket_size,
                        replica=self.num_replicas,
                        conflict_resolution=self.
                        bucket_conflict_resolution_type,
                        replica_index=self.bucket_replica_index,
                        storage=self.bucket_storage,
                        eviction_policy=self.bucket_eviction_policy,
                        flush_enabled=self.flush_enabled)
                elif self.cb_bucket_name in self.sample_bucket_dict.keys():
                    self.sample_bucket = \
                        self.sample_bucket_dict[self.cb_bucket_name]

        elif len(self._cb_cluster) > 1:
            # Multi Cluster Support
            for cluster in self._cb_cluster:
                for server in cluster.servers:
                    if CbServer.Services.CBAS in server.services:
                        cluster.cbas_nodes.append(server)
                    if CbServer.Services.KV in server.services:
                        cluster.kv_nodes.append(server)
                    rest = RestConnection(server)
                    rest.set_data_path(data_path=server.data_path,
                                       index_path=server.index_path,
                                       cbas_path=server.cbas_path)

                if self.expected_error:
                    cluster.expected_error = \
                        self.expected_error.replace("INVALID_IP", invalid_ip)
                    cluster.expected_error = \
                        self.expected_error.replace("PORT",
                                                    cluster.master.port)

                cluster.otpNodes = list()
                cluster.cbas_path = server.cbas_path

                cluster.rest = RestConnection(cluster.master)

                if not self.set_cbas_memory_from_available_free_memory:
                    self.log.info(
                        "Setting the min possible memory quota so that adding "
                        "more nodes to the cluster wouldn't be a problem.")
                    cluster.rest.set_service_mem_quota({
                        CbServer.Settings.KV_MEM_QUOTA:
                        MIN_KV_QUOTA,
                        CbServer.Settings.FTS_MEM_QUOTA:
                        FTS_QUOTA,
                        CbServer.Settings.INDEX_MEM_QUOTA:
                        INDEX_QUOTA
                    })
                    cluster.set_cbas_memory_from_available_free_memory = \
                        self.input.param(
                            'set_cbas_memory_from_available_free_memory', False)

                    self.log.info("Setting %d memory quota for CBAS" %
                                  CBAS_QUOTA)
                    cluster.cbas_memory_quota = CBAS_QUOTA
                    cluster.rest.set_service_mem_quota(
                        {CbServer.Settings.CBAS_MEM_QUOTA: CBAS_QUOTA})

                cluster.cbas_util = None
                # Drop any existing buckets and datasets
                if cluster.cbas_nodes:
                    cluster.cbas_node = cluster.cbas_nodes[0]
                    if self.set_cbas_memory_from_available_free_memory:
                        self.set_memory_for_services(
                            cluster.rest, cluster.cluster_util,
                            cluster.cbas_node, cluster.cbas_node.services)
                    cluster.cbas_util = CbasUtil(cluster.master,
                                                 cluster.cbas_node, self.task)
                    cluster.cbas_util_v2 = CbasUtilV2(cluster.master,
                                                      cluster.cbas_node)
                    if "cbas" in cluster.master.services:
                        self.cleanup_cbas(cluster.cbas_util)
                    if add_default_cbas_node:
                        if cluster.master.ip != cluster.cbas_node.ip:
                            cluster.otpNodes.append(
                                cluster.cluster_util.add_node(
                                    cluster, cluster.cbas_node))
                        else:
                            cluster.otpNodes = cluster.rest.node_statuses()
                        """
                        This cbas cleanup is actually not needed.
                        When a node is added to the cluster,
                        it is automatically cleaned-up.
                        """
                        self.cleanup_cbas(cluster.cbas_util)
                        cluster.cbas_nodes.remove(cluster.cbas_node)
                if self.bucket_spec is not None:
                    try:
                        self.collectionSetUp(cluster, cluster.bucket_util,
                                             cluster.cluster_util)
                    except Java_base_exception as exception:
                        self.handle_collection_setup_exception(exception)
                    except Exception as exception:
                        self.handle_collection_setup_exception(exception)
                else:
                    if self.default_bucket:
                        cluster.bucket_util.create_default_bucket(
                            cluster,
                            bucket_type=self.bucket_type,
                            ram_quota=self.bucket_size,
                            replica=self.num_replicas,
                            conflict_resolution=self.
                            bucket_conflict_resolution_type,
                            replica_index=self.bucket_replica_index,
                            storage=self.bucket_storage,
                            eviction_policy=self.bucket_eviction_policy,
                            flush_enabled=self.flush_enabled)
                    elif self.cb_bucket_name in self.sample_bucket_dict.keys():
                        self.sample_bucket = self.sample_bucket_dict[
                            self.cb_bucket_name]

                cluster.bucket_util.add_rbac_user(cluster.master)

        else:
            self.fail("No cluster is available")
        self.log.info(
            "=== CBAS_BASE setup was finished for test #{0} {1} ===".format(
                self.case_number, self._testMethodName))
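Note on the map(None, nodes_init, services) loop above: that form is Python 2 only (map with a None function zips the lists, padding the shorter one with None). A minimal sketch of the Python 3 equivalent:

    from itertools import zip_longest

    nodes = ["node2", "node3", "node4"]    # placeholder node list
    services = ["kv,index", "cbas"]        # deliberately one entry short
    for node, services_init in zip_longest(nodes, services):
        print(node, services_init or "kv")  # missing entries default to "kv"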
Example #13
    def __setup_buckets(self):
        self.cluster.buckets = self.bucket_util.get_all_buckets(self.cluster)
        for bucket in self.bucket_conf["buckets"]:
            bucket_obj = None
            # Skip bucket creation if already exists in cluster
            # Note: Useful while running instances for multi-tenant case
            for existing_bucket in self.cluster.buckets:
                if existing_bucket.name == bucket["name"]:
                    bucket_obj = existing_bucket
                    break
            if bucket_obj is None:
                if bucket["sample_bucket"] is True:
                    if bucket["name"] == "travel-sample":
                        s_bucket = TravelSample()
                    elif bucket["name"] == "beer-sample":
                        s_bucket = BeerSample()
                    elif bucket["name"] == "gamesim-sample":
                        s_bucket = GamesimSample()
                    else:
                        self.fail("Invalid sample bucket '%s'" %
                                  bucket["name"])

                    if self.bucket_util.load_sample_bucket(
                            self.cluster, s_bucket) is False:
                        self.fail("Failed to load sample bucket")
                    if Bucket.ramQuotaMB in bucket:
                        BucketHelper(self.cluster.master).change_bucket_props(
                            self.cluster.buckets[-1],
                            ramQuotaMB=bucket[Bucket.ramQuotaMB])
                else:
                    self.bucket_util.create_default_bucket(
                        cluster=self.cluster,
                        bucket_name=bucket["name"],
                        bucket_type=bucket.get(Bucket.bucketType,
                                               Bucket.Type.MEMBASE),
                        ram_quota=bucket.get(Bucket.ramQuotaMB, None),
                        replica=bucket.get(Bucket.replicaNumber,
                                           Bucket.ReplicaNum.ONE),
                        maxTTL=bucket.get(Bucket.maxTTL, 0),
                        storage=bucket.get(Bucket.storageBackend,
                                           Bucket.StorageBackend.couchstore),
                        eviction_policy=bucket.get(
                            Bucket.evictionPolicy,
                            Bucket.EvictionPolicy.VALUE_ONLY),
                        bucket_durability=bucket.get(
                            Bucket.durabilityMinLevel,
                            Bucket.DurabilityLevel.NONE))

                bucket_obj = self.cluster.buckets[-1]

            self.map_collection_data(bucket_obj)
            self.__print_step("Creating required scope/collections")
            for scope in bucket["scopes"]:
                if scope["name"] in bucket_obj.scopes.keys():
                    self.log.debug("Scope %s already exists for bucket %s" %
                                   (scope["name"], bucket_obj.name))
                else:
                    self.bucket_util.create_scope(self.cluster.master,
                                                  bucket_obj, scope)
                    bucket_obj.stats.increment_manifest_uid()
                for collection in scope["collections"]:
                    if collection["name"] in \
                            bucket_obj.scopes[scope["name"]].collections:
                        self.log.debug("Collection %s :: %s exists" %
                                       (scope["name"], collection["name"]))
                    else:
                        self.bucket_util.create_collection(
                            self.cluster.master, bucket_obj, scope["name"],
                            collection)
                        bucket_obj.stats.increment_manifest_uid()

            # Create RBAC users
            for t_bucket in self.rbac_conf["rbac_roles"]:
                if t_bucket["bucket"] == bucket["name"]:
                    self.create_rbac_users("rbac_admin", "rbac_admin",
                                           t_bucket["roles"])
                    break
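For orientation, a hedged illustration of the bucket_conf shape this helper consumes; the field names are inferred from the accesses above, not from a published schema:

    bucket_conf = {
        "buckets": [
            {
                "name": "travel-sample",
                "sample_bucket": True,
                "ramQuotaMB": 256,   # assuming Bucket.ramQuotaMB == "ramQuotaMB"
                "scopes": [
                    {"name": "inventory",
                     "collections": [{"name": "hotel"}, {"name": "airline"}]},
                ],
            },
        ],
    }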
Example #14
    def setUp(self, add_default_cbas_node=True):
        super(CBASBaseTest, self).setUp()
        if self._testMethodDoc:
            self.log.info("Starting Test: %s - %s"
                          % (self._testMethodName, self._testMethodDoc))
        else:
            self.log.info("Starting Test: %s" % self._testMethodName)

        for server in self.cluster.servers:
            if "cbas" in server.services:
                self.cluster.cbas_nodes.append(server)
            if "kv" in server.services:
                self.cluster.kv_nodes.append(server)
            rest = RestConnection(server)
            rest.set_data_path(data_path=server.data_path,
                               index_path=server.index_path,
                               cbas_path=server.cbas_path)

        invalid_ip = '10.111.151.109'
        self._cb_cluster = self.task
        self.cb_bucket_name = self.input.param('cb_bucket_name',
                                               'travel-sample')
        self.sample_bucket_dict = {TravelSample().name: TravelSample(),
                                   BeerSample().name: BeerSample()}
        self.sample_bucket = None
        self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
        self.cb_bucket_password = self.input.param('cb_bucket_password', None)
        self.expected_error = self.input.param("error", None)
        if self.expected_error:
            self.expected_error = self.expected_error.replace("INVALID_IP",
                                                              invalid_ip)
            self.expected_error = \
                self.expected_error.replace("PORT", self.cluster.master.port)
        self.cb_server_ip = self.input.param("cb_server_ip", None)
        self.cb_server_ip = \
            self.cb_server_ip.replace('INVALID_IP', invalid_ip) \
            if self.cb_server_ip is not None else None
        self.cbas_dataset_name = self.input.param("cbas_dataset_name",
                                                  'travel_ds')
        self.cbas_bucket_name_invalid = \
            self.input.param('cbas_bucket_name_invalid', self.cbas_bucket_name)
        self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
        self.skip_create_dataset = self.input.param('skip_create_dataset',
                                                    False)
        self.disconnect_if_connected = \
            self.input.param('disconnect_if_connected', False)
        self.cbas_dataset_name_invalid = \
            self.input.param('cbas_dataset_name_invalid',
                             self.cbas_dataset_name)
        self.skip_drop_connection = self.input.param('skip_drop_connection',
                                                     False)
        self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)
        self.query_id = self.input.param('query_id', None)
        self.mode = self.input.param('mode', None)
        self.num_concurrent_queries = self.input.param('num_queries', 5000)
        self.concurrent_batch_size = self.input.param('concurrent_batch_size',
                                                      100)
        self.compiler_param = self.input.param('compiler_param', None)
        self.compiler_param_val = self.input.param('compiler_param_val', None)
        self.expect_reject = self.input.param('expect_reject', False)
        self.expect_failure = self.input.param('expect_failure', False)
        self.compress_dataset = self.input.param('compress_dataset', False)
        self.index_name = self.input.param('index_name', "NoName")
        self.index_fields = self.input.param('index_fields', None)
        self.retry_time = self.input.param("retry_time", 300)
        self.num_retries = self.input.param("num_retries", 1)
        self.flush_enabled = Bucket.FlushBucket.ENABLED
        self.test_abort_snapshot = self.input.param("test_abort_snapshot",
                                                    False)
        if self.index_fields:
            self.index_fields = self.index_fields.split("-")
        self.otpNodes = list()
        self.cbas_path = server.cbas_path

        self.rest = RestConnection(self.cluster.master)
        self.log.info("Setting the min possible memory quota so that adding "
                      "more nodes to the cluster wouldn't be a problem.")
        self.rest.set_service_memoryQuota(service='memoryQuota',
                                          memoryQuota=MIN_KV_QUOTA)
        self.rest.set_service_memoryQuota(service='ftsMemoryQuota',
                                          memoryQuota=FTS_QUOTA)
        self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                          memoryQuota=INDEX_QUOTA)

        self.set_cbas_memory_from_available_free_memory = \
            self.input.param('set_cbas_memory_from_available_free_memory',
                             False)
        if self.set_cbas_memory_from_available_free_memory:
            info = self.rest.get_nodes_self()
            # memoryFree is reported in bytes: convert to MB, keep ~10% headroom
            self.cbas_memory_quota = int((info.memoryFree // 1024 ** 2) * 0.9)
            self.log.info("Setting %d memory quota for CBAS"
                          % self.cbas_memory_quota)
            self.rest.set_service_memoryQuota(
                service='cbasMemoryQuota',
                memoryQuota=self.cbas_memory_quota)
        else:
            self.log.info("Setting %d memory quota for CBAS" % CBAS_QUOTA)
            self.cbas_memory_quota = CBAS_QUOTA
            self.rest.set_service_memoryQuota(service='cbasMemoryQuota',
                                              memoryQuota=CBAS_QUOTA)

        self.cbas_util = None
        # Drop any existing buckets and datasets
        if self.cluster.cbas_nodes:
            self.cbas_node = self.cluster.cbas_nodes[0]
            self.cbas_util = CbasUtil(self.cluster.master, self.cbas_node,
                                      self.task)
            if "cbas" in self.cluster.master.services:
                self.cleanup_cbas()
            if add_default_cbas_node:
                if self.cluster.master.ip != self.cbas_node.ip:
                    self.otpNodes.append(
                        self.cluster_util.add_node(self.cbas_node))
                else:
                    self.otpNodes = self.rest.node_statuses()
                """
                This cbas cleanup is actually not needed.
                When a node is added to the cluster,
                it is automatically cleaned-up.
                """
                self.cleanup_cbas()
                self.cluster.cbas_nodes.remove(self.cbas_node)
        if self.default_bucket:
            self.bucket_util.create_default_bucket(
                bucket_type=self.bucket_type,
                ram_quota=self.bucket_size,
                replica=self.num_replicas,
                conflict_resolution=self.bucket_conflict_resolution_type,
                replica_index=self.bucket_replica_index,
                storage=self.bucket_storage,
                eviction_policy=self.bucket_eviction_policy,
                flush_enabled=self.flush_enabled)
        elif self.cb_bucket_name in self.sample_bucket_dict.keys():
            self.sample_bucket = self.sample_bucket_dict[self.cb_bucket_name]

        self.bucket_util.add_rbac_user()
        self.log.info("=== CBAS_BASE setup was finished for test #{0} {1} ==="
                      .format(self.case_number, self._testMethodName))
Example #15
    def test_rest_api_authorization_cbas_cluster_info_api(self):
        validation_failed = False

        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        api_authentication = [
            {
                "api_url":
                "http://{0}:8095/analytics/cluster".format(self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/cluster/cc".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/diagnostics".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/node/diagnostics".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/cc/config".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 401
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/node/config".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 401
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:9110/analytics/node/agg/stats/remaining".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "analytics_manager[*]",
                    "expected_status": 200
                }, {
                    "role": "analytics_reader",
                    "expected_status": 200
                }],
            },
            {
                "api_url":
                "http://{0}:8095/analytics/backup?bucket=travel-sample".format(
                    self.cbas_node.ip),
                "roles": [
                    {
                        "role": "admin",
                        "expected_status": 200
                    },
                    {
                        "role": "data_backup[*],analytics_reader",
                        "expected_status": 200
                    },
                    {
                        "role": "data_backup[*], analytics_manager[*]",
                        "expected_status": 200
                    },
                    {
                        "role": "data_backup[travel-sample], analytics_reader",
                        "expected_status": 200
                    },
                    {
                        "role":
                        "data_backup[travel-sample], analytics_manager[travel-sample]",
                        "expected_status": 200
                    },
                    {
                        "role": "ro_admin",
                        "expected_status": 401
                    },
                    {
                        "role": "analytics_reader",
                        "expected_status": 401
                    },
                    {
                        "role": "analytics_manager[*]",
                        "expected_status": 401
                    },
                    {
                        "role": "data_backup[beer-sample], analytics_reader",
                        "expected_status": 401
                    },
                    {
                        "role":
                        "data_backup[beer-sample], analytics_manager[*]",
                        "expected_status": 401
                    },
                    {
                        "role":
                        "data_backup[beer-sample], analytics_manager[beer-sample]",
                        "expected_status": 401
                    },
                ],
            },
            {
                "api_url":
                "http://{0}:8095/analytics/cluster/restart".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "cluster_admin",
                    "expected_status": 202
                }, {
                    "role": "admin",
                    "expected_status": 202
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }],
                "method":
                "POST"
            },
        ]

        shell = RemoteMachineShellConnection(self.cluster.master)

        for api in api_authentication:
            for role in api["roles"]:
                self.rbac_util._create_user_and_grant_role(
                    "testuser", role["role"])
                self.sleep(5)

                if "method" in api:
                    output, error = shell.execute_command(
                        """curl -i {0} -X {1} -u {2}:{3} 2>/dev/null | head -n 1 | cut -d$' ' -f2"""
                        .format(api["api_url"], api["method"], "testuser",
                                "password"))
                    self.sleep(10)
                else:
                    output, error = shell.execute_command(
                        """curl -i {0} -u {1}:{2} 2>/dev/null | head -n 1 | cut -d$' ' -f2"""
                        .format(api["api_url"], "testuser", "password"))
                response = ""
                for line in output:
                    response = response + line
                response = json.loads(str(response))
                if response != role["expected_status"]:
                    self.log.info(
                        "Error accessing {0} as user with {1} role. Response = {2}"
                        .format(api["api_url"], role["role"], response))
                    validation_failed = True
                else:
                    self.log.info(
                        "Accessing {0} as user with {1} role worked as expected"
                        .format(api["api_url"], role["role"]))

                self.rbac_util._drop_user("testuser")

        shell.disconnect()

        self.assertFalse(
            validation_failed,
            "Authorization errors with some APIs. Check the test log above.")