Example #1
    def test_range_api_metrics(self):
        """
        Example to retrieve range_api_metrics
        """
        # Example 1
        metric_name = "kv_curr_items"
        label_values = {
            "bucket": self.bucket_util.buckets[0].name,
            "nodes": self.cluster.master.ip
        }
        content = StatsHelper(self.cluster.master).get_range_api_metrics(
            metric_name, label_values=label_values)
        print(content)

        # Example 2
        metric_name = "kv_curr_items"
        label_values = {
            "bucket": self.bucket_util.buckets[0].name,
            "aggregationFunction": "max"
        }
        content = StatsHelper(self.cluster.master).get_range_api_metrics(
            metric_name, label_values=label_values)
        print(content)

        # Example 3
        content = StatsHelper(self.cluster.master).post_range_api_metrics(
            self.bucket_util.buckets[0].name)
        print(content)
Example #2
    def test_change_global_scrape_timeout(self):
        """
        Change global scrape timeout and verify the prometheus config by querying Prometheus Federation
        (Positive test case, since a valid scrape_timeout is always less than the scrape_interval)
        """
        scrape_timeout = self.input.param("scrape_timeout", 5)
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())

        self.log.info("Changing scrape interval to {0}".format(scrape_timeout))
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "scrape_timeout", scrape_timeout)

        self.log.info("Validating by querying prometheus")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "prometheus_auth_enabled", "false")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "listen_addr_type", "any")
        self.sleep(10, "Waiting for prometheus federation")
        query = "status/config"
        yaml = YAML()
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).query_prometheus_federation(query)
            yaml_code = yaml.load(content["data"]["yaml"])
            global_scrape_timeout = yaml_code["global"]["scrape_timeout"]
            if str(global_scrape_timeout) != (str(scrape_timeout) + "s"):
                self.fail("Expected scrape timeout {0}, but Actual {1}".format(
                    scrape_timeout, global_scrape_timeout))
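Example #2 above compares the scrape_timeout reported in Prometheus' status/config YAML against the value that was set, expecting an "s" suffix. A minimal standalone sketch of that comparison, assuming the YAML class used by the test is ruamel.yaml's loader and substituting a hand-written config for content["data"]["yaml"]:

from ruamel.yaml import YAML

# Hand-written stand-in for the YAML string the federation query returns
sample_config = """
global:
  scrape_interval: 10s
  scrape_timeout: 5s
"""

def check_scrape_timeout(yaml_text, expected_timeout):
    yaml_code = YAML().load(yaml_text)
    global_scrape_timeout = str(yaml_code["global"]["scrape_timeout"])
    # Prometheus stores the value with an "s" suffix, hence the comparison
    return global_scrape_timeout == str(expected_timeout) + "s"

print(check_scrape_timeout(sample_config, 5))   # True
print(check_scrape_timeout(sample_config, 10))  # False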
Example #3
 def get_low_cardinality_metrics(self, parse):
     content = None
     for server in self.cluster_util.get_kv_nodes():
         content = StatsHelper(server).get_prometheus_metrics(parse=parse)
         if not parse:
             StatsHelper(server)._validate_metrics(content)
     for line in content:
         self.log.info(line.strip("\n"))
Example #4
 def get_low_cardinality_metrics(self, component, parse):
     content = None
     for server in self.cluster.servers[:self.nodes_init]:
         content = StatsHelper(server).get_prometheus_metrics(
             component=component, parse=parse)
         if not parse:
             StatsHelper(server)._validate_metrics(content)
     for line in content:
         self.log.info(line.strip("\n"))
Example #5
File: Collections.py  Project: umang-cb/TAF
    def setUp(self):
        self.input = TestInputSingleton.input
        self.input.test_params.update({"default_bucket": False})
        self.backup_service_test = self.input.param("backup_service_test", False)
        if self.backup_service_test:
            self.backup_service = BackupServiceTest(self.input.servers)
        super(volume, self).setUp()
        self.bucket_util._expiry_pager(self.cluster, val=5)
        self.rest = RestConnection(self.servers[0])
        self.available_servers = self.cluster.servers[self.nodes_init:]
        self.exclude_nodes = [self.cluster.master]
        self.skip_check_logs = False
        self.iterations = self.input.param("iterations", 2)
        self.vbucket_check = self.input.param("vbucket_check", True)
        self.retry_get_process_num = self.input.param("retry_get_process_num", 400)
        self.data_load_spec = self.input.param("data_load_spec", "volume_test_load_for_volume_test")
        self.perform_quorum_failover = self.input.param("perform_quorum_failover", True)
        self.rebalance_moves_per_node = self.input.param("rebalance_moves_per_node", 4)
        self.cluster_util.set_rebalance_moves_per_nodes(
            self.cluster.master,
            rebalanceMovesPerNode=self.rebalance_moves_per_node)
        self.scrape_interval = self.input.param("scrape_interval", None)
        if self.scrape_interval:
            self.log.info("Changing scrape interval and scrape_timeout to {0}".format(self.scrape_interval))
            StatsHelper(self.cluster.master).change_scrape_timeout(self.scrape_interval)
            StatsHelper(self.cluster.master).change_scrape_interval(self.scrape_interval)
            # Change high cardinality services' scrape_interval
            value = "[{S, [{high_cardinality_enabled, true}, {high_cardinality_scrape_interval, %s}]} " \
                    "|| S <- [index, fts, kv, cbas, eventing]]" % self.scrape_interval
            StatsHelper(self.cluster.master).configure_stats_settings_from_diag_eval("services", value)

        self.enable_n2n_encryption = self.input.param("enable_n2n_encryption", False)
        if self.enable_n2n_encryption:
            shell_conn = RemoteMachineShellConnection(self.cluster.master)
            cb_cli = CbCli(shell_conn)
            cb_cli.enable_n2n_encryption()
            cb_cli.set_n2n_encryption_level(level="all")
            shell_conn.disconnect()

        self.doc_and_collection_ttl = self.input.param("doc_and_collection_ttl", False)  # For using doc_ttl + coll_ttl
        self.skip_validations = self.input.param("skip_validations", True)

        # Services to be added on rebalance-in nodes during the volume test
        self.services_for_rebalance_in = self.input.param("services_for_rebalance_in", None)

        self.index_and_query_setup()
        self.fts_setup()
        self.query_thread_flag = False
        self.query_thread = None
        self.ui_stats_thread_flag = False
        self.ui_stats_thread = None

        # Setup the backup service
        if self.backup_service_test:
            self.backup_service.setup()
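When scrape_interval is set, the setUp above builds an Erlang list-comprehension string that enables high-cardinality collection and sets its scrape interval for the index, fts, kv, cbas and eventing services. A small sketch that renders the same template for an arbitrary interval (the helper name build_services_value is made up for illustration):

# Renders the Erlang list-comprehension string used in the setUp above
def build_services_value(scrape_interval):
    return ("[{S, [{high_cardinality_enabled, true}, "
            "{high_cardinality_scrape_interval, %s}]} "
            "|| S <- [index, fts, kv, cbas, eventing]]" % scrape_interval)

print(build_services_value(30))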
Example #6
 def test_check_get_all_metrics(self):
     """
     Test /metrics endpoint. Validate for duplicity and prefix
     """
     self.bucket_util.load_sample_bucket(TravelSample())
     self.bucket_util.load_sample_bucket(BeerSample())
     for server in self.cluster.servers[:self.nodes_init]:
         content = StatsHelper(server).get_all_metrics()
         StatsHelper(server)._validate_metrics(content)
     for line in content:
         print(line.strip("\n"))
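The docstring above says the /metrics content is validated "for duplicity and prefix", but _validate_metrics itself is not shown in these examples. The following is only a hypothetical check in the same spirit, run on hand-written Prometheus exposition lines (the cm_ and kv_ prefixes are illustrative):

# Hypothetical validation in the spirit of the "duplicity and prefix" check;
# the real _validate_metrics implementation may differ.
def validate_exposition_lines(lines, expected_prefixes=("cm_", "kv_")):
    seen = set()
    for line in lines:
        line = line.strip()
        if not line or line.startswith("#"):   # skip comments and blank lines
            continue
        if line in seen:
            raise ValueError("Duplicate sample line: %s" % line)
        seen.add(line)
        if not line.startswith(expected_prefixes):
            raise ValueError("Unexpected metric prefix: %s" % line)

validate_exposition_lines([
    "# TYPE kv_curr_items gauge",
    'kv_curr_items{bucket="travel-sample"} 31591',
    "cm_rest_request_enters_total 42",
])
print("validation passed")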
Example #7
    def test_stats_1000_collections(self):
        """
        Call all endpoints, for all components, and validate with 1000 collections in the cluster
        """
        for server in self.cluster.servers[:self.nodes_init]:
            self.log.info(
                "calling low cardinality metrics on {0} with component ns server"
                .format(server.ip))
            content = StatsHelper(server).get_prometheus_metrics(
                component="ns_server", parse=False)
            StatsHelper(server)._validate_metrics(content)

            self.log.info("calling /metrics on {0}".format(server.ip))
            content = StatsHelper(server).get_all_metrics()
            StatsHelper(server)._validate_metrics(content)

            server_services = self.get_services_from_node(server)
            for component in server_services:
                self.log.info(
                    "calling low cardinality metrics on {0} with component {1}"
                    .format(server.ip, component))
                content = StatsHelper(server).get_prometheus_metrics(
                    component=component, parse=False)
                StatsHelper(server)._validate_metrics(content)
                self.log.info(
                    "calling high cardinality metrics on {0} with component {1}"
                    .format(server.ip, component))
                content = StatsHelper(server).get_prometheus_metrics_high(
                    component=component, parse=False)
                StatsHelper(server)._validate_metrics(content)
Example #8
 def get_range_api_metrics(self, metric_name):
     label_values = {
         "bucket": self.cluster.buckets[0].name,
         "nodes": self.cluster.master.ip
     }
     content = StatsHelper(self.cluster.master).get_range_api_metrics(
         metric_name, label_values=label_values)
     self.log.info(content)
Example #9
 def ns_server_stat(self, node, pattern):
     content = StatsHelper(node).get_prometheus_metrics_high(
         component="ns_server")
     output = [
         re.findall(r'\s\d+', line)[0] for line in content
         if pattern in line
     ]
     return output
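The comprehension above grabs the first whitespace-prefixed run of digits from every exposition line that contains the pattern, so the returned strings keep a leading space. A standalone illustration on a made-up line:

import re

# Made-up Prometheus exposition line; only the extraction is the point
line = 'cm_rest_request_enters_total{instance="ns_server"} 1024'
value = re.findall(r'\s\d+', line)[0]   # first whitespace-prefixed digit run
print(repr(value))   # ' 1024' -- leading space preserved
print(int(value))    # 1024   -- int() tolerates the leading whitespace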
Example #10
 def verify_prometheus_config(expected_scrape_timeout):
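     # Nested helper extracted from a test like Example #17; 'query' and 'yaml'
     # are defined in the enclosing test method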
     self.log.info("Validating by querying prometheus")
     StatsHelper(self.cluster.master).configure_stats_settings_from_diag_eval\
         ("prometheus_auth_enabled", "false")
     StatsHelper(self.cluster.master).configure_stats_settings_from_diag_eval\
         ("listen_addr_type", "any")
     self.sleep(10, "Waiting for prometheus federation")
     for server in self.cluster.servers[:self.nodes_init]:
         content = StatsHelper(server).query_prometheus_federation(
             query)
         yaml_code = yaml.load(content["data"]["yaml"])
         global_scrape_timeout = yaml_code["global"]["scrape_timeout"]
         if str(global_scrape_timeout) != (
                 str(expected_scrape_timeout) + "s"):
             self.fail(
                 "Expected scrape timeout {0}, but Actual {1}".format(
                     expected_scrape_timeout, global_scrape_timeout))
Example #11
 def test_check_authorization_high_cardinality_metrics(self):
     """
     Check that the high cardinality prometheus metrics endpoint is accessible only with cluster.admin.internal.stats!read
     Check with the cluster admin role (it should fail), then with Full admin (it should pass)
     """
     rbac_util = RbacUtils(self.cluster.master)
     self.log.info("Create a user with role cluster admin")
     rbac_util._create_user_and_grant_role("cluster_admin", "cluster_admin")
     for server in self.cluster.servers[:self.nodes_init]:
         server_services = self.get_services_from_node(server)
         stats_helper_object = StatsHelper(server)
         for component in server_services:
             try:
                 stats_helper_object.username = "******"
                 _ = stats_helper_object.get_prometheus_metrics_high(
                     component=component, parse=False)
                 self.fail(
                     "Metrics was accessible without necessary permissions on {0} for component {1}"
                     .format(server.ip, component))
             except Exception as e:
                 self.log.info(
                     "Accessing metrics with cluster admin failed as expected {0}"
                     .format(e))
             self.log.info("trying again with Administrator privilages")
             stats_helper_object.username = "******"
             content = stats_helper_object.get_prometheus_metrics_high(
                 component=component, parse=False)
             StatsHelper(server)._validate_metrics(content)
Example #12
    def test_check_high_cardinality_metrics(self):
        """
        Check if _prometheusMetrics returns high cardinality metrics by default
        i.e., high cardinality metrics are collected by default
        Also serves as a check that prometheus is running on all nodes
        """
        component = self.input.param("component", "kv")
        parse = self.input.param("parse", False)

        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).get_prometheus_metrics_high(
                component=component, parse=parse)
            if not parse:
                StatsHelper(server)._validate_metrics(content)
        for line in content:
            print(line.strip("\n"))
Example #13
File: Collections.py  Project: umang-cb/TAF
 def run_ui_stats_queries(self):
     """
     Runs UI stats queries in a loop in a separate thread until the thread is asked to join
     """
     self.log.info("Starting to poll UI stats queries")
     while self.ui_stats_thread_flag:
         for bucket in self.cluster.buckets:
             _ = StatsHelper(self.cluster.master).post_range_api_metrics(bucket.name)
             self.sleep(10)
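run_ui_stats_queries loops while ui_stats_thread_flag is True, and the tearDown in Example #22 clears that flag before joining the thread. A minimal, framework-free sketch of the same start/stop pattern, with a print standing in for the StatsHelper call:

import threading
import time

class UiStatsPoller:
    """Framework-free sketch of the flag-driven polling thread used above"""

    def __init__(self):
        self.ui_stats_thread_flag = False
        self.ui_stats_thread = None

    def run_ui_stats_queries(self):
        while self.ui_stats_thread_flag:
            # Stand-in for StatsHelper(master).post_range_api_metrics(bucket.name)
            print("polling UI stats")
            time.sleep(1)

    def start(self):
        self.ui_stats_thread_flag = True
        self.ui_stats_thread = threading.Thread(target=self.run_ui_stats_queries)
        self.ui_stats_thread.start()

    def stop(self):
        # Mirrors the tearDown in Example #22: clear the flag, then join
        self.ui_stats_thread_flag = False
        self.ui_stats_thread.join()

poller = UiStatsPoller()
poller.start()
time.sleep(3)
poller.stop()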
Example #14
    def test_disable_external_prometheus_high_cardinality_metrics(self):
        """
        Disable exposition of high cardinality metrics by ns-server's /metrics endpoint
        Validate by checking that no high cardinality metrics are returned at the
        /metrics endpoint, i.e., check that the total number of low cardinality
        metrics equals the total number of metrics at the /metrics endpoint
        """
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())

        self.log.info(
            "Disabling external prometheus high cardinality metrics of all services"
        )
        value = "[{index,[{high_cardinality_enabled,false}]}, {fts,[{high_cardinality_enabled,false}]},\
                         {kv,[{high_cardinality_enabled,false}]}, {cbas,[{high_cardinality_enabled,false}]}, \
                         {eventing,[{high_cardinality_enabled,false}]}]"

        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "external_prometheus_services", value)
        for server in self.cluster.servers[:self.nodes_init]:
            len_low_cardinality_metrics = 0
            content = StatsHelper(server).get_prometheus_metrics(
                component="ns_server", parse=False)
            self.log.info("lc count of ns_server on {0} is {1}".format(
                server.ip, len(content)))
            len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                content)
            server_services = self.get_services_from_node(server)
            for component in server_services:
                content = StatsHelper(server).get_prometheus_metrics(
                    component=component, parse=False)
                self.log.info("lc count of {2} on {0} is {1}".format(
                    server.ip, len(content), component))
                len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                    content)
            content = StatsHelper(server).get_all_metrics()
            len_metrics = len(content)
            if len_metrics != len_low_cardinality_metrics:
                self.fail(
                    "Number mismatch on node {0} , Total lc metrics count {1}, Total metrics count {2}"
                    .format(server.ip, len_low_cardinality_metrics,
                            len_metrics))
Example #15
    def test_disable_external_prometheus_high_cardinality_metrics(self):
        """
        Disable exposition of high cardinality metrics by ns-server's /metrics endpoint
        Validate by checking that no high cardinality metrics are returned at the
        /metrics endpoint, i.e., check that the total number of low cardinality
        metrics equals the total number of metrics at the /metrics endpoint
        """
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        self.log.info(
            "Disabling external prometheus high cardinality metrics of all services"
        )
        metrics_data = '{"statsExport":{"analytics":{"highCardEnabled":false}, "clusterManager":{"highCardEnabled":false},\
                "data":{"highCardEnabled":false}, "eventing":{"highCardEnabled":false}, \
                "fullTextSearch":{"highCardEnabled":false}, "index":{"highCardEnabled":false}}}'

        StatsHelper(self.cluster.master).configure_stats_settings_from_api(
            metrics_data)
        for server in self.cluster.servers[:self.nodes_init]:
            len_low_cardinality_metrics = 0
            content = StatsHelper(server).get_prometheus_metrics(
                component="ns_server", parse=False)
            self.log.info("lc count of ns_server on {0} is {1}".format(
                server.ip, len(content)))
            len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                content)
            server_services = self.get_services_from_node(server)
            for component in server_services:
                content = StatsHelper(server).get_prometheus_metrics(
                    component=component, parse=False)
                self.log.info("lc count of {2} on {0} is {1}".format(
                    server.ip, len(content), component))
                len_low_cardinality_metrics = len_low_cardinality_metrics + len(
                    content)
            self.sleep(20, "Wait before fetching metrics")
            content = StatsHelper(server).get_all_metrics()
            len_metrics = len(content)
            if len_metrics != len_low_cardinality_metrics:
                self.fail(
                    "Number mismatch on node {0} , Total lc metrics count {1}, Total metrics count {2}"
                    .format(server.ip, len_low_cardinality_metrics,
                            len_metrics))
Example #16
File: cgroup.py  Project: couchbaselabs/TAF
 def read_latest_host_aware_stats(self):
     """
     Reads host aware stats reported by prometheus
     """
     latest_host_aware_stats_map = dict()
     for stat in self.host_aware_stats:
         content = StatsHelper(self.node).get_range_api_metrics(stat)
         val = content['data'][0]['values'][-1][1]
         latest_host_aware_stats_map[stat] = val
     self.log.info("latest_host_aware_stats_map {0}".format(latest_host_aware_stats_map))
     return latest_host_aware_stats_map
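read_latest_host_aware_stats above indexes into content['data'][0]['values'][-1][1]. Assuming the range API returns a Prometheus-style range payload (a list of series, each carrying [timestamp, value] pairs), the access pattern looks like this on a hand-built response; the stat name and numbers are made up:

# Hand-built response of the assumed shape; only the indexing is the point
content = {
    "data": [
        {"metric": {"name": "example_host_aware_stat"},
         "values": [[1700000000, "12.5"], [1700000060, "14.0"]]}
    ]
}
# Last [timestamp, value] pair of the first series, then its value
latest_value = content["data"][0]["values"][-1][1]
print(latest_value)   # prints 14.0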
Example #17
    def test_change_global_scrape_interval(self):
        """
        1. Change global scrape interval via diag eval
        2. verify the prometheus config by querying Prometheus Federation
        3. Reset the global scrape interval back to default via rest api
        4. verify the prometheus config by querying Prometheus Federation
        """
        def verify_prometheus_config(expected_scrape_interval):
            self.log.info("Validating by querying prometheus")
            StatsHelper(self.cluster.master).configure_stats_settings_from_diag_eval\
                ("prometheus_auth_enabled", "false")
            StatsHelper(self.cluster.master).configure_stats_settings_from_diag_eval\
                ("listen_addr_type", "any")
            self.sleep(10, "Waiting for prometheus federation")
            for server in self.cluster.servers[:self.nodes_init]:
                content = StatsHelper(server).query_prometheus_federation(
                    query)
                yaml_code = yaml.load(content["data"]["yaml"])
                global_scrape_interval = yaml_code["global"]["scrape_interval"]
                if str(global_scrape_interval) != (
                        str(expected_scrape_interval) + "s"):
                    self.fail(
                        "Expected scrape interval {0}, but Actual {1}".format(
                            expected_scrape_interval, global_scrape_interval))

        scrape_interval = self.input.param("scrape_interval", 15)
        query = "status/config"
        yaml = YAML()
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        self.log.info("Changing scrape interval to {0} via diag_eval".format(
            scrape_interval))
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "scrape_interval", scrape_interval)
        verify_prometheus_config(expected_scrape_interval=scrape_interval)

        self.log.info("Changing scrape interval to 10s via rest api")
        settings = StatsHelper(self.cluster.master).change_scrape_interval(10)
        verify_prometheus_config(expected_scrape_interval=10)
Example #18
 def test_check_authorization_low_cardinality_metrics(self):
     """
     Check that the low cardinality prometheus metrics endpoint is accessible only with cluster.admin.internal.stats!read
     Check with the cluster admin role (it should fail), then with Full admin (it should pass)
     """
     rbac_util = RbacUtils(self.cluster.master)
     self.log.info("Create a user with role cluster admin")
     rbac_util._create_user_and_grant_role("cluster_admin", "cluster_admin")
     for server in self.cluster.servers[:self.nodes_init]:
         stats_helper_object = StatsHelper(server)
         try:
             stats_helper_object.username = "******"
             _ = stats_helper_object.get_prometheus_metrics()
             self.fail(
                 "Metrics was accessible without necessary permissions")
         except Exception as e:
             self.log.info(
                 "Accessing metrics with cluster admin failed as expected {0}"
                 .format(e))
         self.log.info("trying again with Administrator privilages")
         stats_helper_object.username = "******"
         map = stats_helper_object.get_prometheus_metrics()
         number_of_metrics = len(map)
         self.log.info(
             "Got metrics with user Full admin. Number of metrics: {0}".
             format(number_of_metrics))
Example #19
    def test_disable_high_cardinality_metrics(self):
        """
        Disable Prometheus from scraping high cardinality metrics
        Validate by querying Prometheus directly for its active targets
        """
        self.bucket_util.load_sample_bucket(TravelSample())
        self.bucket_util.load_sample_bucket(BeerSample())

        self.log.info("Disabling high cardinality metrics of all services")
        value = "[{index,[{high_cardinality_enabled,false}]}, {fts,[{high_cardinality_enabled,false}]},\
                 {kv,[{high_cardinality_enabled,false}]}, {cbas,[{high_cardinality_enabled,false}]}, \
                 {eventing,[{high_cardinality_enabled,false}]}]"

        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "services", value)

        self.log.info("Validating by querying prometheus")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "prometheus_auth_enabled", "false")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "listen_addr_type", "any")
        self.sleep(10, "Waiting for prometheus federation")
        query = "targets?state=active"
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).query_prometheus_federation(query)
            active_targets = content["data"]["activeTargets"]
            if len(active_targets) == 0:
                self.fail("Prometheus did not return any active targets")
            for active_targets_dict in active_targets:
                job = active_targets_dict["labels"]["job"]
                self.log.info("Job name {0}".format(job))
                if "high_cardinality" in job:
                    self.fail(
                        "Prometheus is still scraping target with job name {0} on {1}"
                        .format(job, server.ip))
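The loop above walks the targets?state=active response and fails if any active job name still contains "high_cardinality". A standalone sketch of that filter, with a hand-built response (illustrative job names) standing in for query_prometheus_federation's result:

# Hand-built stand-in for the targets?state=active response
content = {
    "data": {
        "activeTargets": [
            {"labels": {"job": "ns_server"}},
            {"labels": {"job": "kv_high_cardinality"}},
        ]
    }
}

active_targets = content["data"]["activeTargets"]
assert active_targets, "Prometheus did not return any active targets"
still_scraped = [t["labels"]["job"] for t in active_targets
                 if "high_cardinality" in t["labels"]["job"]]
print(still_scraped)   # ['kv_high_cardinality'] -> the test above would fail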
Example #20
    def test_disable_high_cardinality_metrics(self):
        """
        Disable Prometheus from scraping high cardinality metrics
        Validate by querying Prometheus directly for its active targets
        """
        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        self.log.info("Disabling high cardinality metrics of all services")
        metrics_data = '{"services":{"analytics":{"highCardEnabled":false}, "clusterManager":{"highCardEnabled":false},\
        "data":{"highCardEnabled":false}, "eventing":{"highCardEnabled":false}, \
        "fullTextSearch":{"highCardEnabled":false}, "index":{"highCardEnabled":false}}}'

        StatsHelper(self.cluster.master).configure_stats_settings_from_api(
            metrics_data)

        self.log.info("Validating by querying prometheus")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "prometheus_auth_enabled", "false")
        StatsHelper(
            self.cluster.master).configure_stats_settings_from_diag_eval(
                "listen_addr_type", "any")
        self.sleep(10, "Waiting for prometheus federation")
        query = "targets?state=active"
        for server in self.cluster.servers[:self.nodes_init]:
            content = StatsHelper(server).query_prometheus_federation(query)
            active_targets = content["data"]["activeTargets"]
            if len(active_targets) == 0:
                self.fail("Prometheus did not return any active targets")
            for active_targets_dict in active_targets:
                job = active_targets_dict["labels"]["job"]
                self.log.info("Job name {0}".format(job))
                if "high_cardinality" in job:
                    self.fail(
                        "Prometheus is still scraping target with job name {0} on {1}"
                        .format(job, server.ip))
Example #21
File: Collections.py  Project: umang-cb/TAF
 def tearDown(self):
     # Do not call the base class's teardown, as we want to keep the cluster intact after the volume run
     if self.scrape_interval:
         self.log.info("Reverting prometheus settings back to default")
         StatsHelper(self.cluster.master).reset_stats_settings_from_diag_eval()
     self.close_all_threads()
     # Cleanup the backup service
     if self.backup_service_test:
         self.backup_service.clean()
     self.log.info("Printing bucket stats before teardown")
     self.bucket_util.print_bucket_stats(self.cluster)
     if self.collect_pcaps:
         self.start_fetch_pcaps()
     if not self.skip_check_logs:
         self.check_logs()
     # Close all tasks explicitly
     self.task_manager.shutdown_task_manager()
     self.task.shutdown(force=True)
     self.task_manager.abort_all_tasks()
     # Close all sdk clients explicitly
     if self.sdk_client_pool:
         self.sdk_client_pool.shutdown()
Example #22
 def tearDown(self):
     # Do not call the base class's teardown, as we want to keep the cluster intact after the volume run
     if self.scrape_interval:
         self.log.info("Reverting prometheus settings back to default")
         StatsHelper(
             self.cluster.master).reset_stats_settings_from_diag_eval()
     if self.query_thread:
         # Join query thread
         self.query_thread_flag = False
         self.query_thread.join()
         self.query_thread = None
         # Join ui_stats thread
         self.ui_stats_thread_flag = False
         self.ui_stats_thread.join()
         self.ui_stats_thread = None
     self.log.info("Printing bucket stats before teardown")
     self.bucket_util.print_bucket_stats()
     if self.collect_pcaps:
         self.start_fetch_pcaps()
     result = self.check_coredump_exist(self.servers, force_collect=True)
     if not self.crash_warning:
         self.assertFalse(result, msg="Cb_log file validation failed")
     if self.crash_warning and result:
         self.log.warn("CRASH | CRITICAL | WARN messages found in cb_logs")
Example #23
 def tearDown(self):
     self.log.info("Reverting settings to default")
     StatsHelper(self.cluster.master).reset_stats_settings_from_diag_eval()
     super(StatsBasicOps, self).tearDown()
Example #24
    def setUp(self):
        self.input = TestInputSingleton.input
        self.input.test_params.update({"default_bucket": False})
        super(volume, self).setUp()
        self.bucket_util._expiry_pager(val=5)
        self.rest = RestConnection(self.servers[0])
        self.available_servers = list()
        self.available_servers = self.cluster.servers[self.nodes_init:]
        self.exclude_nodes = [self.cluster.master]
        self.iterations = self.input.param("iterations", 2)
        self.vbucket_check = self.input.param("vbucket_check", True)
        self.data_load_spec = self.input.param(
            "data_load_spec", "volume_test_load_for_volume_test")
        self.rebalance_moves_per_node = self.input.param(
            "rebalance_moves_per_node", 4)
        self.cluster_util.set_rebalance_moves_per_nodes(
            rebalanceMovesPerNode=self.rebalance_moves_per_node)
        self.scrape_interval = self.input.param("scrape_interval", None)
        if self.scrape_interval:
            self.log.info("Changing scrape interval to {0}".format(
                self.scrape_interval))
            # Change global scrape_interval
            StatsHelper(self.cluster.master).\
                configure_stats_settings_from_diag_eval("scrape_interval",self.scrape_interval)
            # Change gloabl scrape_timeout to equal global scrape_interval
            StatsHelper(self.cluster.master).\
                configure_stats_settings_from_diag_eval("scrape_timeout",self.scrape_interval)
            # Change high cardinality services' scrape_interval
            value = "[{S, [{high_cardinality_enabled, true}, {high_cardinality_scrape_interval, %s}]} " \
                    "|| S <- [index, fts, kv, cbas, eventing]]" % self.scrape_interval
            StatsHelper(
                self.cluster.master).configure_stats_settings_from_diag_eval(
                    "services", value)

        self.doc_and_collection_ttl = self.input.param(
            "doc_and_collection_ttl", False)  # For using doc_ttl + coll_ttl
        self.skip_validations = self.input.param("skip_validations", True)

        # Services to be added on rebalance-in nodes during the volume test
        self.services_for_rebalance_in = self.input.param(
            "services_for_rebalance_in", None)

        # Initialize parameters for index querying
        self.n1ql_nodes = None
        self.number_of_indexes = self.input.param("number_of_indexes", 0)
        self.flush_buckets_before_indexes_creation = False
        if self.number_of_indexes > 0:
            self.flush_buckets_before_indexes_creation = \
                self.input.param("flush_buckets_before_indexes_creation", True)
            if self.flush_buckets_before_indexes_creation:
                self.bucket_util.flush_all_buckets(
                    self.cluster.master, skip_resetting_num_items=True)
            self.set_memory_quota_kv_index()
            self.n1ql_nodes = self.cluster_util.get_nodes_from_services_map(
                service_type="n1ql",
                get_all_nodes=True,
                servers=self.cluster.servers[:self.nodes_init],
                master=self.cluster.master)
            self.n1ql_rest_connections = list()
            for n1ql_node in self.n1ql_nodes:
                self.n1ql_rest_connections.append(RestConnection(n1ql_node))
                self.exclude_nodes.append(n1ql_node)
            self.n1ql_turn_counter = 0  # To distribute the turn of using n1ql nodes for query. Start with first node
            indexes_to_build = self.create_indexes_and_initialize_queries()
            self.build_deferred_indexes(indexes_to_build)
        self.query_thread_flag = False
        self.query_thread = None
        self.ui_stats_thread_flag = False
        self.ui_stats_thread = None
Example #25
 def _get_ui_stats(self, bucket_name):
     content = StatsHelper(
         self.cluster.master).post_range_api_metrics(bucket_name)
     self.log.info(content)
     return content