Example #1
 def test_advance_bucket_op_with_expiry(self):
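     # load docs with a 120 s TTL, shorten the expiry pager interval, deploy the handler,
     # then verify every insert was processed and that the result count drains to 0 after expiry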
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size,
               exp=120)
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master,
                                         "exp_pager_stime",
                                         3,
                                         bucket=self.src_bucket_name)
     body = self.create_save_function_body(self.function_name,
                                           self.handler_code)
     body['depcfg']['buckets'].append({
         "alias": self.src_bucket_name,
         "bucket_name": self.src_bucket_name,
         "access": "rw"
     })
     self.rest.create_function(body['appname'], body, self.function_scope)
     self.deploy_function(body)
     self.verify_eventing_results(self.function_name,
                                  self.docs_per_day * 2016,
                                  skip_stats_validation=True)
     self.verify_eventing_results(self.function_name,
                                  0,
                                  skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
Example #2
 def test_expiry_mutation_for_dcp_stream_boundary_from_now(self):
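     # dcp_stream_boundary="from_now" means only mutations made after deployment
     # (the expiry updates below) are streamed to the handler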
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
     body = self.create_save_function_body(self.function_name,"handler_code/ABO/insert_exp_delete_only.js",
                                           dcp_stream_boundary="from_now")
     if self.non_default_collection:
         self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket")
     else:
         self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default")
     self.deploy_function(body)
     if self.non_default_collection:
         self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", 0)
     else:
         self.verify_doc_count_collections("dst_bucket._default._default", 0)
     ### update all the documents with expiry
     if self.non_default_collection:
         self.load_data_to_collection(self.docs_per_day*self.num_docs,"src_bucket.src_bucket.src_bucket",is_update=True,expiry=10)
     else:
         self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default",is_update=True,expiry=10)
     # Wait for eventing to catch up with all the expiry mutations and verify results
     if self.non_default_collection:
         self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
     else:
         self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
     self.undeploy_and_delete_function(body)
Example #3
 def _expiry_pager(self, master):
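     # make the expiry pager run every 10 s on each bucket and give it time to purge expired items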
     buckets = self._get_cluster_buckets(master)
     for bucket in buckets:
         ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 10,
                                             bucket)
         self._log.info("wait for expiry pager to run on all these nodes")
         time.sleep(30)
 def test_expired_mutation_pause_resume(self):
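     # load docs with a 1 s TTL, then pause and resume the handler and verify the
     # expiry mutations are still processed once it is running again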
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size,
               exp=1)
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master,
                                         "exp_pager_stime",
                                         10,
                                         bucket=self.src_bucket_name)
     body = self.create_save_function_body(
         self.function_name,
         "handler_code/bucket_op_expired.js",
         worker_count=3)
     self.deploy_function(body)
     self.pause_function(body)
     self.sleep(5)
     self.resume_function(body)
     # Wait for eventing to catch up with all the expiry mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  0,
                                  on_delete=True,
                                  skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
Example #5
 def test_expiry_mutation_for_dcp_stream_boundary_from_beginning(self):
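     # speed up the expiry pager and verify the ON DELETE handler observed a mutation
     # for every document in the source bucket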
     self.load(load_gen=self.gens_load, bucket=self.src_bucket)
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
     body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_DELETE, worker_count=3)
     self.deploy_function(body)
     # Wait for eventing to catch up with all the expiry mutations and verify results
     self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, on_delete=True)
     self.undeploy_and_delete_function(body)
Example #6
 def test_expiry_mutation_for_dcp_stream_boundary_from_beginning(self):
     self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
               batch_size=self.batch_size, exp=1)
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
     body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_DELETE, worker_count=3)
     self.deploy_function(body)
     # Wait for eventing to catch up with all the expiry mutations and verify results
     self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, on_delete=True)
     self.undeploy_and_delete_function(body)
Example #7
 def test_expired_with_cancel_timer(self):
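     # load docs with a 60 s TTL, deploy the cancel-timer handler, and verify the
     # result count is 0 once the documents expire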
     self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
               batch_size=self.batch_size, exp=60)
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 10, bucket=self.src_bucket_name)
     body = self.create_save_function_body(self.function_name, "handler_code/cancel_timer_with_expiry.js")
     self.deploy_function(body)
     # Wait for eventing to catch up with all the delete mutations and verify results
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
Example #8
 def test_warmup(self):
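     # push the bucket into DGM by loading until the memory/resident-ratio threshold is hit,
     # optionally trigger the access scanner, restart memcached, and reload data after warmup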
     ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
     active_resident_threshold = int(
         self.input.param("active_resident_threshold", 110))
     access_log_time = self.input.param("access_log_time", 2)
     mc = MemcachedClientHelper.direct_client(self.servers[0],
                                              self.bucket_name)
     stats = mc.stats()
     threshold = int(self.input.param('threshold', stats[ep_threshold]))
     threshold_reached = False
     self.num_items = self.input.param("items", 10000)
     self._load_doc_data_all_buckets('create')
     # keep loading until mem_used reaches the threshold and the active resident ratio drops below active_resident_threshold
     while not threshold_reached:
         mem_used = int(mc.stats()["mem_used"])
         if (mem_used < threshold or
                 int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold):
             self.log.info(
                 "mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s "
                 % (mem_used, threshold,
                    mc.stats()["vb_active_perc_mem_resident"]))
             items = self.num_items
             self.num_items += self.input.param("items", 10000)
             self._load_doc_data_all_buckets('create', items)
         else:
             threshold_reached = True
             self.log.info("DGM state achieved!!!!")
     #parallel load of data
     items = self.num_items
     self.num_items += 10000
     tasks = self._async_load_doc_data_all_buckets('create', items)
     #wait for draining of data before restart and warm up
     rest = RestConnection(self.servers[0])
     self.servers = rest.get_nodes()
     self._wait_for_stats_all_buckets(self.servers)
     self._stats_befor_warmup()
     for task in tasks:
         task.result()
     #If warmup is done through access log then run access scanner
     if self.access_log:
         scanner_runs = int(mc.stats()["ep_num_access_scanner_runs"])
         self.log.info("setting access scanner time %s minutes" %
                       access_log_time)
         self.log.info("current access scanner run is %s" % scanner_runs)
         ClusterOperationHelper.flushctl_set(self.servers[0],
                                             "alog_sleep_time",
                                             access_log_time,
                                             self.bucket_name)
         if not self._wait_for_access_run(access_log_time, scanner_runs,
                                          mc):
             self.fail("Not able to create access log within %s" %
                       access_log_time)
     self._restart_memcache()
     if self._warmup():
         self._load_doc_data_all_buckets('update', self.num_items - items)
Example #9
    def _create_access_log(self):
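        # shorten alog_sleep_time on every server and bucket so the access scanner runs,
        # and fail if a new access log is not produced within access_log_time minutes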
        stats_all_buckets = {}
        for bucket in self.buckets:
            stats_all_buckets[bucket.name] = StatsCommon()

        for bucket in self.buckets:
            for server in self.servers:
                scanner_runs = stats_all_buckets[bucket.name].get_stats([server], bucket, '', 'ep_num_access_scanner_runs')[server]
                self.log.info("current access scanner run for %s in bucket %s is %s times" % (server.ip, bucket.name, scanner_runs))
                self.log.info("setting access scanner time %s minutes for %s in bucket %s" % (self.access_log_time, server.ip, bucket.name))
                ClusterOperationHelper.flushctl_set(server, "alog_sleep_time", self.access_log_time , bucket.name)
                if not self._wait_for_access_run(self.access_log_time, scanner_runs, server, bucket, stats_all_buckets[bucket.name]):
                    self.fail("Not able to create access log within %s minutes" % self.access_log_time)
Example #11
 def test_expired_mutation(self):
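     # load docs with a 100 s TTL, speed up the expiry pager, and verify the source
     # collection is empty once the expired mutations have been processed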
     if self.non_default_collection:
         self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket", expiry=100, wait_for_loading=False)
     else:
         self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default", expiry=100, wait_for_loading=False)
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
     body = self.create_save_function_body(self.function_name, "handler_code/bucket_op_expired.js")
     self.deploy_function(body)
     # Wait for eventing to catch up with all the expiry mutations and verify results
     if self.non_default_collection:
         self.verify_doc_count_collections("src_bucket.src_bucket.src_bucket", 0)
     else:
         self.verify_doc_count_collections("src_bucket._default._default", 0)
     self.undeploy_and_delete_function(body)
Example #12
 def setUp(self):
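     # read bucket names and upgrade parameters from the test config and make the
     # expiry pager run every 60 s on the source bucket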
     super(EventingUpgrade, self).setUp()
     self.rest = RestConnection(self.master)
     self.server = self.master
     self.queue = queue.Queue()
     self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
     self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
     self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
     self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
     self.dst_bucket_curl = self.input.param('dst_bucket_curl', 'dst_bucket_curl')
     self.source_bucket_mutation = self.input.param('source_bucket_mutation', 'source_bucket_mutation')
     self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
     self.n1ql_op_dst=self.input.param('n1ql_op_dst', 'n1ql_op_dst')
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.upgrade_version = self.input.param("upgrade_version")
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 60, bucket=self.src_bucket_name)
Example #13
    def test_warmup(self):
        ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
        active_resident_threshold = int(self.input.param("active_resident_threshold", 110))
        access_log_time = self.input.param("access_log_time", 2)
        mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
        stats = mc.stats()
        threshold = int(self.input.param('threshold', stats[ep_threshold]))
        threshold_reached = False
        self.num_items = self.input.param("items", 10000)
        self._load_doc_data_all_buckets('create')

        # keep loading until mem_used reaches the threshold and the active resident ratio drops below active_resident_threshold
        while not threshold_reached :
            mem_used = int(mc.stats()["mem_used"])
            if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
                self.log.info("mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s " % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"]))
                items = self.num_items
                self.num_items += self.input.param("items", 10000)
                self._load_doc_data_all_buckets('create', items)
            else:
                threshold_reached = True
                self.log.info("DGM state achieved!!!!")
        # parallel load of data
        items = self.num_items
        self.num_items += 10000
        tasks = self._async_load_doc_data_all_buckets('create', items)
        # wait for draining of data before restart and warm up
        rest = RestConnection(self.servers[0])
        self.nodes_server = rest.get_nodes()
        self._wait_for_stats_all_buckets(self.nodes_server)
        self._stats_befor_warmup()
        for task in tasks:
            task.result()
        # If warmup is done through access log then run access scanner
        if self.access_log :
            scanner_runs = int(mc.stats()["ep_num_access_scanner_runs"])
            self.log.info("setting access scanner time %s minutes" % access_log_time)
            self.log.info("current access scanner run is %s" % scanner_runs)
            ClusterOperationHelper.flushctl_set(self.nodes_server[0], "alog_sleep_time", access_log_time , self.bucket_name)
            if not self._wait_for_access_run(access_log_time, scanner_runs, mc):
                self.fail("Not able to create access log within %s" % access_log_time)
        self._restart_memcache()
        if self._warmup():
            self._load_doc_data_all_buckets('update', self.num_items - items)
Example #14
 def test_advance_bucket_op(self):
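     # load docs with a 300 s TTL and bind the source bucket to the handler, then verify
     # the destination collection fills up and drains back to 0 once the source docs expire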
     if self.non_default_collection:
         self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket",
                                      expiry=300)
     else:
         self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default", expiry=300)
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 3, bucket=self.src_bucket_name)
     body = self.create_save_function_body(self.function_name, "handler_code/ABO/curl_timer_insert.js")
     if self.non_default_collection:
         body['depcfg']['buckets'].append({"alias": self.src_bucket_name, "bucket_name": self.src_bucket_name,
                                           "scope_name":self.src_bucket_name,"collection_name":self.src_bucket_name})
     else:
         body['depcfg']['buckets'].append({"alias": self.src_bucket_name, "bucket_name": self.src_bucket_name})
     self.rest.create_function(body['appname'], body, self.function_scope)
     self.deploy_function(body)
     if self.non_default_collection:
         self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", self.docs_per_day * self.num_docs)
         self.verify_doc_count_collections("dst_bucket.dst_bucket.dst_bucket", 0)
     else:
         self.verify_doc_count_collections("dst_bucket._default._default", self.docs_per_day * self.num_docs)
         self.verify_doc_count_collections("dst_bucket._default._default", 0)
     self.undeploy_and_delete_function(body)
Example #15
    def replication_verification(master, bucket_data, replica, test, failed_over=False):
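        # run the expiry pager on every bucket, wait for replication to settle, then check
        # that replica item counts and curr_items match what was inserted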
        asserts = []
        rest = RestConnection(master)
        buckets = rest.get_buckets()
        nodes = rest.node_statuses()
        test.log.info("expect {0} / {1} replication ? {2}".format(len(nodes),
            (1.0 + replica), len(nodes) / (1.0 + replica)))
        for bucket in buckets:
            ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
        if len(nodes) / (1.0 + replica) >= 1:
            final_replication_state = RestHelper(rest).wait_for_replication(300)
            msg = "replication state after waiting for up to 5 minutes : {0}"
            test.log.info(msg.format(final_replication_state))
            #run expiry_pager on all nodes before doing the replication verification
            for bucket in buckets:
                ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
                test.log.info("wait for expiry pager to run on all these nodes")
                time.sleep(30)
                ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 3600, bucket.name)
                ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
                # windows need more than 15 minutes to get number matched
                replica_match = RebalanceHelper.wait_till_total_numbers_match(bucket=bucket.name,
                    master=master,
                    timeout_in_seconds=600)
                if not replica_match:
                    asserts.append("replication was completed but sum(curr_items) don't match the curr_items_total %s" %
                                   bucket.name)
                if not failed_over:
                    stats = rest.get_bucket_stats(bucket=bucket.name)
                    RebalanceHelper.print_taps_from_all_nodes(rest, bucket.name)
                    msg = "curr_items : {0} is not equal to actual # of keys inserted : {1} : bucket: {2}"

                    if bucket_data[bucket.name]['kv_store'] is None:
                        items_inserted = bucket_data[bucket.name]["items_inserted_count"]
                    else:
                        items_inserted = len(bucket_data[bucket.name]['kv_store'].valid_items())

                    active_items_match = stats["curr_items"] == items_inserted
                    if not active_items_match:
                        asserts.append(msg.format(stats["curr_items"], items_inserted, bucket.name))

        if len(asserts) > 0:
            for msg in asserts:
                test.log.error(msg)
            test.assertTrue(len(asserts) == 0, msg=asserts)
Example #16
 def run_expiry_pager(self, ts=15):
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", ts)
     self.log.info("wait for expiry pager to run on all these nodes")
Example #17
    def test_document_expiry_with_overlapping_filters_between_datasets(self):
        """
        1. Create default bucket
        2. Load data with profession doctor and lawyer
        3. Load data with profession teacher
        4. Create dataset with no filters
        5. Create filter dataset that holds data with profession teacher
        6. Verify the dataset count
        7. Delete half the documents with profession teacher
        8. Verify the updated dataset count
        9. Expire data with profession teacher
        10. Verify the updated dataset count post expiry
        """
        self.log.info("Set expiry pager on default bucket")
        ClusterOperationHelper.flushctl_set(self.master,
                                            "exp_pager_stime",
                                            1,
                                            bucket="default")

        self.log.info("Load data in the default bucket")
        num_items = self.input.param("items", 10000)
        batch_size = self.input.param("batch_size", 10000)
        self.perform_doc_ops_in_all_cb_buckets(self.num_items,
                                               "create",
                                               0,
                                               num_items,
                                               exp=0,
                                               batch_size=batch_size)

        self.log.info(
            "Load data in the default bucket, with documents containing profession teacher"
        )
        load_gen = CBASBacklogIngestion.generate_documents(num_items,
                                                           num_items * 2,
                                                           role=['teacher'])
        self._async_load_all_buckets(server=self.master,
                                     kv_gen=load_gen,
                                     op_type="create",
                                     exp=0,
                                     batch_size=batch_size)

        self.log.info("Create primary index")
        query = "CREATE PRIMARY INDEX ON {0} using gsi".format(
            self.buckets[0].name)
        self.rest.query_tool(query)

        self.log.info("Create a connection")
        cb_bucket_name = self.input.param("cb_bucket_name")
        self.cbas_util.createConn(cb_bucket_name)

        self.log.info("Create a CBAS bucket")
        cbas_bucket_name = self.input.param("cbas_bucket_name")
        self.cbas_util.create_bucket_on_cbas(cbas_bucket_name=cbas_bucket_name,
                                             cb_bucket_name=cb_bucket_name)

        self.log.info("Create a default data-set")
        cbas_dataset_name = self.input.param("cbas_dataset_name")
        self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name=cb_bucket_name,
            cbas_dataset_name=cbas_dataset_name)

        self.log.info("Read input params for field name and value")
        field = self.input.param("where_field", "")
        value = self.input.param("where_value", "")
        cbas_dataset_with_clause = cbas_dataset_name + "_" + value

        self.log.info("Create data-set with profession teacher")
        self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name=cb_bucket_name,
            cbas_dataset_name=cbas_dataset_with_clause,
            where_field=field,
            where_value=value)

        secondary_index = self.input.param("secondary_index", False)
        datasets = [cbas_dataset_name, cbas_dataset_with_clause]
        index_fields = self.input.param("index_fields", None)
        if secondary_index:
            self.log.info("Create secondary index")
            index_fields = ""
            for index_field in self.index_fields:
                index_fields += index_field + ","
                index_fields = index_fields[:-1]
            for dataset in datasets:
                create_idx_statement = "create index {0} on {1}({2});".format(
                    dataset + "_idx", dataset, index_fields)
                status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
                    create_idx_statement)

                self.assertTrue(status == "success",
                                "Create Index query failed")
                self.assertTrue(
                    self.cbas_util.verify_index_created(
                        dataset + "_idx", self.index_fields, dataset)[0])

        self.log.info("Connect to CBAS bucket")
        self.cbas_util.connect_to_bucket(
            cbas_bucket_name=cbas_bucket_name,
            cb_bucket_password=self.cb_bucket_password)

        self.log.info("Wait for ingestion to complete on both data-sets")
        self.cbas_util.wait_for_ingestion_complete([cbas_dataset_name],
                                                   num_items * 2)
        self.cbas_util.wait_for_ingestion_complete([cbas_dataset_with_clause],
                                                   num_items)

        self.log.info("Validate count on data-set")
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                cbas_dataset_name, num_items * 2))
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                cbas_dataset_with_clause, num_items))

        self.log.info("Delete half of the teacher records")
        self.perform_doc_ops_in_all_cb_buckets(num_items // 2, "delete",
                                               num_items + (num_items // 2),
                                               num_items * 2)

        self.log.info("Wait for ingestion to complete")
        self.cbas_util.wait_for_ingestion_complete(
            [cbas_dataset_name], num_items + (num_items // 2))
        self.cbas_util.wait_for_ingestion_complete([cbas_dataset_with_clause],
                                                   num_items // 2)

        self.log.info("Validate count on data-set")
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                cbas_dataset_name, num_items + (num_items // 2)))
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                cbas_dataset_with_clause, num_items // 2))

        self.log.info(
            "Update the documents with profession teacher to expire in next 1 seconds"
        )
        self.perform_doc_ops_in_all_cb_buckets(num_items // 2,
                                               "update",
                                               num_items,
                                               num_items + (num_items // 2),
                                               exp=1)

        self.log.info("Wait for documents to expire")
        self.sleep(15, message="Waiting for documents to expire")

        self.log.info("Wait for ingestion to complete")
        self.cbas_util.wait_for_ingestion_complete([cbas_dataset_name],
                                                   num_items)
        self.cbas_util.wait_for_ingestion_complete([cbas_dataset_with_clause],
                                                   0)

        self.log.info("Validate count on data-set")
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                cbas_dataset_name, num_items))
        self.assertTrue(
            self.cbas_util.validate_cbas_dataset_items_count(
                cbas_dataset_with_clause, 0))
Example #18
 def run_expiry_pager(self, ts = 15):
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", ts)
     self.log.info("wait for expiry pager to run on all these nodes")
Example #19
 def _set_checkpoint_timeout(self, servers, bucket, time):
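     # chk_period controls how long (in seconds) before an open checkpoint is closed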
     ClusterOperationHelper.flushctl_set(servers[0], 'chk_period', time,
                                         bucket)
Example #20
 def _set_checkpoint_size(self, servers, bucket, size):
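     # chk_max_items caps the number of items per checkpoint before a new one is created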
     ClusterOperationHelper.flushctl_set(servers[0], 'chk_max_items', size,
                                         bucket)
Example #21
 def _expiry_pager(self, master):
     buckets = self._get_cluster_buckets(master)
     for bucket in buckets:
         ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 10, bucket)
         self._log.info("wait for expiry pager to run on all these nodes")
         time.sleep(30)
Example #22
 def _set_checkpoint_timeout(self, servers, bucket, time):
     ClusterOperationHelper.flushctl_set(servers[0], 'chk_period', time, bucket)
Example #23
 def _set_checkpoint_size(self, servers, bucket, size):
     ClusterOperationHelper.flushctl_set(servers[0], 'chk_max_items', size, bucket)
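Nearly every example above follows the same pattern: lower exp_pager_stime with ClusterOperationHelper.flushctl_set so the expiry pager reaps expired documents quickly, then give it at least one full cycle before verifying. Below is a minimal distilled sketch of that pattern; the import path and the helper name force_expiry_pager_run are assumptions for illustration, not taken from any example above.

 import time
 from membase.helper.cluster_helper import ClusterOperationHelper  # import path assumed

 def force_expiry_pager_run(master, bucket_name, interval=3, wait=30):
     # wake the expiry pager every `interval` seconds rather than the default (typically 3600 s)
     ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", interval, bucket=bucket_name)
     # give the pager at least one full cycle to purge expired documents
     time.sleep(wait)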