def load_document_until_ram_percentage(self):
    """Load docs in fixed-size batches until the bucket's memory
    usage reaches the configured RAM percentage."""
    self.start = 0
    doc_batch_size = 5000
    self.end = doc_batch_size
    bucket_helper = BucketHelper(self.cluster.master)
    # Scale the quota to bytes (assuming bucket_ram is in MB) so it
    # is comparable with the server-reported mem_used stat
    mem_cap = (self.document_ram_percentage
               * self.bucket_ram
               * 1000000)
    while True:
        self.log.info("Add documents to bucket")
        self.perform_doc_ops_in_all_cb_buckets(
            "create", self.start, self.end,
            durability=self.durability_level)

        self.log.info("Calculate available free memory")
        bucket_json = bucket_helper.get_bucket_json(self.cb_bucket_name)
        mem_used = 0
        for node_stat in bucket_json["nodes"]:
            mem_used += node_stat["interestingStats"]["mem_used"]

        if mem_used < mem_cap:
            self.log.info("Memory used: %s < %s" % (mem_used, mem_cap))
            self.start = self.end
            self.end = self.end + doc_batch_size
            self.num_items = self.end
        else:
            break
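# Illustrative sketch (example values assumed, not taken from the test):
# the mem_cap arithmetic above treats bucket_ram as the bucket quota in
# MB and document_ram_percentage as a fraction of that quota, so the
# cap comes out in bytes, matching the per-node mem_used values summed
# in the loop.
document_ram_percentage = 0.5    # assumed fraction of the quota
bucket_ram = 100                 # assumed quota in MB
mem_cap = document_ram_percentage * bucket_ram * 1000000
assert mem_cap == 50000000       # bytes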
def durability_succeeds(self, bucket_name, induced_error=None,
                        failed_nodes=0):
    """
    Determines whether durability will fail/work based on the type of
    error induced during the test and the number of nodes the error
    is induced on.

    :param bucket_name: Name of the bucket used for fetching the
                        replica value (str)
    :param induced_error: Error type induced during the test
                          execution (str)
    :param failed_nodes: No. of nodes failed due to the
                         induced_error (int)
    :return durability_succeeds: Durability status for the bucket (bool)
    """
    durability_succeeds = True
    # get_bucket_json is an instance method, so the helper must be
    # instantiated against the cluster master first
    bucket = BucketHelper(self.cluster.master).get_bucket_json(bucket_name)
    min_nodes_req = bucket["replicaNumber"] + 1
    # Strict majority of (active + replica) copies;
    # assumes "from math import floor" at module level
    majority_value = floor(min_nodes_req / 2) + 1
    if induced_error is None:
        if (self.cluster_len - failed_nodes) < majority_value:
            durability_succeeds = False
    else:
        if (self.durability == "MAJORITY"
                and induced_error in self.disk_error_types):
            durability_succeeds = True
        elif (self.cluster_len - failed_nodes) < majority_value:
            durability_succeeds = False
    return durability_succeeds
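# Hedged sketch (hypothetical standalone helper, not part of the class
# above): worked values for the majority arithmetic durability_succeeds
# relies on, i.e. a strict majority of the (active + replica) copies.
from math import floor

def majority_for(replica_number):
    min_nodes_req = replica_number + 1           # active copy + replicas
    return int(floor(min_nodes_req / 2.0)) + 1   # strict majority

assert majority_for(0) == 1   # no replicas: the active copy alone
assert majority_for(1) == 2   # 2 copies: both must ack
assert majority_for(2) == 2   # 3 copies: any 2 suffice
assert majority_for(3) == 3   # 4 copies: any 3 suffice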
def load_document_until_ram_percentage(self):
    """Variant that builds docs through DocumentGenerator and loads
    them via bucket_util until the RAM-percentage cap is reached."""
    self.start = 0
    doc_batch_size = 5000
    self.end = doc_batch_size
    bucket_helper = BucketHelper(self.cluster.master)
    mem_cap = (self.document_ram_percentage
               * self.bucket_ram
               * 1000000)

    first = ['james', 'sharon', 'dave', 'bill', 'mike', 'steve']
    profession = ['doctor', 'lawyer']

    template_obj = JsonObject.create()
    template_obj.put("number", 0)
    template_obj.put("first_name", "")
    template_obj.put("profession", "")
    template_obj.put("mutated", 0)
    template_obj.put("mutation_type", "ADD")

    while True:
        self.log.info("Add documents to bucket")
        doc_gen = DocumentGenerator("test_docs", template_obj,
                                    start=self.start, end=self.end,
                                    randomize=False,
                                    first_name=first,
                                    profession=profession,
                                    number=range(70))
        try:
            self.bucket_util.sync_load_all_buckets(
                self.cluster, doc_gen, "create", 0,
                batch_size=doc_batch_size,
                durability=self.durability_level,
                suppress_error_table=True)
        except Exception as e:
            self.fail("Following error occurred while loading "
                      "bucket - {0}".format(str(e)))

        self.log.info("Calculate available free memory")
        bucket_json = bucket_helper.get_bucket_json(self.bucket_name)
        mem_used = 0
        for node_stat in bucket_json["nodes"]:
            mem_used += node_stat["interestingStats"]["mem_used"]

        if mem_used < mem_cap:
            self.log.info("Memory used: %s < %s" % (mem_used, mem_cap))
            self.start = self.end
            self.end = self.end + doc_batch_size
            self.num_items = self.end
        else:
            break
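# Illustrative sketch (shape assumed from the template above, not a
# verified DocumentGenerator output): each generated document is keyed
# with the "test_docs" prefix and carries a body along these lines.
example_doc = {
    "number": 12,            # one value from range(70)
    "first_name": "james",   # one value from the 'first' list
    "profession": "doctor",  # one value from the 'profession' list
    "mutated": 0,
    "mutation_type": "ADD",
}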