Exemplo n.º 1
0
 def test_default_moxi(self):
     """Exercise the full lifecycle of the 'default' bucket.

     Creates the bucket, loads and validates keys through memcached,
     deletes the bucket, then re-creates it to confirm the name is
     reusable after deletion.
     """
     bucket_name = 'default'
     rest = RestConnection(self.cluster.master)
     replicaNumber = 1
     bucket = Bucket({"name": bucket_name, "replicaNumber": replicaNumber})
     self.bucket_util.create_bucket(bucket)
     msg = 'create_bucket succeeded but bucket {0} does not exist'.format(bucket_name)
     self.assertTrue(self.bucket_util.wait_for_bucket_creation(bucket, rest), msg)
     # Brief settle time before driving load at the new bucket
     self.sleep(5)
     #self.assertTrue(self.bucket_util.wait_for_memcached(self.cluster.master, bucket), "Wait_for_memcached failed")
     try:
         self._generate_load(bucket)
     except Exception as e:
         # Include the underlying error so failures are diagnosable
         self.fail("unable to insert any key to memcached: %s" % e)
     try:
         self._validate_load(bucket)
     except Exception as e:
         self.fail("Not all values were stored successfully: %s" % e)
     self.bucket_util.delete_bucket(self.cluster.master, bucket)
     msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(bucket_name)
     self.assertTrue(self.bucket_util.wait_for_bucket_deletion(bucket, rest), msg)
     # Re-create to verify the bucket name is usable again after deletion
     self.bucket_util.create_bucket(bucket)
     msg = 'create_bucket succeeded but bucket {0} does not exist'.format(bucket_name)
     self.assertTrue(self.bucket_util.wait_for_bucket_creation(bucket, rest), msg)
     self.sleep(5)
Exemplo n.º 2
0
 def test_default_moxi(self):
     """For each replica count 0-3, create and delete the 'default'
     bucket and verify the bucket and its data files are fully removed.
     """
     bucket_name = "default"
     rest = RestConnection(self.cluster.master)
     shell_conn = RemoteMachineShellConnection(self.cluster.master)
     for num_replicas in [0, 1, 2, 3]:
         target_bucket = Bucket({"name": bucket_name,
                                 "replicaNumber": num_replicas})
         self.bucket_util.create_bucket(target_bucket)
         self.assertTrue(
             self.bucket_util.wait_for_bucket_creation(target_bucket, rest),
             'create_bucket succeeded but bucket {0} does not exist'.format(
                 bucket_name))
         self.bucket_util.delete_bucket(self.cluster.master,
                                        target_bucket.name)
         self.assertTrue(
             self.bucket_util.wait_for_bucket_deletion(target_bucket, rest),
             'bucket "{0}" was not deleted even after waiting for two minutes'.format(
                 bucket_name))
         # Deletion must also clean the on-disk data files
         self.assertTrue(
             self.wait_for_data_files_deletion(
                 bucket_name,
                 remote_connection=shell_conn,
                 rest=rest,
                 timeout_in_seconds=20),
             msg='bucket {0} data files are not deleted after bucket deleted from membase'.format(
                 bucket_name))
Exemplo n.º 3
0
 def test_two_replica(self):
     """Create the 'default' bucket with two replicas and verify the
     REST API reports it as existing."""
     bucket_name = 'default'
     rest = RestConnection(self.cluster.master)
     two_replica_bucket = Bucket({"name": bucket_name,
                                  "replicaNumber": 2})
     self.bucket_util.create_bucket(two_replica_bucket)
     self.assertTrue(
         self.bucket_util.wait_for_bucket_creation(two_replica_bucket,
                                                   rest),
         'create_bucket succeeded but bucket {0} does not exist'.format(
             bucket_name))
Exemplo n.º 4
0
    def setUp(self):
        """Create the per-test buckets for MultiDurabilityTests.

        Reads ';'-separated test params ``replica_list`` and
        ``bucket_type_list``; for each of ``self.standard_buckets``
        buckets the replica count defaults to 0 and the bucket type to
        MEMBASE when the lists are shorter than the bucket count.
        Populates ``self.bucket_dict[index]`` with "replica", "type"
        and the created bucket "object".
        """
        super(MultiDurabilityTests, self).setUp()

        self.key = 'test_docs'.rjust(self.key_size, '0')
        replica_list = self.input.param("replica_list", list())
        bucket_type_list = self.input.param("bucket_type_list", list())
        self.bucket_dict = dict()
        bucket_ram_quota = 100

        # Params may arrive as a single ';'-separated string
        if type(replica_list) is str:
            replica_list = replica_list.split(";")
        if type(bucket_type_list) is str:
            bucket_type_list = bucket_type_list.split(";")

        for index in range(self.standard_buckets):
            self.bucket_dict[index] = dict()

            # If replica not provided, set replica value to '0'.
            # Cast to int: values split from the string param are str,
            # but replicaNumber must be numeric (matches the async
            # variant of this setUp elsewhere in the suite).
            if len(replica_list) - 1 < index:
                self.bucket_dict[index]["replica"] = 0
            else:
                self.bucket_dict[index]["replica"] = int(replica_list[index])

            # If bucket_type not provided, default the type to MEMBASE
            if len(bucket_type_list) - 1 < index:
                self.bucket_dict[index]["type"] = Bucket.bucket_type.MEMBASE
            else:
                self.bucket_dict[index]["type"] = bucket_type_list[index]

            # Create bucket object for creation
            bucket_obj = Bucket({
                Bucket.name: "bucket_{0}".format(index),
                Bucket.bucketType: self.bucket_dict[index]["type"],
                Bucket.ramQuotaMB: bucket_ram_quota,
                Bucket.replicaNumber: self.bucket_dict[index]["replica"],
                Bucket.compressionMode: "off",
                Bucket.maxTTL: 0})
            self.bucket_util.create_bucket(bucket_obj)

            # Append bucket object into the bucket_info dict
            self.bucket_dict[index]["object"] = self.bucket_util.buckets[-1]
        self.log.info("=== MultiDurabilityTests base setup done ===")
Exemplo n.º 5
0
 def test_valid_length(self):
     """Create a bucket whose name length comes from the 'name_length'
     test param (default 100) and verify it exists.

     Fails if bucket creation raises BucketCreationException, since the
     length is expected to be valid.
     """
     name_len = self.input.param('name_length', 100)
     name = 'a' * name_len
     rest = RestConnection(self.cluster.master)
     bucket = Bucket({"name": name, "replicaNumber": 1})
     try:
         self.bucket_util.create_bucket(bucket)
         msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
             name)
         self.assertTrue(
             self.bucket_util.wait_for_bucket_creation(bucket, rest), msg)
     except BucketCreationException as ex:
         self.log.error(ex)
         self.fail('could not create bucket with valid length')
    def setUp(self):
        """Build the test cluster and create buckets asynchronously.

        Rebalances in ``self.nodes_init`` nodes, sizes a per-bucket RAM
        quota from the node's memory quota, then fires one async
        create-bucket task per bucket and waits for each bucket to warm
        up.  Raises if any bucket fails to warm up.  Populates
        ``self.bucket_dict[index]`` with "replica", "type" and "object".
        """
        super(MultiDurabilityTests, self).setUp()

        self.key = 'test_docs'.rjust(self.key_size, '0')
        replica_list = self.input.param("replica_list", list())
        bucket_type_list = self.input.param("bucket_type_list", list())
        self.bucket_dict = dict()
        # Maps each Bucket object to its in-flight async create task
        tasks = dict()

        # Create cluster
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master]+nodes_init)
        self.bucket_util.add_rbac_user()

        rest = RestConnection(self.cluster.master)
        info = rest.get_nodes_self()
        if info.memoryQuota < 450.0:
            self.fail("At least 450MB of memory required")
        else:
            # Reserve 2/3 of the node quota and split it evenly across
            # buckets, but never go below 100MB per bucket
            available_ram = info.memoryQuota * (2.0 / 3.0)
            if available_ram / self.standard_buckets > 100:
                bucket_ram_quota = int(available_ram / self.standard_buckets)
            else:
                bucket_ram_quota = 100

        # Params may arrive as a single ';'-separated string
        if type(replica_list) is str:
            replica_list = replica_list.split(";")
        if type(bucket_type_list) is str:
            bucket_type_list = bucket_type_list.split(";")

        for index in range(self.standard_buckets):
            self.bucket_dict[index] = dict()

            # If replica not provided, set replica value to '0'
            if len(replica_list)-1 < index:
                self.bucket_dict[index]["replica"] = 0
            else:
                self.bucket_dict[index]["replica"] = int(replica_list[index])

            # If bucket_type not provided, set replica value to 'MEMBASE'
            if len(bucket_type_list)-1 < index:
                self.bucket_dict[index]["type"] = Bucket.bucket_type.MEMBASE
            else:
                self.bucket_dict[index]["type"] = bucket_type_list[index]

            # create bucket object for creation
            bucket = Bucket(
                {Bucket.name: "bucket_{0}".format(index),
                 Bucket.bucketType: self.bucket_dict[index]["type"],
                 Bucket.ramQuotaMB: bucket_ram_quota,
                 Bucket.replicaNumber: self.bucket_dict[index]["replica"],
                 Bucket.compressionMode: "off",
                 Bucket.maxTTL: 0})
            tasks[bucket] = self.bucket_util.async_create_bucket(bucket)

            # Append bucket object into the bucket_info dict
            self.bucket_dict[index]["object"] = bucket

        # Collect every task's result; remember the last warm-up
        # failure (if any) and raise after all tasks are reaped so no
        # task is left running.
        raise_exception = None
        for bucket, task in tasks.items():
            self.task_manager.get_task_result(task)
            if task.result:
                self.sleep(2)
                warmed_up = self.bucket_util._wait_warmup_completed(
                    self.cluster_util.get_kv_nodes(), bucket, wait_time=60)
                if not warmed_up:
                    # Creation succeeded but warm-up did not; mark the
                    # task failed so the bucket is not registered below
                    task.result = False
                    raise_exception = "Bucket %s not warmed up" % bucket.name

            if task.result:
                self.bucket_util.buckets.append(bucket)
            self.task_manager.stop_task(task)

        if raise_exception:
            raise Exception("Create bucket failed: %s" % raise_exception)
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()
        self.log.info("=== MultiDurabilityTests base setup done ===")
Exemplo n.º 7
0
    def test_database_fragmentation(self):
        """Verify DB auto-compaction behaviour at the configured
        fragmentation threshold (``self.autocompaction_value``).

        Out-of-range thresholds must be rejected by the REST API with
        an 'Allowed range is 2 - 100' error.  For a valid threshold the
        test loads items, then rewrites them with smaller values to
        build fragmentation, and asserts auto-compaction runs.  The
        'during_ops' param optionally changes the REST port or password
        mid-test to exercise compaction under those conditions.
        """
        new_port = self.input.param("new_port", "9090")
        self.err = None
        self.bucket_util.delete_all_buckets(self.cluster.servers)
        percent_threshold = self.autocompaction_value
        bucket_name = "default"
        MAX_RUN = 100
        item_size = 1024
        # Updated docs are smaller than the originals so the on-disk
        # file fragments as items are rewritten
        update_item_size = item_size * ((float(100 - percent_threshold)) / 100)
        serverInfo = self.servers[0]

        self.log.info("Creating Rest connection to {0}".format(serverInfo))
        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)
        output, rq_content, _ = rest.set_auto_compaction(
            "false",
            dbFragmentThresholdPercentage=percent_threshold,
            viewFragmntThresholdPercentage=None)

        if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD
                           or percent_threshold >= MAX_COMPACTION_THRESHOLD):
            # Threshold out of range: the REST call must be rejected
            # with the expected range error
            self.assertFalse(
                output,
                "it should be  impossible to set compaction value = {0}%".
                format(percent_threshold))
            self.assertTrue("errors" in json.loads(rq_content),
                            "Error is not present in response")
            self.assertTrue(
                str(json.loads(rq_content)["errors"]).find(
                    "Allowed range is 2 - 100") > -1,
                "Error 'Allowed range is 2 - 100' expected, but was '{0}'".
                format(str(json.loads(rq_content)["errors"])))
            self.log.info(
                "Response contains error = '%(errors)s' as expected" %
                json.loads(rq_content))
        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
              and percent_threshold <= MAX_RUN):
            node_ram_ratio = self.bucket_util.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            available_ram = info.memoryQuota * (node_ram_ratio) / 2
            items = (int(available_ram * 1000) / 2) / item_size
            # Fixed: original used '%s' inside str.format(), which
            # logged the literal '%s' instead of the item count
            self.log.info("ITEMS ============= {0}".format(items))

            bucket_obj = Bucket({
                Bucket.name: bucket_name,
                Bucket.ramQuotaMB: int(available_ram),
                Bucket.replicaNumber: self.num_replicas
            })
            self.bucket_util.create_bucket(bucket_obj)
            self.bucket_util.wait_for_memcached(serverInfo, bucket_obj)
            self.bucket_util.wait_for_vbuckets_ready_state(
                serverInfo, bucket_obj)

            self.log.info(
                "******start to load {0}K keys with {1} bytes/key".format(
                    items, item_size))
            generator = BlobGenerator('compact',
                                      'compact-',
                                      int(item_size),
                                      start=0,
                                      end=(items * 1000))
            for bucket in self.bucket_util.buckets:
                task = self.task.async_load_gen_docs(self.cluster,
                                                     bucket,
                                                     generator,
                                                     "create",
                                                     0,
                                                     batch_size=10,
                                                     process_concurrency=8)
                self.task.jython_task_manager.get_task_result(task)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            self.log.info(
                "********start to update {0}K keys with smaller value {1} bytes/key"
                .format(items, int(update_item_size)))
            generator_update = BlobGenerator('compact',
                                             'compact-',
                                             int(update_item_size),
                                             start=0,
                                             end=(items * 1000))
            if self.during_ops:
                # Change the REST endpoint mid-test, then rebuild the
                # connection against the new port/password
                if self.during_ops == "change_port":
                    self.change_port(new_port=new_port)
                    self.cluster.master.port = new_port
                elif self.during_ops == "change_password":
                    old_pass = self.cluster.master.rest_password
                    self.change_password(new_password=self.input.param(
                        "new_password", "new_pass"))
                    self.cluster.master.rest_password = self.input.param(
                        "new_password", "new_pass")
                rest = RestConnection(self.cluster.master)
            insert_thread = Thread(target=self.load,
                                   name="insert",
                                   args=(self.cluster.master,
                                         self.autocompaction_value,
                                         bucket_name, generator_update))
            try:
                self.log.info('starting the load thread')
                insert_thread.start()
                compact_run = remote_client.wait_till_compaction_end(
                    rest,
                    bucket_name,
                    timeout_in_seconds=(self.wait_timeout * 10))

                if not compact_run:
                    self.fail("auto compaction does not run")
                elif compact_run:
                    self.log.info("auto compaction run successfully")
            # 'as ex' syntax (PEP 3110) for consistency with the rest
            # of the suite; works on Python 2.6+ and Python 3
            except Exception as ex:
                self.log.info("exception in auto compaction")
                if self.during_ops:
                    # Restore the original port/password before failing
                    if self.during_ops == "change_password":
                        self.change_password(new_password=old_pass)
                    elif self.during_ops == "change_port":
                        self.change_port(new_port='8091',
                                         current_port=new_port)
                if str(ex).find("enospc") != -1:
                    self.is_crashed.set()
                    self.log.error(
                        "Disk is out of space, unable to load more data")
                    insert_thread._Thread__stop()
                else:
                    insert_thread._Thread__stop()
                    raise ex
            else:
                insert_thread.join()
                if self.err is not None:
                    self.fail(self.err)