Example #1
 def test_load_regexp(self):
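     # Load 'load_by_id_test' and 'cbtransfer' keyed docs into every bucket,
     # run cbtransfer with a key regex (-k cbtransfer-[0-9]+) from the origin
     # server to the recovery server, then verify only the matching docs.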
     template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
     gen_load = DocumentGenerator('load_by_id_test', template, list(range(5)),
                                   ['james', 'john'], start=0, end=self.num_items)
     gen_load2 = DocumentGenerator('cbtransfer', template, list(range(5)),
                                   ['james', 'john'], start=0, end=self.num_items)
     verify_gen = copy.deepcopy(gen_load2)
     for bucket in self.buckets:
         bucket.kvs[2] = KVStore()
         self.cluster.load_gen_docs(self.server_origin, bucket.name, gen_load,
                                    self.buckets[0].kvs[2], "create", exp=0, flag=0,
                                    only_store_hash=True,
                                    batch_size=1000, compression=self.sdk_compression)
         self.cluster.load_gen_docs(self.server_origin, bucket.name, gen_load2,
                                    self.buckets[0].kvs[1], "create", exp=0, flag=0,
                                    only_store_hash=True,
                                    batch_size=1000, compression=self.sdk_compression)
     transfer_source = 'http://%s:%s' % (self.server_origin.ip, self.server_origin.port)
     transfer_destination = 'http://%s:%s' % (self.server_recovery.ip,
                                              self.server_recovery.port)
     self._run_cbtransfer_all_buckets(transfer_source, transfer_destination,
                                      "-k cbtransfer-[0-9]+ -u {0} -p {1}"\
                                      .format(self.server_recovery.rest_username,
                                              self.server_recovery.rest_password))
     self._wait_curr_items_all_buckets()
     self._verify_data_all_buckets(verify_gen)
Example #2
    def __init__(self, new_params={}):
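        # Build a Bucket test object from a parameter dict, falling back to
        # defaults (name, type, quota, ...), seeding kvs[1] with an empty
        # KVStore and picking NO_EVICTION for ephemeral buckets.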
        self.name = new_params.get(Bucket.name, "default")
        self.bucketType = new_params.get(Bucket.bucketType,
                                         Bucket.bucket_type.MEMBASE)
        self.replicaNumber = new_params.get(Bucket.replicaNumber, 0)
        self.replicaServers = new_params.get(Bucket.replicaServers, [])
        self.ramQuotaMB = new_params.get(Bucket.ramQuotaMB, 100)
        self.kvs = {1: KVStore()}

        if self.bucketType == Bucket.bucket_type.EPHEMERAL:
            self.evictionPolicy = new_params.get(
                Bucket.evictionPolicy,
                Bucket.bucket_eviction_policy.NO_EVICTION)
        else:
            self.evictionPolicy = new_params.get(
                Bucket.evictionPolicy,
                Bucket.bucket_eviction_policy.VALUE_ONLY)

        self.replicaIndex = new_params.get(Bucket.replicaIndex, 0)
        self.priority = new_params.get(Bucket.priority, None)
        self.threadsNumber = new_params.get(Bucket.threadsNumber, 3)
        self.uuid = None
        self.lww = new_params.get(Bucket.lww, False)
        self.maxTTL = new_params.get(Bucket.maxTTL, 0)
        self.flushEnabled = new_params.get(Bucket.flushEnabled, 1)
        self.compressionMode = new_params.get(
            Bucket.compressionMode, Bucket.bucket_compression_mode.PASSIVE)
        self.nodes = None
        self.stats = None
        self.servers = []
        self.vbuckets = []
        self.forward_map = []
Example #3
 def incremental_rebalance_out_with_mutation_and_deletion(self):
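     # Rebalance nodes out one at a time; during each rebalance, concurrently
     # update existing docs (kv_store 1) and delete the 'rebalance-del' docs
     # (kv_store 2), then recreate the deleted docs and verify cluster stats.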
     gen_2 = BlobGenerator('rebalance-del',
                           'rebalance-del-',
                           self.value_size,
                           start=self.num_items // 2 + 2000,
                           end=self.num_items)
     batch_size = 1000
     for i in reversed(list(range(self.num_servers))[1:]):
         # don't use batch for rebalance out 2-1 nodes
         for bucket in self.buckets:
             bucket.kvs[2] = KVStore()
         tasks = [
             self.cluster.async_rebalance(self.servers[:i], [],
                                          [self.servers[i]])
         ]
         tasks += self._async_load_all_buckets(self.master,
                                               self.gen_update,
                                               "update",
                                               0,
                                               kv_store=1,
                                               batch_size=batch_size,
                                               timeout_secs=60)
         tasks += self._async_load_all_buckets(self.master,
                                               gen_2,
                                               "delete",
                                               0,
                                               kv_store=2,
                                               batch_size=batch_size,
                                               timeout_secs=60)
         for task in tasks:
             task.result()
         self.sleep(5)
         self._load_all_buckets(self.master, gen_2, "create", 0)
         self.verify_cluster_stats(self.servers[:i])
     self.verify_unacked_bytes_all_buckets()
Example #4
 def online_upgrade_rebalance_in_out(self):
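     # Install the initial version on the first nodes and the upgrade version
     # on the rest, optionally run during_ops and wait for docs to expire,
     # then perform the online upgrade and verify data and sequence numbers.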
     self._install(self.servers[:self.nodes_init])
     self.operations(self.servers[:self.nodes_init])
     seqno_expected = 1
     seqno_comparator = '>='
     if not self.initial_version.startswith("1.") and self.input.param(
             'check_seqno', True):
         self.check_seqno(seqno_expected)
         seqno_comparator = '=='
     if self.ddocs_num:
         self.create_ddocs_and_views()
     self.sleep(self.sleep_time,
                "Pre-setup of old version is done. Wait for upgrade")
     self.initial_version = self.upgrade_versions[0]
     self.product = 'couchbase-server'
     self._install(self.servers[self.nodes_init:self.num_servers])
     self.sleep(self.sleep_time,
                "Installation of new version is done. Wait for rebalance")
     if self.during_ops:
         for opn in self.during_ops:
             getattr(self, opn)()
     if self.wait_expire:
         self.sleep(self.expire_time)
         for bucket in self.buckets:
             bucket.kvs[1] = KVStore()
     self.online_upgrade()
     self.sleep(self.sleep_time)
     if self.input.param('reboot_cluster', False):
         self.warm_up_node(self.servers[self.nodes_init:self.num_servers])
     self.verification(self.servers[self.nodes_init:self.num_servers])
     if self.input.param('check_seqno', True):
         self.check_seqno(seqno_expected, comparator=seqno_comparator)
Example #5
    def restoreAndVerify(self, bucket_names, kvs_before, expected_error=None):
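        # Point each bucket's kvs[1] at the pre-backup snapshot, restore the
        # backup, and scan the output for expected_error (if one is expected
        # the buckets should stay empty, so kvs[1] is reset before verifying).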
        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()

        errors, outputs = self.shell.restore_backupFile(
            self.couchbase_login_info, self.backup_location, bucket_names)
        errors.extend(outputs)
        error_found = False
        if expected_error:
            for line in errors:
                if line.find(expected_error) != -1:
                    error_found = True
                    break

            self.assertTrue(error_found,
                            "Expected error not found: %s" % expected_error)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        if expected_error:
            for bucket in self.buckets:
                bucket.kvs[1] = KVStore()
        self.verify_results(self.master)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
Example #6
 def __init__(self, new_params={}):
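     # Slimmer variant of the Bucket wrapper above: read settings from
     # new_params with defaults and seed kvs[1] with an empty KVStore.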
     self.name = new_params.get(Bucket.name, "default")
     self.type = new_params.get(Bucket.type, Bucket.bucket_type.MEMBASE)
     self.replicas = new_params.get(Bucket.replicas, 0)
     self.size = new_params.get(Bucket.size, 100)
     self.kvs = {1: KVStore()}
     self.eviction_policy = new_params.get(
         Bucket.eviction_policy, Bucket.bucket_eviction_policy.VALUE_ONLY)
     self.replicaIndex = new_params.get(Bucket.replicaIndex, 0)
     self.priority = new_params.get(Bucket.priority, None)
     self.uuid = None
     self.lww = new_params.get(Bucket.lww, False)
     self.maxTTL = new_params.get(Bucket.maxTTL, None)
     self.flush_enabled = new_params.get(Bucket.flush_enabled, 1)
     self.compressionMode = new_params.get(
         Bucket.compressionMode, Bucket.bucket_compression_mode.PASSIVE)
     self.nodes = None
     self.stats = None
     self.servers = []
     self.vbuckets = []
     self.forward_map = []
Example #7
    def setUp(self):
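        # Read the backup test parameters, parse ';'-separated command and -x
        # options, give every bucket a fresh kvs[2], and rebalance the
        # remaining servers into the cluster.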
        self.times_teardown_called = 1
        super(BackupBaseTest, self).setUp()
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()
        self.value_size = self.input.param("value_size", 256)
        self.expire_time = self.input.param("expire_time", 60)
        self.number_of_backups = self.input.param("number_of_backups", 1)
        self.backup_type = self.input.param("backup_type", None)
        self.item_flag = self.input.param("item_flag", 0)
        self.couchbase_login_info = "%s:%s" % (
            self.input.membase_settings.rest_username,
            self.input.membase_settings.rest_password)
        self.backup_location = self.input.param("backup_location",
                                                "/tmp/backup")
        self.command_options = self.input.param("command_options", '')
        if self.command_options != '':
            self.command_options = self.command_options.split(";")
        self.doc_ops = self.input.param("doc_ops", None)
        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")
        self.backup_x_options = self.input.param("backup_x_options", None)
        if self.backup_x_options is not None:
            temp = self.backup_x_options.split(";")
            temp_x_options = {}
            for element in temp:
                temp_array = element.split()
                temp_x_options[temp_array[0]] = temp_array[1]
            self.backup_x_options = temp_x_options

        self.restore_x_options = self.input.param("restore_x_options", None)
        if self.restore_x_options is not None:
            temp = self.restore_x_options.split(";")
            temp_x_options = {}
            for element in temp:
                temp_array = element.split()
                temp_x_options[temp_array[0]] = temp_array[1]
            self.restore_x_options = temp_x_options
        servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
        for bucket in self.buckets:
            bucket.kvs[2] = KVStore()
        self.cluster.rebalance(self.servers[:1], servers_in, [])
Example #8
 def setUp(self):
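     # Simpler backup setUp: read the basic parameters, seed kvs[2] for each
     # bucket, and rebalance the remaining servers into the cluster.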
     self.times_teardown_called = 1
     super(BackupBaseTest, self).setUp()
     self.shell = RemoteMachineShellConnection(self.master)
     self.value_size = self.input.param("value_size", 256)
     self.expire_time = self.input.param("expire_time", 60)
     self.item_flag = self.input.param("item_flag", 0)
     self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                            self.input.membase_settings.rest_password)
     self.backup_location = self.input.param("backup_location", "/tmp/backup")
     self.command_options = self.input.param("command_options", None)
     if self.command_options is not None:
         self.command_options = self.command_options.split(";")
     self.doc_ops = self.input.param("doc_ops", None)
     if self.doc_ops is not None:
         self.doc_ops = self.doc_ops.split(";")
     servers_in = [self.servers[i+1] for i in range(self.num_servers-1)]
     for bucket in self.buckets:
         bucket.kvs[2] = KVStore()
     self.cluster.rebalance(self.servers[:1], servers_in, [])
Example #9
 def __init__(self, bucket_size='', name="", authType="sasl", saslPassword="", num_replicas=0, port=11211, master_id=None,
              type='', eviction_policy="valueOnly", bucket_priority=None, uuid="", lww=False,
              storageBackend="couchstore"):
     self.name = name
     self.port = port
     self.type = type
     self.nodes = None
     self.stats = None
     self.servers = []
     self.vbuckets = []
     self.forward_map = []
     self.numReplicas = num_replicas
     self.saslPassword = saslPassword
     self.authType = ""
     self.bucket_size = bucket_size
     self.kvs = {1:KVStore()}
     self.authType = authType
     self.master_id = master_id
     self.eviction_policy = eviction_policy
     self.bucket_priority = bucket_priority
     self.uuid = uuid
     self.lww = lww
     self.storageBackend = storageBackend