コード例 #1
0
    def test_vbucket_id_option(self):
        """Verify cbtransfer's per-vbucket filter (-i option).

        Loads an equal number of generated docs into every vbucket of the
        source bucket via direct memcached sets, remembers the key/values
        written to the last vbucket, runs cbtransfer restricted to that
        vbucket id, and asserts the destination node returns exactly those
        values for the remembered keys.
        """
        bucket = RestConnection(self.server_origin).get_bucket(self.buckets[0])
        # trim the item count so it divides evenly across all vbuckets
        self.num_items = self.num_items - (self.num_items % len(bucket.vbuckets))
        num_items_per_vb = self.num_items / len(bucket.vbuckets)
        template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('cbtransfer', template, range(5), ['james', 'john'], start=0, end=self.num_items)
        # direct memcached client; port taken from vbucket 0's master
        # (assumes the same node serves all vbuckets used here - TODO confirm)
        client = MemcachedClient(self.server_origin.ip,
                                 int(bucket.vbuckets[0].master.split(':')[1]))
        kv_value_dict = {}
        # only the last vbucket is transferred and later verified
        vb_id_to_check = bucket.vbuckets[-1].id
        for vb_id in xrange(len(bucket.vbuckets)):
            cur_items_per_vb = 0
            while cur_items_per_vb < num_items_per_vb:
                # one shared generator: each vbucket consumes the next slice
                key, value = gen_load.next()

                client.set(key, 0, 0, value, vb_id)
                if vb_id_to_check == vb_id:
                    # remember what went into the vbucket under test
                    kv_value_dict[key] = value
                cur_items_per_vb += 1

        transfer_source = 'http://%s:%s' % (self.server_origin.ip, self.server_origin.port)
        transfer_destination = 'http://%s:%s' % (self.server_recovery.ip, self.server_recovery.port)
        # -i restricts the transfer to the single vbucket id
        output = self.shell.execute_cbtransfer(transfer_source, transfer_destination,
                                      "-b %s -B %s -i %s" % (bucket.name, bucket.name, vb_id_to_check))
        client = MemcachedClient(self.server_recovery.ip,
                                 int(bucket.vbuckets[0].master.split(':')[1]))
        for key, value in kv_value_dict.iteritems():
            _, _, d = client.get(key, vbucket=vb_id_to_check)
            self.assertEquals(d, value, 'Key: %s expected. Value expected %s. Value actual %s' % (
                                        key, value, d))
コード例 #2
0
 def getr_negative_corrupted_vbucket_test(self):
     vbucket_state = self.input.param("vbucket_state", '')
     gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                             start=0, end=self.num_items)
     self.perform_docs_ops(self.master, [gen], 'create')
     self.log.info("Checking replica read")
     client = VBucketAwareMemcached(RestConnection(self.master), self.default_bucket_name)
     vbuckets_num = RestConnection(self.master).get_vbuckets(self.buckets[0])
     while gen.has_next():
         try:
             key, _ = gen.next()
             vBucketId = client._get_vBucket_id(key)
             mem = client.memcached_for_replica_vbucket(vBucketId)
             if vbucket_state:
                 mem.set_vbucket_state(vBucketId, vbucket_state)
                 msg = "Vbucket %s set to pending state" % vBucketId
                 mem_to_read = mem
             else:
                 wrong_vbucket = [v for v in client.vBucketMapReplica
                                if mem.host != client.vBucketMapReplica[v][0].split(':')[0] or\
                                str(mem.port) != client.vBucketMapReplica[v][0].split(':')[1]][0]
                 mem_to_read = client.memcached_for_replica_vbucket(wrong_vbucket)
                 msg = "Key: %s. Correct host is %s, test try to get from %s host. " %(
                                                     key, mem.host, mem_to_read.host)
                 msg += "Correct vbucket %s, wrong vbucket %s" % (vBucketId, wrong_vbucket)
             self.log.info(msg)
             client._send_op(mem_to_read.getr, key)
         except Exception, ex:
             if self.error and str(ex).find(self.error) != -1:
                 self.log.info("Expected error %s appeared as expected" % self.error)
             else:
                 raise ex
         else:
             if self.error:
                 self.fail("Expected error %s didn't appear as expected" % self.error)
コード例 #3
0
 def _load_by_vbuckets(self, bucket):
     """Load generated docs directly into each vbucket of the given bucket.

     Keys are routed with self._get_vBucket_id() and written through the
     memcached client of the node that owns the key's vbucket; the keys
     stored per vbucket are recorded in self.keys_per_vbuckets_dict
     (keyed by vbucket id).
     """
     bucket = RestConnection(self.master).get_bucket(bucket)
     # trim the item count so it divides evenly across the vbuckets
     self.num_items = self.num_items - (self.num_items % len(bucket.vbuckets))
     num_items_per_vb = self.num_items / len(bucket.vbuckets)
     template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
     gen_load = DocumentGenerator("vbuckettool", template, range(5), ["james", "john"], start=0, end=self.num_items)
     self._get_clients(bucket)
     vb_master = {}
     for vb in bucket.vbuckets:
         # key the dict by vbucket id so the append below finds the entry
         # (previously keyed by the vbucket object -> KeyError on append)
         self.keys_per_vbuckets_dict[vb.id] = []
         vb_master[vb.id] = vb.master
     for i in xrange(gen_load.end):
         key, value = gen_load.next()
         vb_id = self._get_vBucket_id(key)
         # use the client of the node that owns THIS key's vbucket, not the
         # loop-leftover 'vb' from the initialisation loop above
         self.clients[vb_master[vb_id]].set(key, 0, 0, value, vb_id)
         self.keys_per_vbuckets_dict[vb_id].append(key)
コード例 #4
0
 def getr_rebalance_test(self):
     """Issue a replica read for every loaded key while a rebalance runs."""
     doc_gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                                 start=0, end=self.num_items)
     self.perform_docs_ops(self.master, [doc_gen], 'create')
     self.log.info("Checking replica read")
     smart_client = VBucketAwareMemcached(RestConnection(self.master),
                                          self.default_bucket_name)
     incoming = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
     rebalance_task = self.cluster.async_rebalance(
         self.servers[:self.nodes_init], incoming, [])
     try:
         # read each key from a replica while the rebalance is in flight
         while doc_gen.has_next():
             doc_key, _ = doc_gen.next()
             smart_client.getr(doc_key)
     finally:
         # always wait for the rebalance to finish (and surface its errors)
         rebalance_task.result()
コード例 #5
0
    def replicate_correct_data_after_rollback(self):
        '''
        Verify the cluster rolls back to the last persisted data.

        Flow: load docs and record the values active on node 1, stop
        persistence on all nodes, mutate a subset of keys (unpersisted),
        kill memcached on node 1 so those mutations are lost, start
        persistence on node 2 and gracefully fail it over, then assert the
        recorded keys still hold their first-load values. Finally the
        failed-over node is added back via delta recovery and rebalanced.

        @attention: This test case has some issue with docker runs. It
        passes without any issue on VMs.
        '''

        NUMBER_OF_DOCS = 10000

        # populate the kvs, they will look like ...
        """
        key: keyname-x
        value:
          {
          "mutated": 0,
            "_id": "keyname-x",
             "val-field-name": "serial-vals-100"
            }
        """
        vals = ['serial-vals-' + str(i) for i in range(NUMBER_OF_DOCS)]
        template = '{{ "val-field-name": "{0}"  }}'
        gen_load = DocumentGenerator('keyname',
                                     template,
                                     vals,
                                     start=0,
                                     end=NUMBER_OF_DOCS)

        rc = self.cluster.load_gen_docs(self.servers[0],
                                        self.buckets[0].name,
                                        gen_load,
                                        self.buckets[0].kvs[1],
                                        "create",
                                        exp=0,
                                        flag=0,
                                        batch_size=1000,
                                        compression=self.sdk_compression)

        # store the KVs which were modified and active on node 1
        modified_kvs_active_on_node1 = {}
        vbucket_client = VBucketAwareMemcached(RestConnection(self.master),
                                               'default')
        client = MemcachedClientHelper.direct_client(self.servers[0],
                                                     'default')
        # only sample 1% of the keys (NUMBER_OF_DOCS // 100)
        for i in range(NUMBER_OF_DOCS // 100):
            keyname = 'keyname-' + str(i)
            # vbucket id from CRC32 of the key, masked to the vbucket count
            # (assumes self.vbuckets is a power of two - TODO confirm)
            vbId = ((zlib.crc32(keyname) >> 16) & 0x7fff) & (self.vbuckets - 1)
            if vbucket_client.vBucketMap[vbId].split(
                    ':')[0] == self.servers[0].ip:
                rc = client.get(keyname)
                modified_kvs_active_on_node1[keyname] = rc[2]

        # stop persistence
        for bucket in self.buckets:
            for s in self.servers[:self.nodes_init]:
                client = MemcachedClientHelper.direct_client(s, bucket)
                try:
                    client.stop_persistence()
                except MemcachedError as e:
                    # ephemeral buckets have no flusher, so this error is
                    # expected and the rest of the test cannot apply
                    if self.bucket_type == 'ephemeral':
                        self.assertTrue(
                            "Memcached error #4 'Invalid':  Flusher not running. for vbucket :0 to mc "
                            in str(e))
                        return
                    else:
                        raise

        # modify less than 1/2 of the keys
        vals = [
            'modified-serial-vals-' + str(i)
            for i in range(NUMBER_OF_DOCS // 100)
        ]
        template = '{{ "val-field-name": "{0}"  }}'
        gen_load = DocumentGenerator('keyname',
                                     template,
                                     vals,
                                     start=0,
                                     end=NUMBER_OF_DOCS // 100)
        rc = self.cluster.load_gen_docs(self.servers[0],
                                        self.buckets[0].name,
                                        gen_load,
                                        self.buckets[0].kvs[1],
                                        "create",
                                        exp=0,
                                        flag=0,
                                        batch_size=1000,
                                        compression=self.sdk_compression)

        # kill memcached, when it comes back because persistence is disabled it will have lost the second set of mutations
        shell = RemoteMachineShellConnection(self.servers[0])
        shell.kill_memcached()
        time.sleep(10)

        # start persistence on the second node
        client = MemcachedClientHelper.direct_client(self.servers[1],
                                                     'default')
        client.start_persistence()

        time.sleep(5)

        # failover to the second node
        rc = self.cluster.failover(self.servers,
                                   self.servers[1:2],
                                   graceful=True)
        time.sleep(30)  # give time for the failover to complete

        # check the values, they should be what they were prior to the second update
        client = MemcachedClientHelper.direct_client(self.servers[0],
                                                     'default')
        for k, v in modified_kvs_active_on_node1.items():
            rc = client.get(k)
            self.assertTrue(v == rc[2],
                            'Expected {0}, actual {1}'.format(v, rc[2]))

        # need to rebalance the node back into the cluster
        # def rebalance(self, servers, to_add, to_remove, timeout=None, use_hostnames=False, services = None):

        rest_obj = RestConnection(self.servers[0])
        nodes_all = rest_obj.node_statuses()
        # NOTE(review): relies on the loop variable after the loop ends; if
        # no node matches, 'node' is the last entry (or NameError if empty)
        for node in nodes_all:
            if node.ip == self.servers[1].ip:
                break

        node_id_for_recovery = node.id
        status = rest_obj.add_back_node(node_id_for_recovery)
        if status:
            rest_obj.set_recovery_type(node_id_for_recovery,
                                       recoveryType='delta')
        rc = self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
コード例 #6
0
 def test_various_docid_keysize_combinations(self):
     """Index and scan docs whose ids, items and arrays span size limits.

     Builds num_docs documents with random-length doc ids (1-255 chars),
     names and encoded arrays bounded by the configured max item/array
     sizes, plus one extra document with a 250-char doc id, loads them,
     then full-table-scans every index over REST and compares against the
     expected results.
     """
     def random_string(max_length):
         # random lowercase string, length uniform in [1, max_length].
         # Fixes the original one-argument random.randint(...) calls,
         # which raise TypeError (randint requires both bounds).
         return "".join(random.choice(lowercase)
                        for k in range(random.randint(1, max_length)))

     def random_array(element_max):
         # 30 random elements, each between 1 and element_max chars
         return [random_string(element_max) for j in range(30)]

     self.set_allow_large_keys(self.allow_large_keys)
     self.change_max_item_size(self.max_item_size)
     self.change_max_array_size(self.max_array_size)
     query_definitions = self._create_indexes()
     self.sleep(30)
     rest = RestConnection(self.master)
     index_map = rest.get_index_id_map()
     for bucket in self.buckets:
         self.rest.flush_bucket(bucket)
     generators = []
     template = '{{"name":"{0}", "age":{1}, "encoded_array": {2}, "encoded_big_value_array": {3}}}'
     max_item_length = self.max_item_size * 4
     max_array_element_size = (self.max_array_size * 4) / 10
     for i in range(self.num_docs):
         index_id = random_string(255)
         name = random_string(max_item_length)
         age = random.choice(range(4, 59))
         big_value_array = [name]
         encoded_array = random_array(max_array_element_size)
         generators.append(
             DocumentGenerator(index_id,
                               template, [name], [age], [encoded_array],
                               [big_value_array],
                               start=0,
                               end=1))
     # one extra document with a maximal-length (250 char) doc id
     index_id = "".join(random.choice(lowercase) for k in range(250))
     name = random_string(max_item_length)
     age = random.choice(range(4, 59))
     big_value_array = [name]
     encoded_array = random_array(max_array_element_size)
     generators.append(
         DocumentGenerator(index_id,
                           template, [name], [age], [encoded_array],
                           [big_value_array],
                           start=0,
                           end=1))
     self.load(generators,
               buckets=self.buckets,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size)
     self.full_docs_list = self.generate_full_docs_list(generators)
     for bucket in self.buckets:
         for query_definition in query_definitions:
             index_id = str(
                 index_map[bucket.name][query_definition.index_name]["id"])
             actual_result = self.rest.full_table_scan_gsi_index_with_rest(
                 index_id, body={"stale": "false"})
             expected_result = self._get_expected_results_for_scan(
                 query_definition)
             msg = "Results don't match for index {0}. Actual: {1}, Expected: {2}"
             self.assertEqual(
                 sorted(actual_result), sorted(expected_result),
                 msg.format(query_definition.index_name, actual_result,
                            expected_result))
コード例 #7
0
    def getr_test(self):
        """Replica read verification across assorted cluster operations.

        Loads two generations of documents (fixed-size values when
        value_size is set), then depending on test params performs a
        rebalance (after/during/swap), node warmup, failover, or expiration
        wait, and finally verifies the data by reading from replicas. When
        self.error is set, the verification phase is expected to raise an
        error containing that text.
        """
        if self.nodes_init > len(self.servers):
            # not enough servers for the requested cluster - mark skipped
            result = unittest.TextTestRunner(verbosity=2)._makeResult()
            result.skipped = [('getr_test', "There is not enough VMs!!!")]
            return result

        # two generators covering the two halves of the keyspace
        gen_1 = DocumentGenerator('test_docs',
                                  '{{"age": {0}}}',
                                  xrange(5),
                                  start=0,
                                  end=self.num_items / 2)
        gen_2 = DocumentGenerator('test_docs',
                                  '{{"age": {0}}}',
                                  xrange(5),
                                  start=self.num_items / 2,
                                  end=self.num_items)
        if self.value_size:
            # override with fixed-size string values when value_size is set
            gen_1 = DocumentGenerator('test_docs',
                                      '{{"name": "{0}"}}',
                                      [self.value_size * 'a'],
                                      start=0,
                                      end=self.num_items / 2)
            gen_2 = DocumentGenerator('test_docs',
                                      '{{"name": "{0}"}}',
                                      [self.value_size * 'a'],
                                      start=self.num_items / 2,
                                      end=self.num_items)
        self.log.info("LOAD PHASE")
        if not self.skipload:
            self.perform_docs_ops(self.master, [gen_1, gen_2], self.data_ops)

        self.log.info("CLUSTER OPS PHASE")
        if self.rebalance == GetrTests.AFTER_REBALANCE:
            self.cluster.rebalance(self.servers[:self.nodes_init],
                                   self.servers[self.nodes_init:], [])
        if self.rebalance == GetrTests.DURING_REBALANCE:
            # NOTE(review): this async rebalance's result is never awaited
            # in this method - confirm that is intended
            rebalance = self.cluster.async_rebalance(
                self.servers[:self.nodes_init],
                self.servers[self.nodes_init:self.nodes_init + self.nodes_in],
                [])
        if self.rebalance == GetrTests.SWAP_REBALANCE:
            self.cluster.rebalance(
                self.servers[:self.nodes_init],
                self.servers[self.nodes_init:self.nodes_init + self.nodes_in],
                self.servers[self.nodes_init - self.nodes_in:self.nodes_init])
        if self.warmup_nodes:
            self.perform_warm_up()
        if self.failover:
            self.perform_failover()
        if self.wait_expiration:
            self.sleep(self.expiration)
        try:
            self.log.info("READ REPLICA PHASE")
            # work out which servers are still serving data after the op
            servrs = self.servers[:self.nodes_init]
            self.expire_pager(servrs)
            if self.failover in [
                    GetrTests.FAILOVER_NO_REBALANCE,
                    GetrTests.FAILOVER_REBALANCE
            ]:
                servrs = self.servers[:self.nodes_init - self.failover_factor]
            if self.rebalance == GetrTests.AFTER_REBALANCE:
                servrs = self.servers
            if self.rebalance == GetrTests.SWAP_REBALANCE:
                servrs = self.servers[:self.nodes_init - self.nodes_in]
                servrs.extend(self.servers[self.nodes_init:self.nodes_init +
                                           self.nodes_in])

            self.log.info("Checking replica read")
            if self.failover == GetrTests.FAILOVER_NO_REBALANCE:
                self._verify_all_buckets(self.master,
                                         only_store_hash=False,
                                         replica_to_read=self.replica_to_read,
                                         batch_size=1)
            else:
                self.verify_cluster_stats(servrs,
                                          only_store_hash=False,
                                          replica_to_read=self.replica_to_read,
                                          batch_size=1,
                                          timeout=(self.wait_timeout * 10))
        except Exception, ex:
            # an error is acceptable (and required) when self.error is set
            if self.error and str(ex).find(self.error) != -1:
                self.log.info("Expected error %s appeared as expected" %
                              self.error)
            else:
                print traceback.format_exc()
                raise ex
コード例 #8
0
ファイル: cbas_base.py プロジェクト: sreebhargava143/TAF
    def perform_doc_ops_in_all_cb_buckets(self,
                                          operation,
                                          start_key=0,
                                          end_key=1000,
                                          batch_size=10,
                                          exp=0,
                                          _async=False,
                                          durability="",
                                          mutation_num=0,
                                          cluster=None,
                                          buckets=None,
                                          key=None):
        """
        Create/Update/Delete docs in all cb buckets
        :param operation: String - "create","update","delete"
        :param start_key: Doc Key to start the operation with
        :param end_key: Doc Key to end the operation with
        :param batch_size: Batch size of doc_ops
        :param exp: MaxTTL used for doc operations
        :param _async: Boolean to decide whether to start ops in parallel
        :param durability: Durability level to use for doc operation
        :param mutation_num: Mutation count to keep track per doc_loading
        :param cluster: cluster object for cluster on which this doc load
                        operation has to be performed.
        :param buckets: optional list of buckets on which doc load operation
                        has to be performed (None or empty means all buckets).
        :param key: key for the generated docs
        :return:
        """
        # 'buckets=None' replaces the mutable default '[]' (shared between
        # calls); only truthiness is checked below, so behaviour is the same
        first = ['james', 'sharon', 'dave', 'bill', 'mike', 'steve']
        profession = ['doctor', 'lawyer']

        template_obj = JsonObject.create()
        template_obj.put("number", 0)
        template_obj.put("first_name", "")
        template_obj.put("profession", "")
        template_obj.put("mutated", mutation_num)
        template_obj.put("mutation_type", "ADD")

        if not key:
            key = "test_docs"

        doc_gen = DocumentGenerator(key,
                                    template_obj,
                                    start=start_key,
                                    end=end_key,
                                    randomize=False,
                                    first_name=first,
                                    profession=profession,
                                    number=range(70))
        if cluster:
            bucket_util = cluster.bucket_util
        else:
            cluster = self.cluster
            bucket_util = self.bucket_util
        try:
            if _async:
                if buckets:
                    # NOTE(review): returns after the FIRST bucket; remaining
                    # buckets are never loaded - confirm this is intended
                    for bucket in buckets:
                        return bucket_util.async_load_bucket(
                            cluster,
                            bucket,
                            doc_gen,
                            operation,
                            exp,
                            durability=durability,
                            batch_size=batch_size,
                            suppress_error_table=True)
                else:
                    return bucket_util._async_load_all_buckets(
                        cluster,
                        doc_gen,
                        operation,
                        exp,
                        durability=durability,
                        batch_size=batch_size,
                        suppress_error_table=True)
            else:
                bucket_util.sync_load_all_buckets(cluster,
                                                  doc_gen,
                                                  operation,
                                                  exp,
                                                  durability=durability,
                                                  batch_size=batch_size,
                                                  suppress_error_table=True)
        except Exception as e:
            # str(e) instead of e.message: '.message' is unreliable/absent
            # on many exception types
            self.log.error(str(e))
コード例 #9
0
ファイル: plasma_data_size.py プロジェクト: umang-cb/Jython
 def test_change_key_size(self):
     """Query a plasma index while doc key sizes alternate short and long.

     Each iteration appends 10 generators with short (5-9 char) random key
     suffixes, loads and queries, then appends 10 generators with long
     (100-199 char) suffixes and loads/queries again. The index is created
     up front and dropped at the end.
     """
     def make_generators(min_len, max_len):
         # 10 doc generators whose key suffix length is in [min_len, max_len)
         gens = []
         for i in range(10):
             name = FIRST_NAMES[random.choice(range(len(FIRST_NAMES)))]
             id_size = random.choice(range(min_len, max_len))
             rand_str = "".join(
                 random.choice(lowercase) for k in range(id_size))
             # 'doc_id' instead of 'id' - avoids shadowing the builtin
             doc_id = "{0}-{1}".format(name, rand_str)
             age = random.choice(range(4, 19))
             bigValues = "".join(random.choice(lowercase) for k in range(5))
             gens.append(
                 DocumentGenerator(doc_id,
                                   template, [name], [age], [bigValues],
                                   start=0,
                                   end=10))
         return gens

     def load_and_query():
         # (re)load everything generated so far, rebuild the expected doc
         # list, and run the query against the index
         self.load(generators,
                   flag=self.item_flag,
                   verify_data=False,
                   batch_size=self.batch_size)
         self.full_docs_list = self.generate_full_docs_list(generators)
         self.gen_results = TuqGenerators(self.log, self.full_docs_list)
         self.multi_query_using_index(buckets=buckets,
                                      query_definitions=[query_definition])

     self.iterations = self.input.param("num_iterations", 5)
     buckets = self._create_plasma_buckets()
     if self.plasma_dgm:
         self.get_dgm_for_plasma(indexer_nodes=[self.dgmServer])
     query_definition = QueryDefinition(
         index_name="index_name_big_values",
         index_fields=["bigValues"],
         query_template="SELECT * FROM %s WHERE bigValues IS NOT NULL",
         groups=["simple"],
         index_where_clause=" bigValues IS NOT NULL ")
     self.multi_create_index(buckets=buckets,
                             query_definitions=[query_definition])
     template = '{{"name":"{0}", "age":{1}, "bigValues": "{2}" }}'
     generators = []
     for j in range(self.iterations):
         generators.extend(make_generators(5, 10))
         load_and_query()
         generators.extend(make_generators(100, 200))
         load_and_query()
     self.sleep(30)
     self.multi_drop_index(buckets=buckets,
                           query_definitions=[query_definition])
コード例 #10
0
    def setUp(self):
        """Initialise a rackzone test.

        Reads test params, rebalances in the initial nodes, creates the
        default bucket, loads seed documents (blob or JSON depending on
        the 'blob_generator' param), and resolves the per-OS cbstats
        binary path. Requires Enterprise Edition (rack-zone awareness is
        EE-only), otherwise raises.
        """
        super(RackzoneBaseTest, self).setUp()
        self.product = self.input.param("product", "cb")
        self.vbuckets = self.input.param("vbuckets", 128)
        self.version = self.input.param("version", "2.5.1-1082")
        self.type = self.input.param('type', 'enterprise')
        self.doc_ops = self.input.param("doc_ops", None)
        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")
        self.defaul_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"

        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        # NOTE(review): overwrites the split list assigned above with a plain
        # string default - confirm which of the two assignments is intended
        self.doc_ops = self.input.param("doc_ops", "create")
        nodes_init = self.cluster.servers[1:self.nodes_init] if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.append(self.cluster.master)
        self.bucket_util.create_default_bucket()
        self.bucket_util.add_rbac_user()
        #define the data that will be used to test
        self.blob_generator = self.input.param("blob_generator", False)
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if not rest.is_enterprise_edition():
            raise Exception("This couchbase server is not Enterprise Edition.\
                  This RZA feature requires Enterprise Edition to work")
        if self.blob_generator:
            #gen_load data is used for upload before each test(1000 items by default)
            self.gen_load = BlobGenerator('test', 'test-', self.value_size, end=self.num_items)
            #gen_update is used for doing mutation for 1/2th of uploaded data
            self.gen_update = BlobGenerator('test', 'test-', self.value_size, end=(self.num_items / 2 - 1))
            #upload data before each test
            tasks = []
            for bucket in self.bucket_util.buckets:
                tasks.append(self.task.async_load_gen_docs(self.cluster, bucket, self.gen_load, "create", 0,
                                                           batch_size=20, persist_to=self.persist_to,
                                                           replicate_to=self.replicate_to,
                                                           pause_secs=5, timeout_secs=5))
            for task in tasks:
                self.task.jython_task_manager.get_task_result(task)
        else:
            tasks = []
            age = range(5)
            first = ['james', 'sharon']
            template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
            self.gen_load = DocumentGenerator('test_docs', template, age, first, start=0, end=self.num_items)
            for bucket in self.bucket_util.buckets:
                tasks.append(self.task.async_load_gen_docs(self.cluster, bucket, self.gen_load, "create", 0,
                                                           batch_size=20, persist_to=self.persist_to,
                                                           replicate_to=self.replicate_to,
                                                           pause_secs=5, timeout_secs=5))
            for task in tasks:
                self.task.jython_task_manager.get_task_result(task)
        shell = RemoteMachineShellConnection(self.cluster.master)
        # 'os_type' (was 'type') - avoid shadowing the builtin type()
        os_type = shell.extract_remote_info().distribution_type
        shell.disconnect()
        self.os_name = "linux"
        self.is_linux = True
        self.cbstat_command = "%scbstats" % (LINUX_COUCHBASE_BIN_PATH)
        if os_type.lower() == 'windows':
            self.is_linux = False
            self.os_name = "windows"
            self.cbstat_command = "%scbstats.exe" % (WIN_COUCHBASE_BIN_PATH)
        if os_type.lower() == 'mac':
            self.cbstat_command = "%scbstats" % (MAC_COUCHBASE_BIN_PATH)
        if self.nonroot:
            self.cbstat_command = "/home/%s%scbstats" % (self.cluster.master.ssh_username,
                                                         LINUX_COUCHBASE_BIN_PATH)
コード例 #11
0
ファイル: ibr.py プロジェクト: rayleyva/testrunner
    def testMultipleBackups(self):
        """Take a series of backups while mutating data, then restore.

        Backup mode comes from self.backup_type: 'accu', 'diff', or 'mix'
        (alternating accumulative/differential per backup). Before each
        backup every item is updated. After all backups the buckets are
        deleted and recreated, then restoreAndVerify() checks the restored
        data against the saved kvstores.
        """
        if not self.command_options:
            self.command_options = []

        options = self.command_options

        if self.backup_type is not None:
            if "accu" in self.backup_type:
                options = self.command_options + [' -m accu']
            if "diff" in self.backup_type:
                options = self.command_options + [' -m diff']

        diff_backup = [" -m diff"]
        accu_backup = [" -m accu"]
        # start on 'diff' so the first 'mix' backup flips to accumulative
        current_backup = [" -m diff"]

        for count in range(self.number_of_backups):
            # guard backup_type: '"mix" in None' would raise TypeError when
            # the param is unset (the first use above is guarded, this was not)
            if self.backup_type and "mix" in self.backup_type:
                if current_backup == diff_backup:
                    current_backup = accu_backup
                    options = self.command_options + accu_backup
                elif current_backup == accu_backup:
                    current_backup = diff_backup
                    options = self.command_options + diff_backup

            # Update data so each backup captures new mutations
            template = '{{ "mutated" : {0}, "age": {0}, "first_name": "{1}" }}'
            gen_update = DocumentGenerator('load_by_id_test',
                                           template,
                                           range(5), ['james', 'john'],
                                           start=0,
                                           end=self.num_items)
            self._load_all_buckets(self.master,
                                   gen_update,
                                   "update",
                                   0,
                                   1,
                                   self.item_flag,
                                   True,
                                   batch_size=20000,
                                   pause_secs=5,
                                   timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

            #Take a backup
            self.shell.execute_cluster_backup(self.couchbase_login_info,
                                              self.backup_location, options)

        # Save copy of data
        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        bucket_names = [bucket.name for bucket in self.buckets]

        # Delete all buckets
        self._all_buckets_delete(self.master)
        gc.collect()

        self._bucket_creation()
        self.sleep(20)

        self.restoreAndVerify(bucket_names, kvs_before)
# Code example #12
# (0 votes)
# File: capiXDCR.py  Project: ritamcouchbase/viewtests
    def test_capi_with_online_upgrade(self):
        """Verify CAPI (Elasticsearch) replication survives an online upgrade
        of the source cluster.

        Flow: start an ES replication; load and mutate data while the
        replication is paused; resume and let ES catch up; then upgrade the
        whole source cluster via two swap-rebalance rounds; finally load more
        data through the replication and verify the results in ES.
        """
        # Install the base version on all source + destination servers.
        self._install(self._input.servers[:self.src_init + self.dest_init])
        upgrade_version = self._input.param("upgrade_version", "5.0.0-1797")
        upgrade_nodes = self.src_cluster.get_nodes()
        # Spare servers beyond src+dest are swapped in during the upgrade.
        extra_nodes = self._input.servers[self.src_init + self.dest_init:]

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        # Pause replication while the initial load + mutations happen.
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED,
                                          'true')

        gen = DocumentGenerator('es',
                                '{{"key":"value","mutated":0}}',
                                xrange(100),
                                start=0,
                                end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        # Resume replication and wait until ES has all mutations.
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED,
                                          'false')

        self._wait_for_es_replication_to_catchup()

        # Pause again for the duration of the cluster upgrade.
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED,
                                          'true')

        # --- Upgrade round 1: swap old nodes for the extra (new) nodes ---
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(
            added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(
            upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        # The orchestrator must have moved to one of the newly added nodes.
        find_master = False
        for new_server in extra_nodes:
            if content.find(new_server.ip) >= 0:
                find_master = True
                self.log.info("{0} Node {1} becomes the master".format(
                    added_versions[0], new_server.ip))
                break
        if not find_master:
            raise Exception(
                "After rebalance in {0} Nodes, one of them doesn't become the master"
                .format(added_versions[0]))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[self.src_init + self.dest_init]

        # --- Upgrade round 2: install upgrade_version on the original
        # servers and swap them back in, rebalancing out round-1 nodes. ---
        self._install(self.src_cluster.get_nodes(), version=upgrade_version)
        upgrade_nodes = self._input.servers[self.src_init + self.dest_init:]
        extra_nodes = self.src_cluster.get_nodes()

        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(
            added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(
            upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[0]

        self.log.info("######### Upgrade of CB cluster completed ##########")

        # Re-create the REST connection: src_master changed during upgrade.
        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED,
                                          'true')

        gen = DocumentGenerator('es',
                                '{{"key":"value"}}',
                                xrange(100),
                                start=0,
                                end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED,
                                          'false')

        self._wait_for_es_replication_to_catchup()

        # Final check: ES contents match the source cluster.
        self._verify_es_results()
# Code example #13
# (0 votes)
# File: failoverbasetest.py  Project: bharath-gp/TAF
 def setUp(self):
     """Read failover test parameters from the test input, build the
     document generators the tests use, and bring up the initial cluster
     (rebalance in ``nodes_init`` nodes, create the default bucket and
     RBAC user).
     """
     self._cleanup_nodes = []
     self._failed_nodes = []
     super(FailoverBaseTest, self).setUp()
     # Default map function / view used by the view-related failover tests.
     # (Attribute name keeps its historical misspelling for compatibility.)
     self.defaul_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
     self.default_view_name = "default_view"
     self.default_view = View(self.default_view_name, self.defaul_map_func,
                              None)
     # ---- test-input parameters -------------------------------------
     self.failoverMaster = self.input.param("failoverMaster", False)
     self.vbuckets = self.input.param("vbuckets", 1024)
     self.total_vbuckets = self.input.param("total_vbuckets", 1024)
     self.compact = self.input.param("compact", False)
     self.std_vbucket_dist = self.input.param("std_vbucket_dist", 20)
     self.withMutationOps = self.input.param("withMutationOps", False)
     self.withViewsOps = self.input.param("withViewsOps", False)
     self.createIndexesDuringFailover = self.input.param(
         "createIndexesDuringFailover", False)
     self.upr_check = self.input.param("upr_check", True)
     self.withQueries = self.input.param("withQueries", False)
     self.numberViews = self.input.param("numberViews", False)
     self.gracefulFailoverFail = self.input.param("gracefulFailoverFail",
                                                  False)
     self.runRebalanceAfterFailover = self.input.param(
         "runRebalanceAfterFailover", True)
     self.check_verify_failover_type = self.input.param(
         "check_verify_failover_type", True)
     self.recoveryType = self.input.param("recoveryType", "delta")
     self.bidirectional = self.input.param("bidirectional", False)
     self.stopGracefulFailover = self.input.param("stopGracefulFailover",
                                                  False)
     self._value_size = self.input.param("value_size", 256)
     self.victim_type = self.input.param("victim_type", "other")
     self.victim_count = self.input.param("victim_count", 1)
     self.stopNodes = self.input.param("stopNodes", False)
     self.killNodes = self.input.param("killNodes", False)
     self.doc_ops = self.input.param("doc_ops", [])
     self.firewallOnNodes = self.input.param("firewallOnNodes", False)
     self.deltaRecoveryBuckets = self.input.param("deltaRecoveryBuckets",
                                                  None)
     self.wait_timeout = self.input.param("wait_timeout", 60)
     self.active_resident_threshold = int(
         self.input.param("active_resident_threshold", 0))
     self.max_verify = self.input.param("max_verify", None)
     # Colon-separated string params become lists.
     if self.doc_ops:
         self.doc_ops = self.doc_ops.split(":")
     self.num_failed_nodes = self.input.param("num_failed_nodes", 0)
     self.dgm_run = self.input.param("dgm_run", True)
     self.add_back_flag = False
     self.during_ops = self.input.param("during_ops", None)
     self.graceful = self.input.param("graceful", True)
     if self.recoveryType:
         self.recoveryType = self.recoveryType.split(":")
     if self.deltaRecoveryBuckets:
         self.deltaRecoveryBuckets = self.deltaRecoveryBuckets.split(":")
     # ---- document generators used by the tests ---------------------
     # NOTE(review): // keeps the key ranges integral (plain / would
     # yield floats on Python 3); the *1.5 / *1.6 / *0.75 products may
     # still be floats - presumably DocumentGenerator tolerates that -
     # TODO confirm.
     age = range(5)
     first = ['james', 'sharon']
     template = '{{ "age": {0}, "first_name": "{1}" }}'
     self.gen_initial_create = DocumentGenerator('failover',
                                                 template,
                                                 age,
                                                 first,
                                                 start=0,
                                                 end=self.num_items)
     self.gen_create = DocumentGenerator('failover',
                                         template,
                                         age,
                                         first,
                                         start=self.num_items,
                                         end=self.num_items * 1.5)
     self.gen_update = DocumentGenerator('failover',
                                         template,
                                         age,
                                         first,
                                         start=self.num_items // 2,
                                         end=self.num_items)
     self.gen_delete = DocumentGenerator('failover',
                                         template,
                                         age,
                                         first,
                                         start=self.num_items // 4,
                                         end=self.num_items // 2 - 1)
     self.afterfailover_gen_create = DocumentGenerator(
         'failover',
         template,
         age,
         first,
         start=self.num_items * 1.6,
         end=self.num_items * 2)
     self.afterfailover_gen_update = DocumentGenerator('failover',
                                                       template,
                                                       age,
                                                       first,
                                                       start=1,
                                                       end=self.num_items //
                                                       4)
     self.afterfailover_gen_delete = DocumentGenerator(
         'failover',
         template,
         age,
         first,
         start=self.num_items * .5,
         end=self.num_items * 0.75)
     # An explicit vbuckets param overrides total_vbuckets.
     if self.vbuckets is not None and self.vbuckets != self.total_vbuckets:
         self.total_vbuckets = self.vbuckets
     self.nodes_init = self.input.param("nodes_init", 1)
     self.nodes_in = self.input.param("nodes_in", 1)
     self.nodes_out = self.input.param("nodes_out", 1)
     # Rebalance in the remaining initial nodes (master is already in).
     nodes_init = self.cluster.servers[
         1:self.nodes_init] if self.nodes_init != 1 else []
     self.task.rebalance([self.cluster.master], nodes_init, [])
     self.cluster.nodes_in_cluster.append(self.cluster.master)
     self.bucket_util.create_default_bucket()
     self.bucket_util.add_rbac_user()
     self.log.info("==============  FailoverBaseTest setup was finished for test #{0} {1} =============="\
                   .format(self.case_number, self._testMethodName))