Example #1
    def incremental_rebalance_in_out_with_mutation_and_deletion(self):
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])
        gen_delete = BlobGenerator('mike',
                                   'mike-',
                                   self.value_size,
                                   start=self.num_items / 2,
                                   end=self.num_items)

        for i in reversed(range(self.num_servers)[self.num_servers / 2:]):
            tasks = self._async_load_all_buckets(self.master, self.gen_update,
                                                 "update", 0)
            tasks.extend(
                self._async_load_all_buckets(self.master, gen_delete, "delete",
                                             0))

            self.cluster.rebalance(self.servers[:i], [],
                                   self.servers[i:self.num_servers])
            time.sleep(10)
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   self.servers[i:self.num_servers], [])
            for task in tasks:
                task.result()
            self._load_all_buckets(self.master, gen_delete, "create", 0)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            self._verify_all_buckets(self.master, max_verify=self.max_verify)
            self._verify_stats_all_buckets(self.servers[:self.num_servers])
Example #2
 def checks_tap_connections_tests(self):
     servs_init = self.servers[:self.nodes_init]
     servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
     servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
     rest = RestConnection(self.master)
     buckets_stats_before = {}
     for bucket in self.buckets:
         _, result = rest.get_bucket_stats_json(bucket)
         buckets_stats_before[bucket.name] = result["op"]["samples"]["ep_tap_user_count"]
     self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
     self.log.info("adding nodes {0} to cluster".format(servs_in))
     self.log.info("removing nodes {0} from cluster".format(servs_out))
     result_nodes = set(servs_init + servs_in) - set(servs_out)
     self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out)
     gen = BlobGenerator('mike2', 'mike2-', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, gen, "create", 0)
     self.verify_cluster_stats(result_nodes)
     buckets_stats_after = {}
     for bucket in self.buckets:
         _, result = rest.get_bucket_stats_json(bucket)
         buckets_stats_after[bucket.name] = result["op"]["samples"]["ep_tap_user_count"]
         for stat in buckets_stats_after[bucket.name][len(buckets_stats_before[bucket.name]) - 1:]:
             if stat != 0:
                 self.log.error("'ep_tap_user_count' for bucket '{0}' before test:{1}".format(bucket.name, buckets_stats_before[bucket.name]))
                 self.log.error("'ep_tap_user_count' for bucket '{0}' after test:{1}".format(bucket.name, buckets_stats_after[bucket.name]))
                 self.log.error("'ep_tap_user_count' != 0 as expected");
         self.log.info("'ep_tap_user_count' for bucket '{0}' = 0 for the entire test".format(bucket.name));
Example #3
    def incremental_rebalance_out_in_with_mutation(self):
        init_num_nodes = self.input.param("init_num_nodes", 1)

        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:init_num_nodes], [])
        gen = BlobGenerator('mike',
                            'mike-',
                            self.value_size,
                            end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)

        for i in range(self.num_servers):
            tasks = self._async_load_all_buckets(self.master, gen, "update", 0)

            self.cluster.rebalance(
                self.servers[:self.num_servers],
                self.servers[init_num_nodes:init_num_nodes + i + 1], [])
            time.sleep(10)
            self.cluster.rebalance(
                self.servers[:self.num_servers], [],
                self.servers[init_num_nodes:init_num_nodes + i + 1])
            for task in tasks:
                task.result()
            self._wait_for_stats_all_buckets(self.servers[:init_num_nodes])
            self._verify_all_buckets(self.master, max_verify=self.max_verify)
            self._verify_stats_all_buckets(self.servers[:init_num_nodes])
Example #4
 def _load_doc_data_all_buckets(self,
                                op_type='create',
                                start=0,
                                end=0,
                                expiry=0):
     loaded = False
     count = 0
     gen_load = BlobGenerator('observe',
                              'observe',
                              1024,
                              start=start,
                              end=end)
     while not loaded and count < 60:
         try:
             self._load_all_buckets(self.servers[0], gen_load, op_type,
                                    expiry)
             loaded = True
         except MemcachedError as error:
             if error.status == 134:
                 loaded = False
                 self.log.error(
                     "Memcached error 134, waiting 5 seconds before retrying"
                 )
                 count += 1
                 time.sleep(5)
             else:
                 # anything other than a temporary failure should surface immediately
                 raise
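
The retry loop in Example #4 handles memcached status 134, which in Couchbase's memcached protocol corresponds to a temporary failure, by backing off for 5 seconds and trying again, up to 60 attempts. A minimal, framework-independent sketch of the same retry pattern (the TemporaryFailure exception and the load callable are illustrative assumptions, not names from the test framework):

    import time

    class TemporaryFailure(Exception):
        """Stand-in for a MemcachedError with status 134."""

    def load_with_retry(load, max_attempts=60, delay=5):
        # Retry the load callable while the server reports a temporary failure;
        # any other exception propagates to the caller.
        for _ in range(max_attempts):
            try:
                load()
                return True
            except TemporaryFailure:
                time.sleep(delay)
        return False
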
Example #5
    def collectinfo_test(self):
        """We use cbcollect_info to automatically collect the logs for server node

        First we load some items to the node. Optionally you can do some mutation
        against these items. Then we use cbcollect_info the automatically generate
        the zip file containing all the logs about the node. We want to verify we have
        all the log files according to the LOG_FILE_NAME_LIST and in stats.log, we have
        stats for all the buckets we have created"""

        gen_load = BlobGenerator('nosql',
                                 'nosql-',
                                 self.value_size,
                                 end=self.num_items)
        gen_update = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   end=(self.num_items / 2 - 1))
        gen_expire = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items / 2,
                                   end=(self.num_items * 3 / 4 - 1))
        gen_delete = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items * 3 / 4,
                                   end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        if (self.doc_ops is not None):
            if ("update" in self.doc_ops):
                self._load_all_buckets(self.master, gen_update, "update", 0)
            if ("delete" in self.doc_ops):
                self._load_all_buckets(self.master, gen_delete, "delete", 0)
            if ("expire" in self.doc_ops):
                self._load_all_buckets(self.master, gen_expire, "update",
                                       self.expire_time)
                time.sleep(self.expire_time + 1)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        self.shell.delete_files("%s.zip" % (self.log_filename))
        self.shell.delete_files(
            "cbcollect_info*"
        )  # this is the folder generated after unzipping the log package
        self.shell.execute_cbcollect_info("%s.zip" % (self.log_filename))
        self.verify_results(self.log_filename)
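
The verification step in Example #5 amounts to checking that the cbcollect_info archive contains every file named in LOG_FILE_NAME_LIST and that stats.log covers all buckets. A rough, self-contained sketch of that kind of archive check (an illustration of the idea, not the framework's actual verify_results):

    import zipfile

    def missing_log_files(zip_path, expected_names):
        # Return the expected log file names that are absent from the archive.
        with zipfile.ZipFile(zip_path) as archive:
            archived = set(name.split('/')[-1] for name in archive.namelist())
        return [name for name in expected_names if name not in archived]

    # missing = missing_log_files('collect.zip', ['stats.log', 'memcached.log'])
    # an empty list means every expected log file was collected
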
Example #6
 def setUp(self):
     super(BucketFlushTests, self).setUp()
     self.nodes_in = self.input.param("nodes_in", 0)
     self.value_size = self.input.param("value_size", 256)
     self.gen_create = BlobGenerator('bucketflush',
                                     'bucketflush-',
                                     self.value_size,
                                     end=self.num_items)
Example #7
 def _async_load_doc_data_all_buckets(self, op_type='create', start=0):
     gen_load = BlobGenerator('warmup',
                              'warmup-',
                              self.data_size,
                              start=start,
                              end=self.num_items)
     tasks = self._async_load_all_buckets(self.servers[0], gen_load,
                                          op_type, 0)
     return tasks
Example #8
 def rebalance_in_with_ops_batch(self):
     gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=(self.num_items / 2 - 1), end=self.num_items)
     gen_create = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items + 1, end=self.num_items * 3 / 2)
     servs_in = [self.servers[i + 1] for i in range(self.nodes_in)]
     rebalance = self.cluster.async_rebalance(self.servers[:1], servs_in, [])
     if(self.doc_ops is not None):
         # define which doc ops will be performed during the rebalance
         # multiple ops are allowed, but they run one at a time
         if("update" in self.doc_ops):
             self._load_all_buckets(self.servers[0], self.gen_update, "update", 0, 1, 4294967295, True, batch_size=20000, pause_secs=5, timeout_secs=180)
         if("create" in self.doc_ops):
             self._load_all_buckets(self.servers[0], gen_create, "create", 0, 1, 4294967295, True, batch_size=20000, pause_secs=5, timeout_secs=180)
         if("delete" in self.doc_ops):
             self._load_all_buckets(self.servers[0], gen_delete, "delete", 0, 1, 4294967295, True, batch_size=20000, pause_secs=5, timeout_secs=180)
     rebalance.result()
     self._wait_for_stats_all_buckets(self.servers[:self.nodes_in + 1])
     self._verify_all_buckets(self.servers[0], 1, 1000, None, only_store_hash=True, batch_size=5000)
     self._verify_stats_all_buckets(self.servers[:self.nodes_in + 1])
Example #9
 def rebalance_in_with_ops(self):
     gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items / 2, end=self.num_items)
     gen_create = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items + 1, end=self.num_items * 3 / 2)
     servs_in = [self.servers[i + 1] for i in range(self.nodes_in)]
     rebalance = self.cluster.async_rebalance(self.servers[:1], servs_in, [])
     if(self.doc_ops is not None):
         # define which doc ops will be performed during the rebalance
         # multiple ops are allowed, but they run one at a time
         if("update" in self.doc_ops):
             self._load_all_buckets(self.master, self.gen_update, "update", 0)
         if("create" in self.doc_ops):
             self._load_all_buckets(self.master, gen_create, "create", 0)
         if("delete" in self.doc_ops):
             self._load_all_buckets(self.master, gen_delete, "delete", 0)
     rebalance.result()
     self._wait_for_stats_all_buckets(self.servers[:self.nodes_in + 1])
     self._verify_all_buckets(self.master, max_verify=self.max_verify)
     self._verify_stats_all_buckets(self.servers[:self.nodes_in + 1])
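
Examples #8 and #9 keep the generators on disjoint key ranges so the concurrent operations never touch the same documents: updates go to the keys loaded up front, deletes take the upper half of the initial range, and creates use keys strictly above num_items. With num_items = 1000 the bounds passed to BlobGenerator in Example #9 work out as follows (plain arithmetic mirroring the generator arguments):

    num_items = 1000

    delete_range = (num_items // 2, num_items)          # delete keys in the upper half
    create_range = (num_items + 1, num_items * 3 // 2)  # create brand-new keys above num_items

    assert delete_range == (500, 1000)
    assert create_range == (1001, 1500)
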
Example #10
    def online_cluster_upgrade(self):
        self._install(self.servers[:self.src_init + self.dest_init])
        self.initial_version = self.upgrade_versions[0]
        self._install(self.servers[self.src_init + self.dest_init:])
        self.cluster.shutdown()
        XDCRReplicationBaseTest.setUp(self)
        bucket_default = self._get_bucket('default', self.src_master)
        bucket_sasl = self._get_bucket('bucket0', self.src_master)
        bucket_standard = self._get_bucket('standard_bucket0', self.dest_master)

        self._load_bucket(bucket_default, self.src_master, self.gen_create, 'create', exp=0)
        self._load_bucket(bucket_sasl, self.src_master, self.gen_create, 'create', exp=0)
        self._load_bucket(bucket_standard, self.dest_master, self.gen_create, 'create', exp=0)
        gen_create2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size, end=self._num_items)
        self._load_bucket(bucket_sasl, self.dest_master, gen_create2, 'create', exp=0)

        self._online_upgrade(self.src_nodes, self.servers[self.src_init + self.dest_init:])
        self._install(self.src_nodes)
        self._online_upgrade(self.servers[self.src_init + self.dest_init:], self.src_nodes, False)

        self._load_bucket(bucket_default, self.src_master, self.gen_delete, 'delete', exp=0)
        self._load_bucket(bucket_default, self.src_master, self.gen_update, 'create', exp=self._expires)
        self._load_bucket(bucket_sasl, self.src_master, self.gen_delete, 'delete', exp=0)
        self._load_bucket(bucket_sasl, self.src_master, self.gen_update, 'create', exp=self._expires)

        self._online_upgrade(self.dest_nodes, self.servers[self.src_init + self.dest_init:])
        self._install(self.dest_nodes)
        self._online_upgrade(self.servers[self.src_init + self.dest_init:], self.dest_nodes, False)

        self._load_bucket(bucket_standard, self.dest_master, self.gen_delete, 'delete', exp=0)
        self._load_bucket(bucket_standard, self.dest_master, self.gen_update, 'create', exp=self._expires)
        self.do_merge_bucket(self.src_master, self.dest_master, True, bucket_sasl)
        bucket_sasl = self._get_bucket('bucket0', self.dest_master)
        gen_delete2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size,
            start=int(self._num_items * float(100 - self._percent_delete) / 100), end=self._num_items)
        gen_update2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size, start=0,
            end=int(self._num_items * float(self._percent_update) / 100))
        self._load_bucket(bucket_sasl, self.dest_master, gen_delete2, 'delete', exp=0)
        self._load_bucket(bucket_sasl, self.dest_master, gen_update2, 'create', exp=self._expires)

        self.do_merge_bucket(self.dest_master, self.src_master, False, bucket_sasl)
        self.do_merge_bucket(self.src_master, self.dest_master, False, bucket_default)
        self.do_merge_bucket(self.dest_master, self.src_master, False, bucket_standard)
        self.verify_xdcr_stats(self.src_nodes, self.dest_nodes, True)
Example #11
    def rebalance_in_out_at_once_persistence_stopped(self):
        num_nodes_with_stopped_persistence = self.input.param(
            "num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [
            self.servers[i + self.nodes_init] for i in range(self.nodes_in)
        ]
        servs_out = [
            self.servers[self.nodes_init - i - 1]
            for i in range(self.nodes_out)
        ]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence,
                                      self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param(
            "num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike', 'mike-', self.value_size,
                                  start=self.num_items / 2,
                                  end=self.num_items / 2 + self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format(
            [node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master,
                                             gen_extra,
                                             "create",
                                             0,
                                             batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # use an extended timeout (wait_timeout * 60) because rebalance may get stuck (MB-7386)
        self.cluster.rebalance(servs_init[:self.nodes_init],
                               servs_in,
                               servs_out,
                               timeout=self.wait_timeout * 60)
        for task in tasks:
            task.result()

        self._wait_for_stats_all_buckets(
            servs_init[:self.nodes_init - self.nodes_out],
            ep_queue_size=self.num_items_without_persistence * 0.9,
            ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        # verify that curr_items_tot corresponds to the sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(
                self.master, bucket)
        self.assertTrue(
            verified,
            "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total"
        )
Example #12
    def ops_change_cas(self):
        """CAS value manipulation by update, delete, expire test.

        We load a certain number of items. Then for half of them, we use
        MemcachedClient cas() method to mutate those item values in order
        to change CAS value of those items.
        We use MemcachedClient set() method to set a quarter of the items expired.
        We also use MemcachedClient delete() to delete a quarter of the items"""

        gen_load_mysql = BlobGenerator('nosql',
                                       'nosql-',
                                       self.value_size,
                                       end=self.num_items)
        gen_update = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   end=(self.num_items / 2 - 1))
        gen_delete = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items / 2,
                                   end=(3 * self.num_items / 4 - 1))
        gen_expire = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=3 * self.num_items / 4,
                                   end=self.num_items)
        self._load_all_buckets(self.master,
                               gen_load_mysql,
                               "create",
                               0,
                               flag=self.item_flag)

        if (self.doc_ops is not None):
            if ("update" in self.doc_ops):
                self.verify_cas("update", gen_update)
            if ("delete" in self.doc_ops):
                self.verify_cas("delete", gen_delete)
            if ("expire" in self.doc_ops):
                self.verify_cas("expire", gen_expire)
        self._wait_for_stats_all_buckets(
            [self.master])  # only one node is needed for the CAS test
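
Example #12 relies on memcached's compare-and-swap semantics: every successful mutation bumps an item's CAS value, and a cas() call that carries a stale CAS must be rejected. A minimal in-memory model of that behaviour (purely illustrative; it does not use the MemcachedClient API from the test):

    class CasStore(object):
        def __init__(self):
            self._data = {}  # key -> (cas, value)

        def set(self, key, value):
            # unconditional write; bumps the CAS value
            cas = self._data.get(key, (0, None))[0] + 1
            self._data[key] = (cas, value)
            return cas

        def cas(self, key, expected_cas, value):
            # conditional write; succeeds only if the caller's CAS is current
            current_cas, _ = self._data[key]
            if current_cas != expected_cas:
                return False
            self._data[key] = (current_cas + 1, value)
            return True

    store = CasStore()
    first = store.set('k', 'v1')
    assert store.cas('k', first, 'v2')      # CAS matches: accepted, CAS bumped
    assert not store.cas('k', first, 'v3')  # CAS now stale: rejected
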
Example #13
    def offline_cluster_upgrade(self):
        self._install(self.servers[:self.src_init + self.dest_init])
        upgrade_nodes = self.input.param('upgrade_nodes', "src").split(";")
        self.cluster.shutdown()
        XDCRReplicationBaseTest.setUp(self)
        bucket = self._get_bucket('default', self.src_master)
        self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
        bucket = self._get_bucket('bucket0', self.src_master)
        self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
        bucket = self._get_bucket('bucket0', self.dest_master)
        gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self._num_items)
        self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)
        nodes_to_upgrade = []
        if "src" in upgrade_nodes :
            nodes_to_upgrade += self.src_nodes
        if "dest" in upgrade_nodes :
            nodes_to_upgrade += self.dest_nodes

        for upgrade_version in self.upgrade_versions:
            for server in nodes_to_upgrade:
                remote = RemoteMachineShellConnection(server)
                remote.stop_server()
                remote.disconnect()
            upgrade_threads = self._async_update(upgrade_version, nodes_to_upgrade)
            # wait for the upgrade threads to finish and report their statuses
            for upgrade_thread in upgrade_threads:
                upgrade_thread.join()
            success_upgrade = True
            while not self.queue.empty():
                success_upgrade &= self.queue.get()
            if not success_upgrade:
                self.fail("Upgrade failed!")
            self.sleep(self.expire_time)

        bucket = self._get_bucket('bucket0', self.src_master)
        gen_create3 = BlobGenerator('loadThree', 'loadThree', self._value_size, end=self._num_items)
        self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
        self.do_merge_bucket(self.src_master, self.dest_master, True, bucket)
        bucket = self._get_bucket('default', self.src_master)
        self._load_bucket(bucket, self.src_master, gen_create2, 'create', exp=0)
        self.do_merge_bucket(self.src_master, self.dest_master, False, bucket)
        self.verify_xdcr_stats(self.src_nodes, self.dest_nodes, True)
Example #14
 def setUp(self):
     super(Rebalance, self).setUp()
     if self._replication_direction_str in "bidirection":
         self.gen_create2 = BlobGenerator('LoadTwo',
                                          'LoadTwo',
                                          self._value_size,
                                          end=self._num_items)
         self.gen_delete2 = BlobGenerator(
             'LoadTwo',
             'LoadTwo-',
             self._value_size,
             start=int(self._num_items *
                       float(100 - self._percent_delete) / 100),
             end=self._num_items)
         self.gen_update2 = BlobGenerator(
             'LoadTwo',
             'LoadTwo-',
             self._value_size,
             start=0,
             end=int(self._num_items * float(self._percent_update) / 100))
Example #15
    def setUp(self):
        super(bidirectional, self).setUp()

        self.gen_create2 = BlobGenerator('loadTwo',
                                         'loadTwo',
                                         self._value_size,
                                         end=self._num_items)
        self.gen_delete2 = BlobGenerator(
            'loadTwo',
            'loadTwo-',
            self._value_size,
            start=int(
                self._num_items * float(100 - self._percent_delete) / 100),
            end=self._num_items)
        self.gen_update2 = BlobGenerator(
            'loadTwo',
            'loadTwo-',
            self._value_size,
            start=0,
            end=int(self._num_items * float(self._percent_update) / 100))
Example #16
 def rebalance_in_with_ops(self):
     gen_delete = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items / 2,
                                end=self.num_items)
     gen_create = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items + 1,
                                end=self.num_items * 3 / 2)
     servs_in = [
         self.servers[i + self.nodes_init] for i in range(self.nodes_in)
     ]
     if self.output_time:
         start_time = time.time()
     rebalance = self.cluster.async_rebalance(
         self.servers[:self.nodes_init], servs_in, [])
     if (self.doc_ops is not None):
         tasks = []
         # define which doc ops will be performed during the rebalance
         # multiple ops are allowed, but they run one at a time
         if ("update" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master,
                                                   self.gen_update,
                                                   "update", 0)
         if ("create" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_create,
                                                   "create", 0)
         if ("delete" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_delete,
                                                   "delete", 0)
         for task in tasks:
             task.result()
     rebalance.result()
     if self.output_time:
         delta_time = time.time() - start_time
         self.log.info("TIME FOR REBALANCE IS %s SECS (%s MINS)" %
                       (delta_time, delta_time / 60))
     self.verify_cluster_stats(self.servers[:self.nodes_in +
                                            self.nodes_init])
Example #17
 def _async_load_doc_data_all_buckets(self,
                                      op_type='create',
                                      start=0,
                                      end=0):
     gen_load = BlobGenerator('observe',
                              'observe',
                              1024,
                              start=start,
                              end=end)
     tasks = self._async_load_all_buckets(self.servers[0], gen_load,
                                          op_type, 0)
     return tasks
Example #18
 def setUp(self):
     super(BucketFlushTests, self).setUp()
     self.nodes_in = self.input.param("nodes_in", 0)
     self.value_size = self.input.param("value_size", 256)
     self.data_op = self.input.param("data_op", "create")
     self.use_ascii = self.input.param("use_ascii", "False")
     self.gen_create = BlobGenerator('bucketflush', 'bucketflush-', self.value_size, end=self.num_items)
     try:
         self.default_test_setup()
     except Exception as e:
         self.cluster.shutdown()
         self.fail(e)
Example #19
 def incremental_rebalance_out_with_mutation_and_deletion(self):
     gen_2 = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items / 2,
                           end=self.num_items)
     for i in reversed(range(self.num_servers)[1:]):
         rebalance = self.cluster.async_rebalance(self.servers[:i], [], [self.servers[i]])
         self._load_all_buckets(self.master, self.gen_update, "update", 0)
         self._load_all_buckets(self.master, gen_2, "delete", 0)
         rebalance.result()
         self._load_all_buckets(self.master, gen_2, "create", 0)
         self._wait_for_stats_all_buckets(self.servers[:i])
         self._verify_all_buckets(self.master, max_verify=self.max_verify)
         self._verify_stats_all_buckets(self.servers[:i])
Example #20
    def checkpoint_failover_master(self):
        """Load N items. During the load, failover Master.
        Verify backfill doesn't happen on R1, R2."""

        param = 'checkpoint'
        stat_key = 'vb_0:open_checkpoint_id'
        rest = RestConnection(self.master)
        nodes = rest.node_statuses()
        failover_node = None
        for node in nodes:
            if node.id.find(self.master.ip) >= 0:
                failover_node = node

        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket,
                                  self.checkpoint_size)
        generate_load = BlobGenerator('nosql',
                                      'nosql-',
                                      self.value_size,
                                      end=self.num_items)
        data_load_thread = Thread(target=self._load_all_buckets,
                                  name="load_data",
                                  args=(self.master, generate_load, "create",
                                        0, 1, 0, True, self.checkpoint_size, 5,
                                        180))
        data_load_thread.start()
        time.sleep(5)
        prev_backfill_timestamp_R1 = self._get_backfill_timestamp(
            self.replica1, self.replica2)
        prev_backfill_timestamp_R2 = self._get_backfill_timestamp(
            self.replica2, self.replica3)

        failed_over = rest.fail_over(failover_node.id)
        if not failed_over:
            self.log.info(
                "unable to fail over the node on the first attempt; retrying in 75 seconds..."
            )
            # retry after 75 seconds
            time.sleep(75)
            failed_over = rest.fail_over(failover_node.id)
        self.assertTrue(failed_over,
                        "unable to failover node %s" % (self.master.ip))
        self.log.info("failed over node : {0}".format(failover_node.id))
        data_load_thread.join()

        self._verify_backfill_happen(self.replica1, self.replica2,
                                     prev_backfill_timestamp_R1)
        self._verify_backfill_happen(self.replica2, self.replica3,
                                     prev_backfill_timestamp_R2)
        self.cluster.rebalance(self.servers[:self.num_servers], [],
                               [self.master])
        self.cluster.rebalance(self.servers[1:self.num_servers], [self.master],
                               [])
Example #21
    def checkpoint_replication_pause(self):
        """With 3 replicas load data. pause replication to R2. Let checkpoints close on Master and R1.
        Restart replication of R2 and R3, backfill should not be seen on R1 and R2."""

        param = 'checkpoint'
        stat_key = 'vb_0:last_closed_checkpoint_id'

        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket,
                                  str(self.checkpoint_size))
        time.sleep(5)
        prev_backfill_timestamp_R1 = self._get_backfill_timestamp(
            self.replica1, self.replica2)
        prev_backfill_timestamp_R2 = self._get_backfill_timestamp(
            self.replica2, self.replica3)

        generate_load = BlobGenerator('nosql',
                                      'nosql-',
                                      self.value_size,
                                      end=self.num_items)
        data_load_thread = Thread(target=self._load_all_buckets,
                                  name="load_data",
                                  args=(self.master, generate_load, "create",
                                        0, 1, 0, True, self.checkpoint_size, 5,
                                        180))
        data_load_thread.start()
        self._stop_replication(self.replica2, self.bucket)

        m_stats = StatsCommon.get_stats([self.master], self.bucket, param,
                                        stat_key)
        chk_pnt = int(m_stats[m_stats.keys()[0]]) + 2
        tasks = []
        tasks.append(
            self.cluster.async_wait_for_stats([self.master], self.bucket,
                                              param, stat_key, '>=', chk_pnt))
        tasks.append(
            self.cluster.async_wait_for_stats([self.replica1], self.bucket,
                                              param, stat_key, '>=', chk_pnt))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("Checkpoint not closed")

        data_load_thread.join()
        self._start_replication(self.replica2, self.bucket)

        self._verify_checkpoint_id(param, stat_key, m_stats)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
        self._verify_backfill_happen(self.replica1, self.replica2,
                                     prev_backfill_timestamp_R1)
        self._verify_backfill_happen(self.replica2, self.replica3,
                                     prev_backfill_timestamp_R2)
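
Example #21 computes a target checkpoint id (the current value plus 2) and then waits for both the Master and R1 to report at least that value, failing the test if the wait times out. The same wait-for-a-stat-threshold pattern can be sketched generically (get_stat here is a placeholder callable, not a framework API):

    import time

    def wait_for_stat(get_stat, threshold, timeout=60, interval=2):
        # Poll get_stat() until it reaches the threshold or the timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if get_stat() >= threshold:
                return True
            time.sleep(interval)
        return False

    # ok = wait_for_stat(lambda: current_checkpoint_id(master), chk_pnt, timeout=60)
    # current_checkpoint_id is a hypothetical helper standing in for the stats call
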
Example #22
 def incremental_rebalance_in_with_ops(self):
     for i in range(1, self.num_servers, 2):
         rebalance = self.cluster.async_rebalance(self.servers[:i], self.servers[i:i + 2], [])
         if self.doc_ops is not None:
             # define which doc operation will be performed during the rebalance
             # only one type of op can be passed
             if("update" in self.doc_ops):
                 # half of the data will be updated in each iteration
                 self._load_all_buckets(self.master, self.gen_update, "update", 0)
             elif("create" in self.doc_ops):
                 # half of the initial data will be added in each iteration
                 gen_create = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items * (1 + i) / 2.0, end=self.num_items * (1 + i / 2.0))
                 self._load_all_buckets(self.master, gen_create, "create", 0)
             elif("delete" in self.doc_ops):
                 # 1/num_servers of the initial data will be removed in each iteration
                 # at the end the bucket should be nearly empty (or hold only a couple of items)
                 gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=int(self.num_items * (1 - i / (self.num_servers - 1.0))) + 1, end=int(self.num_items * (1 - (i - 1) / (self.num_servers - 1.0))))
                 self._load_all_buckets(self.master, gen_delete, "delete", 0)
         rebalance.result()
         self._wait_for_stats_all_buckets(self.servers[:i + 2])
         self._verify_all_buckets(self.master, max_verify=self.max_verify)
         self._verify_stats_all_buckets(self.servers[:i + 2])
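
In the delete branch of Example #22, each iteration removes a slice from the top of the remaining initial keyspace. Plugging concrete numbers into the generator bounds makes the slicing easier to follow; with num_items = 1000 and num_servers = 5 the loop visits i = 1 and i = 3:

    num_items, num_servers = 1000, 5

    for i in range(1, num_servers, 2):
        start = int(num_items * (1 - i / (num_servers - 1.0))) + 1
        end = int(num_items * (1 - (i - 1) / (num_servers - 1.0)))
        print("i=%s start=%s end=%s" % (i, start, end))
    # i=1 start=751 end=1000
    # i=3 start=251 end=500
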
Example #23
 def setUp(self):
     log = logger.Logger.get_logger()
     self._input = TestInputSingleton.input
     self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
     self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
     self.bidirectional = self._input.param("bidirectional", False)
     self.case_number = self._input.param("case_number", 0)
     self._value_size = self._input.param("value_size", 256)
     self.wait_timeout = self._input.param("wait_timeout", 60)
     self._servers = self._input.servers
     self.master = self._servers[0]
     self._failed_nodes = []
     num_buckets = 0
     self.buckets = []
     self.default_bucket = self._input.param("default_bucket", True)
     if self.default_bucket:
         self.default_bucket_name = "default"
         num_buckets += 1
     self._standard_buckets = self._input.param("standard_buckets", 0)
     self._sasl_buckets = self._input.param("sasl_buckets", 0)
     num_buckets += self._standard_buckets + self._sasl_buckets
     self.dgm_run = self._input.param("dgm_run", True)
     self.log = logger.Logger().get_logger()
     self._cluster_helper = Cluster()
     self.disabled_consistent_view = self._input.param(
         "disabled_consistent_view", None)
     self._quota = self._initialize_nodes(self._cluster_helper,
                                          self._servers,
                                          self.disabled_consistent_view)
     if self.dgm_run:
         self._quota = 256  # cap the quota so the data set exceeds memory (DGM run)
     self.bucket_size = int(
         (2.0 / 3.0) / float(num_buckets) * float(self._quota))
     self.gen_create = BlobGenerator('loadOne',
                                     'loadOne_',
                                     self._value_size,
                                     end=self._keys_count)
     self.add_back_flag = False
     self._cleanup_nodes = []
     log.info("==============  setup was started for test #{0} {1}=============="\
                   .format(self.case_number, self._testMethodName))
     RemoteUtilHelper.common_basic_setup(self._servers)
     BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
     for server in self._servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
     self._setup_cluster()
     self._create_buckets_()
     log.info("==============  setup was finished for test #{0} {1} =============="\
                   .format(self.case_number, self._testMethodName))
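
The bucket sizing in Example #23 gives each bucket an equal share of two thirds of the node quota. For instance, under dgm_run with the quota capped at 256 MB and three buckets (the default bucket plus two SASL buckets), each bucket ends up with 56 MB:

    quota, num_buckets = 256, 3
    bucket_size = int((2.0 / 3.0) / float(num_buckets) * float(quota))
    assert bucket_size == 56
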
Example #24
 def incremental_rebalance_in_with_mutation_and_expiration(self):
     gen_2 = BlobGenerator('mike',
                           'mike-',
                           self.value_size,
                           start=self.num_items / 2,
                           end=self.num_items)
     for i in range(self.num_servers)[1:]:
         rebalance = self.cluster.async_rebalance(self.servers[:i],
                                                  [self.servers[i]], [])
         self._load_all_buckets(self.master, self.gen_update, "update", 0)
         self._load_all_buckets(self.master, gen_2, "update", 5)
         self.sleep(5)
         rebalance.result()
         self._load_all_buckets(self.master, gen_2, "create", 0)
         self.verify_cluster_stats(self.servers[:i + 1])
Example #25
 def rebalance_out_with_ops(self):
     gen_delete = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items / 2,
                                end=self.num_items)
     gen_create = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items + 1,
                                end=self.num_items * 3 / 2)
     servs_out = [
         self.servers[self.num_servers - i - 1]
         for i in range(self.nodes_out)
     ]
     rebalance = self.cluster.async_rebalance(self.servers[:1], [],
                                              servs_out)
      # define which doc ops will be performed during the rebalance
      # multiple ops are allowed, but they run one at a time
     tasks = []
     if (self.doc_ops is not None):
         if ("update" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master,
                                                   self.gen_update,
                                                   "update", 0)
         if ("create" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_create,
                                                   "create", 0)
         if ("delete" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_delete,
                                                   "delete", 0)
         for task in tasks:
             task.result()
     rebalance.result()
     self.verify_cluster_stats(self.servers[:self.num_servers -
                                            self.nodes_out])
Example #26
 def _load_gen_data(self, cname, node):
     for op_type in self._seed_data_ops_lst:
         num_items_ratio = self._get_num_items_ratio(op_type)
         load_gen = BlobGenerator(cname,
                                  cname,
                                  self._value_size,
                                  end=num_items_ratio)
         self._log.info(
             "Starting Load operation '{0}' for items (ratio) '{1}' on node '{2}'...."
             .format(op_type, num_items_ratio, cname))
         if self._seed_data_mode_str == XDCRConstants.SEED_DATA_MODE_SYNC:
             self._load_all_buckets(node, load_gen, op_type, 0)
             self._log.info("Completed Load of {0}".format(op_type))
         else:
             self._async_load_all_buckets(node, load_gen, op_type, 0)
             self._log.info("Started async Load of {0}".format(op_type))
Example #27
    def rebalance_in_out_at_once_with_max_buckets_number(self):
        servs_init = self.servers[:self.nodes_init]
        servs_in = [
            self.servers[i + self.nodes_init] for i in range(self.nodes_in)
        ]
        servs_out = [
            self.servers[self.nodes_init - i - 1]
            for i in range(self.nodes_out)
        ]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        self.log.info("current nodes : {0}".format(
            [node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        result_nodes = set(servs_init + servs_in) - set(servs_out)

        rest = RestConnection(self.master)
        bucket_num = rest.get_internalSettings("maxBucketCount")
        self.bucket_size = self.quota / bucket_num

        self.log.info('total %s buckets will be created with size %s MB' %
                      (bucket_num, self.bucket_size))
        self.cluster.create_default_bucket(self.master, self.bucket_size,
                                           self.num_replicas)
        self.buckets.append(
            Bucket(name="default",
                   authType="sasl",
                   saslPassword="",
                   num_replicas=self.num_replicas,
                   bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, (bucket_num - 1) / 2)
        self._create_standard_buckets(self.master,
                                      bucket_num - 1 - (bucket_num - 1) / 2)

        gen = BlobGenerator('mike',
                            'mike-',
                            self.value_size,
                            end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)
        self._wait_for_stats_all_buckets(servs_init)

        rebalance = self.cluster.async_rebalance(servs_init, servs_in,
                                                 servs_out)
        self._async_load_all_buckets(self.master, gen, "update", 0)
        rebalance.result()
        self.verify_cluster_stats(result_nodes)
Example #28
    def checkpoint_server_down(self):
        """Load N items. Shut down server R2. Then Restart R2 and
        verify backfill happens on R1 and R2."""

        param = 'checkpoint'
        stat_key = 'vb_0:open_checkpoint_id'
        rest = RestConnection(self.master)

        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket,
                                  self.checkpoint_size)
        generate_load_one = BlobGenerator('nosql',
                                          'nosql-',
                                          self.value_size,
                                          end=self.num_items)
        self._load_all_buckets(self.master,
                               generate_load_one,
                               "create",
                               0,
                               1,
                               0,
                               True,
                               batch_size=self.checkpoint_size,
                               pause_secs=5,
                               timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        prev_backfill_timestamp_R1 = self._get_backfill_timestamp(
            self.replica1, self.replica2)
        prev_backfill_timestamp_R2 = self._get_backfill_timestamp(
            self.replica2, self.replica3)

        m_stats = StatsCommon.get_stats([self.master], self.bucket, param,
                                        stat_key)
        self._stop_server(self.replica2)
        time.sleep(5)
        data_load_thread = Thread(target=self._load_data_use_workloadgen,
                                  name="load_data",
                                  args=(self.master, ))
        data_load_thread.start()
        data_load_thread.join()
        self._start_server(self.replica2)
        time.sleep(5)

        self._verify_checkpoint_id(param, stat_key, m_stats)
        self._verify_backfill_happen(self.replica1, self.replica2,
                                     prev_backfill_timestamp_R1, True)
        self._verify_backfill_happen(self.replica2, self.replica3,
                                     prev_backfill_timestamp_R2, True)
Example #29
    def incremental_rebalance_in_out_with_mutation(self):
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])
        gen = BlobGenerator('mike', 'mike-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)

        for i in reversed(range(self.num_servers)[self.num_servers / 2:]):
            tasks = self._async_load_all_buckets(self.master, gen, "update", 0)

            self.cluster.rebalance(self.servers[:i], [], self.servers[i:self.num_servers])
            time.sleep(5)
            for task in tasks:
                task.result(self.wait_timeout * 20)
            tasks = self._async_load_all_buckets(self.master, gen, "update", 0)
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   self.servers[i:self.num_servers], [])
            for task in tasks:
                task.result(self.wait_timeout * 20)
            self.verify_cluster_stats(self.servers[:self.num_servers])
Example #30
    def test_start_compaction(self):

        # disable auto compaction
        self.disable_compaction()

        # create ddoc and add views
        ddoc_name = "ddoc1"
        views = self.make_default_views("test_add_views", 3)
        server = self.servers[0]
        self.create_views(server, ddoc_name, views)

        # load initial documents
        gen_load = BlobGenerator('test_view_compaction',
                                 'test_view_compaction-',
                                 self.value_size,
                                 end=self.num_items)

        self._load_all_buckets(server, gen_load, "create", 0)

        # start fragmentation monitor
        fragmentation_monitor = \
            self.cluster.async_monitor_view_fragmentation(server,
                                                          ddoc_name,
                                                          self.fragmentation_value,
                                                          timeout = 20)

        # generate load until fragmentation reached
        while fragmentation_monitor.state != "FINISHED":

            # update docs to create fragmentation
            self._load_all_buckets(server, gen_load, "update", 0)
            for view in views:

                # run queries to create indexes
                self.cluster.query_view(server, ddoc_name, view.name, {})
        fragmentation_monitor.result()

        # compact ddoc and make sure fragmentation is less than high_mark
        # will throw exception if failed
        result = self.cluster.compact_view(server, ddoc_name)

        self.assertTrue(result)