Example #1
    def test_nwusage_reset_to_zero(self):
        self.setup_xdcr()
        self.sleep(60)

        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster, nw_limit * self.num_src_nodes)

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        tasks = self.src_cluster.async_load_all_buckets_from_generator(kv_gen=gen_create)

        self.sleep(30)
        self._set_nwusage_limit(self.src_cluster, 0)
        event_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Network limit reset to 0 at {0}".format(event_time))

        for task in tasks:
            task.result()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit,
                                     no_of_nodes=self.num_dest_nodes, end_time=event_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=0,
                                     no_of_nodes=self.num_src_nodes,
                                     event_time=event_time, nw_usage="0")
Example #2
    def test_nwusage_with_hard_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()
        self.sleep(60)
        self._set_doc_size_num()
        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster, nw_limit * self.num_src_nodes)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        self.src_cluster.failover_and_rebalance_nodes()
        failover_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node failed over at {0}".format(failover_time))

        self.sleep(15)

        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup()

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), end_time=failover_time)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=failover_time,
                                     end_time=node_back_time, no_of_nodes=self.num_src_nodes - 1)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), event_time=node_back_time,
                                     no_of_nodes=self.num_src_nodes)
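
The verification above checks bandwidth usage over three consecutive time windows: up to the failover (default node count), between the failover and the add-back (num_src_nodes - 1 nodes), and after the add-back (num_src_nodes nodes again). A minimal sketch of those windows, with placeholder timestamps and node counts rather than values from the test:

    # Illustrative only: the three windows verified by _verify_bandwidth_usage above.
    failover_time, node_back_time = "12:00:05", "12:01:10"     # placeholder timestamps
    num_src_nodes = 3                                           # placeholder node count
    windows = [
        (None, failover_time, num_src_nodes),                   # full cluster until the failover
        (failover_time, node_back_time, num_src_nodes - 1),     # one node short in between
        (node_back_time, None, num_src_nodes),                  # full strength after the add-back
    ]
    print(windows)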
Example #3
 def test_Transfer(self):
     shell = RemoteMachineShellConnection(self.master)
     gen_update = BlobGenerator('testdata',
                                'testdata-',
                                self.value_size,
                                end=100)
     self._load_all_buckets(self.master,
                            gen_update,
                            "create",
                            0,
                            1,
                            0,
                            True,
                            batch_size=20000,
                            pause_secs=5,
                            timeout_secs=180)
     self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
     source = "http://" + self.master.ip + ":8091"
     info = shell.extract_remote_info()
     path = '/tmp/backup'
     #if info.type.lower() == "windows":
     #    path = '/cygdrive/c' + path
     shell.delete_files(path)
     create_dir = "mkdir " + path
     shell.execute_command(create_dir)
     options = "-b default " + " -u " + self.master.rest_username + " -p " + self.master.rest_password
     shell.execute_cbtransfer(source, path, options)
     expectedResults = {
         "peername": self.master.ip,
         "sockname": self.master.ip + ":11210",
         "source": "memcached",
         "user": "******",
         'bucket': 'default'
     }
     self.checkConfig(self.eventID, self.master, expectedResults)
Example #4
    def test_incremental_rebalance_in_out_with_mutation_and_deletion(self):
        """
        Rebalances nodes into and out of the cluster while doing mutations and
        deletions. Use 'zone' param to have nodes divided into server groups
        by having zone > 1.

        This test begins by loading a given number of items into the cluster.
        It then adds one node, rebalances that node into the cluster, and then
        rebalances it back out. During the rebalancing we update half of the
        items in the cluster and delete the other half. Once the node has been
        removed and added back we recreate the deleted items, wait for the
        disk queues to drain, and then verify that there has been no data loss,
        i.e. that sum(curr_items) matches curr_items_total. We then remove and
        add back two nodes at a time, and so on, until we have reached the point
        where we are adding back and removing at least half of the nodes.
        """
        self.add_remove_servers_and_rebalance(self.servers[1:self.num_servers],
                                              [])
        gen_delete = BlobGenerator('mike',
                                   'mike-',
                                   self.value_size,
                                   start=self.num_items // 2 + 2000,
                                   end=self.num_items)
        for i in reversed(
                list(range(self.num_servers))[self.num_servers // 2:]):
            tasks = self._async_load_all_buckets(self.master,
                                                 self.gen_update,
                                                 "update",
                                                 0,
                                                 pause_secs=5,
                                                 batch_size=1,
                                                 timeout_secs=60)
            tasks.extend(
                self._async_load_all_buckets(self.master,
                                             gen_delete,
                                             "delete",
                                             0,
                                             pause_secs=5,
                                             batch_size=1,
                                             timeout_secs=60))

            self.cluster.rebalance(
                self.servers[:i], [],
                self.servers[i:self.num_servers],
                sleep_before_rebalance=self.sleep_before_rebalance)
            self.sleep(60)
            self.add_remove_servers_and_rebalance(
                self.servers[i:self.num_servers], [],
                sleep_before_rebalance=self.sleep_before_rebalance)
            for task in tasks:
                task.result(self.wait_timeout * 30)

            # Validate seq_no snap_start/stop values after rebalance
            self.check_snap_start_corruption()

            self._load_all_buckets(self.master, gen_delete, "create", 0)
            self.verify_cluster_stats(self.servers[:self.num_servers])

            # Validate seq_no snap_start/stop values after doc_ops 'create'
            self.check_snap_start_corruption()
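
The docstring's "update half / delete half" split works because the two generators cover disjoint key ranges. A minimal sketch of that split, assuming self.gen_update covers roughly the first half of the key space (the exact range comes from the base test's setUp) and using a placeholder item count:

    # Illustrative only: the key ranges assumed by the mutation/deletion split above.
    num_items = 100000                         # placeholder item count
    update_range = (0, num_items // 2)         # keys touched by self.gen_update (assumed)
    delete_range = (num_items // 2 + 2000,     # keys removed by gen_delete, leaving a
                    num_items)                 # 2000-key safety gap after the midpoint
    print(update_range, delete_range)          # the two ranges do not overlap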
Example #5
    def test_cbcollectinfo_memory_usuage(self):
        """
           Test to make sure cbcollectinfo did not use a lot of memory.
           We run test with 200K items with size 128 bytes
        """
        gen_load = BlobGenerator('cbcollect',
                                 'cbcollect-',
                                 self.value_size,
                                 end=200000)
        self._load_all_buckets(self.master, gen_load, "create", 0)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self.log.info("Delete old logs files")
        self.shell.delete_files("%s.zip" % (self.log_filename))
        self.log.info("Delete old logs directory")
        self.shell.delete_files("cbcollect_info*")
        options = ""
        if self.collect_all_option:
            options = "--multi-node-diag"
            self.log.info("Run collect log with --multi-node-diag option")

        collect_threads = []
        col_thread = Thread(target=self.shell.execute_cbcollect_info,
                            args=("%s.zip" % (self.log_filename), options))
        collect_threads.append(col_thread)
        col_thread.start()
        monitor_mem_thread = Thread(
            target=self._monitor_collect_log_mem_process)
        collect_threads.append(monitor_mem_thread)
        monitor_mem_thread.start()
        self.thred_end = False
        # poll until the cbcollect thread finishes
        while not self.thred_end:
            if not col_thread.is_alive():
                self.thred_end = True
        for t in collect_threads:
            t.join()
Example #6
 def setUp(self):
     self.array_indexing = False
     super(UpgradeN1QLRBAC, self).setUp()
     self.initial_version = self.input.param('initial_version',
                                             '4.6.0-3653')
     self.upgrade_to = self.input.param("upgrade_to")
     self.n1ql_helper = N1QLHelper(version=self.version,
                                   shell=self.shell,
                                   use_rest=self.use_rest,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=[],
                                   log=self.log,
                                   input=self.input,
                                   master=self.master)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     log.info(self.n1ql_node)
     if self.ddocs_num:
         self.create_ddocs_and_views()
         gen_load = BlobGenerator('pre-upgrade',
                                  'preupgrade-',
                                  self.value_size,
                                  end=self.num_items)
         self._load_all_buckets(self.master,
                                gen_load,
                                "create",
                                self.expire_time,
                                flag=self.item_flag)
Example #7
 def incremental_rebalance_out_with_mutation_and_expiration(self):
     gen_2 = BlobGenerator('mike',
                           'mike-',
                           self.value_size,
                           start=self.num_items // 2 + 2000,
                           end=self.num_items)
     batch_size = 1000
     for i in reversed(range(self.num_servers)[2:]):
         # don't use batch for rebalance out 2-1 nodes
         rebalance = self.cluster.async_rebalance(self.servers[:i], [],
                                                  [self.servers[i]])
         self._load_all_buckets(self.master,
                                self.gen_update,
                                "update",
                                0,
                                batch_size=batch_size,
                                timeout_secs=60)
         self._load_all_buckets(self.master,
                                gen_2,
                                "update",
                                5,
                                batch_size=batch_size,
                                timeout_secs=60)
         rebalance.result()
         self.sleep(5)
         self._load_all_buckets(self.master, gen_2, "create", 0)
         self.verify_cluster_stats(self.servers[:i])
     self.verify_unacked_bytes_all_buckets()
Example #8
    def load_data(self):
        gen_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
        gen_update = BlobGenerator('nosql', 'nosql-', self.value_size, end=(self.num_items // 2 - 1))
        gen_expire = BlobGenerator('nosql', 'nosql-', self.value_size, start=self.num_items // 2, end=(self.num_items * 3 // 4 - 1))
        gen_delete = BlobGenerator('nosql', 'nosql-', self.value_size, start=self.num_items * 3 // 4, end=self.num_items)
        self._load_all_buckets(self.server_origin, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)

        if(self.doc_ops is not None):
            if("update" in self.doc_ops):
                self._load_all_buckets(self.server_origin, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("delete" in self.doc_ops):
                self._load_all_buckets(self.server_origin, gen_delete, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("expire" in self.doc_ops):
                self._load_all_buckets(self.server_origin, gen_expire, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets([self.server_origin])
        time.sleep(30)
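
The four generators in load_data share the same 'nosql' key prefix but slice the item range so that create, update, expire and delete each target a different portion of the data. The (start, end) arguments break down as follows, with a placeholder item count:

    # Illustrative only: the (start, end) slices passed to the generators above.
    num_items = 10000                                        # placeholder count
    load_slice = (0, num_items)                              # gen_load: every item
    update_slice = (0, num_items // 2 - 1)                   # gen_update: roughly the first half
    expire_slice = (num_items // 2, num_items * 3 // 4 - 1)  # gen_expire: the third quarter
    delete_slice = (num_items * 3 // 4, num_items)           # gen_delete: the last quarter
    print(load_slice, update_slice, expire_slice, delete_slice)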
Example #9
 def incremental_rebalance_out_with_mutation_and_deletion(self):
     gen_2 = BlobGenerator('rebalance-del',
                           'rebalance-del-',
                           self.value_size,
                           start=self.num_items // 2 + 2000,
                           end=self.num_items)
     batch_size = 1000
     for i in reversed(range(self.num_servers)[1:]):
         # don't use batch for rebalance out 2-1 nodes
         for bucket in self.buckets:
             bucket.kvs[2] = KVStore()
         tasks = [
             self.cluster.async_rebalance(self.servers[:i], [],
                                          [self.servers[i]])
         ]
         tasks += self._async_load_all_buckets(self.master,
                                               self.gen_update,
                                               "update",
                                               0,
                                               kv_store=1,
                                               batch_size=batch_size,
                                               timeout_secs=60)
         tasks += self._async_load_all_buckets(self.master,
                                               gen_2,
                                               "delete",
                                               0,
                                               kv_store=2,
                                               batch_size=batch_size,
                                               timeout_secs=60)
         for task in tasks:
             task.result()
         self.sleep(5)
         self._load_all_buckets(self.master, gen_2, "create", 0)
         self.verify_cluster_stats(self.servers[:i])
     self.verify_unacked_bytes_all_buckets()
Example #10
    def test_seq_add_del_on_bi_xdcr_all_enabled_failover(self):
        self.setup_xdcr_and_load()
        self.merge_all_buckets()
        self.c1_cluster.pause_all_replications()
        self.c2_cluster.pause_all_replications()

        kv_gen = self.c1_cluster.get_kv_gen()[OPS.CREATE]
        new_kv_gen = BlobGenerator(kv_gen.name,
                                   'C-New-Update',
                                   self._value_size,
                                   start=0,
                                   end=1)

        self.c2_cluster.load_all_buckets_from_generator(new_kv_gen,
                                                        ops=OPS.DELETE)

        self.c2_cluster.failover_and_rebalance_nodes()

        self.c1_cluster.load_all_buckets_from_generator(new_kv_gen,
                                                        ops=OPS.UPDATE)

        self.c2_cluster.failover_and_rebalance_nodes()
        self.c1_cluster.resume_all_replications()
        self.c2_cluster.resume_all_replications()

        self.verify_results()
Example #11
 def test_default_collect_logs_in_cluster(self):
     """
        In a cluster, running cbcollectinfo from one node should collect logs
        on that node only.
        Initial nodes: 3
     """
     gen_load = BlobGenerator('cbcollect', 'cbcollect-', self.value_size,
                                                         end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
     self.log.info("Delete old logs files")
     self.shell.delete_files("%s.zip" % (self.log_filename))
     self.log.info("Delete old logs directory")
     self.shell.delete_files("cbcollect_info*")
     output, error = self.shell.execute_cbcollect_info("%s.zip "\
                                                    % (self.log_filename))
     if output:
         if self.debug_logs:
             self.shell.log_command_output(output, error)
         for line in output:
             if "noLogs=1" in line:
                 if "oneNode=1" not in line:
                     self.log.error("Error line: %s" % line)
                     self.fail("cbcollect did not set to collect diag only at 1 node ")
     self.verify_results(self.log_filename)
Example #12
    def collectinfo_test_for_views(self):
        self.default_design_doc_name = "Doc1"
        self.view_name = self.input.param("view_name", "View")
        self.generate_map_reduce_error = self.input.param(
            "map_reduce_error", False)
        self.default_map_func = 'function (doc) { emit(doc.age, doc.first_name);}'
        self.gen_load = BlobGenerator('couch',
                                      'cb-',
                                      self.value_size,
                                      end=self.num_items)
        self._load_all_buckets(self.master, self.gen_load, "create", 0)
        self.reduce_fn = "_count"
        expected_num_items = self.num_items
        if self.generate_map_reduce_error:
            self.reduce_fn = "_sum"
            expected_num_items = None

        view = View(self.view_name,
                    self.default_map_func,
                    self.reduce_fn,
                    dev_view=False)
        self.cluster.create_view(self.master, self.default_design_doc_name,
                                 view, 'default', self.wait_timeout * 2)
        query = {"stale": "false", "connection_timeout": 60000}
        try:
            self.cluster.query_view(self.master,
                                    self.default_design_doc_name,
                                    self.view_name,
                                    query,
                                    expected_num_items,
                                    'default',
                                    timeout=self.wait_timeout)
        except Exception as ex:
            if not self.generate_map_reduce_error:
                raise ex
Example #13
 def test_Backup(self):
     shell = RemoteMachineShellConnection(self.master)
     gen_update = BlobGenerator('testdata',
                                'testdata-',
                                self.value_size,
                                end=100)
     self._load_all_buckets(self.master,
                            gen_update,
                            "create",
                            0,
                            1,
                            0,
                            True,
                            batch_size=20000,
                            pause_secs=5,
                            timeout_secs=180)
     self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
     info = shell.extract_remote_info()
     path = '/tmp/backup'
     #if info.type.lower() == "windows":
     #path = 'c:' + path
     shell.delete_files(path)
     create_dir = "mkdir " + path
     shell.execute_command(create_dir)
     shell.execute_cluster_backup(backup_location=path)
     expectedResults = {
         "peername": self.master.ip,
         "sockname": self.master.ip + ":11210",
         "source": "memcached",
         "user": "******",
         'bucket': 'default'
     }
     self.checkConfig(self.eventID, self.master, expectedResults)
Example #14
    def test_verify_mb19697(self):
        self.setup_xdcr_and_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        self.src_cluster.pause_all_replications()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=100000)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()
        self._wait_for_replication_to_catchup()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=100000)
        load_tasks = self.src_cluster.async_load_all_buckets_from_generator(gen)

        self.src_cluster.rebalance_out()

        for task in load_tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        self.src_cluster.rebalance_in()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=100000)
        load_tasks = self.src_cluster.async_load_all_buckets_from_generator(gen)

        self.src_cluster.failover_and_rebalance_master()

        for task in load_tasks:
            task.result()

        self._wait_for_replication_to_catchup()

        for node in self.src_cluster.get_nodes():
            _, count = NodeHelper.check_goxdcr_log(
                            node,
                            "counter .+ goes backward, maybe due to the pipeline is restarted",
                            goxdcr_log)
            self.assertEqual(count, 0, "counter goes backward, maybe due to the pipeline is restarted "
                                        "error message found in " + str(node.ip))
            self.log.info("counter goes backward, maybe due to the pipeline is restarted "
                                        "error message not found in " + str(node.ip))

        self.sleep(300)
        self.verify_results()
Example #15
    def test_checkpointing_with_full_rollback(self):
        bucket = self.src_cluster.get_buckets()[0]
        nodes = self.src_cluster.get_nodes()

        # Stop Persistence on Node A & Node B
        for node in nodes:
            mem_client = MemcachedClientHelper.direct_client(node, bucket)
            mem_client.stop_persistence()

        self.src_cluster.pause_all_replications()

        gen = BlobGenerator("C1-",
                            "C1-",
                            self._value_size,
                            end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()

        self.sleep(self._checkpoint_interval * 2)

        self.get_and_validate_latest_checkpoint()

        # Perform mutations on the bucket
        self.async_perform_update_delete()

        self.sleep(self._wait_timeout)

        # Kill memcached on Node A so that Node B becomes master
        shell = RemoteMachineShellConnection(
            self.src_cluster.get_master_node())
        shell.kill_memcached()

        # Start persistence on Node B
        mem_client = MemcachedClientHelper.direct_client(nodes[1], bucket)
        mem_client.start_persistence()

        # Failover Node B
        failover_task = self.src_cluster.async_failover()
        failover_task.result()

        # Wait for Failover & rollback to complete
        self.sleep(self._wait_timeout * 5)

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                     + '/goxdcr.log*'
        count1 = NodeHelper.check_goxdcr_log(
            nodes[0], "Received rollback from DCP stream", goxdcr_log)
        self.assertGreater(count1, 0,
                           "full rollback not received from DCP as expected")
        self.log.info("full rollback received from DCP as expected")
        count2 = NodeHelper.check_goxdcr_log(nodes[0],
                                             "Rolled back startSeqno to 0",
                                             goxdcr_log)
        self.assertGreater(count2, 0,
                           "startSeqno not rolled back to 0 as expected")
        self.log.info("startSeqno rolled back to 0 as expected")

        shell.disconnect()
Example #16
    def test_nwusage_with_auto_failover_and_bwthrottle_enabled(self):
        self.setup_xdcr()
        self.sleep(60)
        self._set_doc_size_num()
        self.src_cluster.rebalance_in()

        nw_limit = self._input.param("nw_limit", self._get_nwusage_limit())
        self._set_nwusage_limit(self.src_cluster,
                                nw_limit * self.num_src_nodes)

        src_conn = RestConnection(self.src_cluster.get_master_node())
        src_conn.update_autofailover_settings(enabled=True, timeout=30)

        self.src_cluster.pause_all_replications()

        gen_create = BlobGenerator('nwOne',
                                   'nwOne',
                                   self._value_size,
                                   end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create)

        self.src_cluster.resume_all_replications()

        self.sleep(15)

        shell = RemoteMachineShellConnection(self._input.servers[1])
        shell.stop_couchbase()
        self.sleep(30)
        task = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [],
                                            [])
        task.result()
        failover_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info("Node auto failed over at {0}".format(failover_time))
        FloatingServers._serverlist.append(self._input.servers[1])

        self.sleep(15)

        shell.start_couchbase()
        shell.disable_firewall()
        self.sleep(45)
        self.src_cluster.rebalance_in()
        node_back_time = self._get_current_time(
            self.src_cluster.get_master_node())
        self.log.info("Node added back at {0}".format(node_back_time))

        self._wait_for_replication_to_catchup(timeout=600)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     end_time=failover_time,
                                     no_of_nodes=3)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=failover_time,
                                     end_time=node_back_time,
                                     no_of_nodes=2)
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(),
                                     event_time=node_back_time,
                                     no_of_nodes=3)
Example #17
 def test_warm_up_with_eviction(self):
     gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
     gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, gen_create, "create", 0)
     self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
     self._verify_stats_all_buckets(self.servers[:self.nodes_init])
     self.timeout = self.wait_timeout
     self.without_access_log = False
     self._stats_befor_warmup(self.buckets[0])
     self._restart_memcache(self.buckets[0])
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         self.servers[:self.nodes_init], self,
         wait_time=self.wait_timeout, wait_if_warmup=True)
     self.sleep(10, 'Wait some time before next load')
     self._load_all_buckets(self.master, gen_create2, "create", 0)
     self._wait_for_stats_all_buckets(self.servers[:self.nodes_init], timeout=self.wait_timeout * 5)
     self._verify_stats_all_buckets(self.servers[:self.nodes_init])
Example #18
    def rebalance_in_with_compaction_and_ops(self):
        tasks = list()
        servs_in = [self.servers[i + self.nodes_init]
                    for i in range(self.nodes_in)]

        for bucket in self.buckets:
            tasks.append(self.cluster.async_compact_bucket(self.master,
                                                           bucket))
        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                # half of the data will be updated in each iteration
                tasks += self._async_load_all_buckets(
                    self.master, self.gen_update, "update", 0,
                    batch_size=20000, pause_secs=5, timeout_secs=180)
            elif "create" in self.doc_ops:
                # half of the initial data will be added in each iteration
                gen_create = BlobGenerator(
                    'mike', 'mike-', self.value_size,
                    start=self.num_items * (1 + i) / 2.0,
                    end=self.num_items * (1 + i / 2.0))
                tasks += self._async_load_all_buckets(
                    self.master, gen_create, "create", 0,
                    batch_size=20000, pause_secs=5, timeout_secs=180)
            elif "delete" in self.doc_ops:
                # roughly 1/(num_servers - 1) of the initial data is removed in each iteration;
                # at the end the bucket should be empty (or hold only a couple of items)
                gen_delete = BlobGenerator('mike', 'mike-', self.value_size,
                                           start=int(self.num_items * (1 - i / (self.num_servers - 1.0))) + 1,
                                           end=int(self.num_items * (1 - (i - 1) / (self.num_servers - 1.0))))
                tasks += self._async_load_all_buckets(
                    self.master, gen_delete, "delete", 0,
                    batch_size=20000, pause_secs=5, timeout_secs=180)

        rebalance_task = self.cluster.async_rebalance(
            self.servers[:self.nodes_init], servs_in, [],
            sleep_before_rebalance=self.sleep_before_rebalance)

        rebalance_task.result()
        for task in tasks:
            task.result()

        self.verify_cluster_stats(self.servers[:self.nodes_in+self.nodes_init])
        self.verify_unacked_bytes_all_buckets()

        # Validate seq_no snap_start/stop values after rebalance
        self.check_snap_start_corruption()
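
The start/end arithmetic in the delete branch above is easier to follow with concrete numbers. A quick worked example with placeholder values (in this snippet the loop index i is presumably supplied by the loop-based variants of the test):

    # Worked example of the delete-range formula above, with made-up values.
    num_items, num_servers, i = 1000, 4, 2
    start = int(num_items * (1 - i / (num_servers - 1.0))) + 1      # -> 334
    end = int(num_items * (1 - (i - 1) / (num_servers - 1.0)))      # -> 666
    print(start, end)   # each pass removes roughly num_items / (num_servers - 1) keys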
Example #19
    def test_nwusage_with_bidirection(self):
        self.setup_xdcr()
        nw_limit = self._input.param("nw_limit", 1)
        self._set_nwusage_limit(self.src_cluster, nw_limit)
        self._set_nwusage_limit(self.dest_cluster, nw_limit)

        gen_create1 = BlobGenerator('nwOne', 'nwOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(kv_gen=gen_create1)
        gen_create2 = BlobGenerator('nwTwo', 'nwTwo', self._value_size, end=self._num_items)
        self.dest_cluster.load_all_buckets_from_generator(kv_gen=gen_create2)

        self.perform_update_delete()
        self._wait_for_replication_to_catchup(timeout=60)

        self.verify_results()
        self._verify_bandwidth_usage(node=self.src_cluster.get_master_node(), nw_limit=nw_limit)
        self._verify_bandwidth_usage(node=self.dest_cluster.get_master_node(), nw_limit=nw_limit)
Example #20
File: lwwXDCR.py Project: umang-cb/Jython
    def test_seq_upd_on_bi_xdcr_all_enabled(self):
        self.setup_xdcr_and_load()
        self.merge_all_buckets()
        self.c1_cluster.pause_all_replications()
        self.c2_cluster.pause_all_replications()

        gen = BlobGenerator("C-new-", "C1-new-", self._value_size, end=1)
        self.c1_cluster.load_all_buckets_from_generator(gen)
        # add a small delay so the two updates get different timestamps
        self.sleep(2)
        gen = BlobGenerator("C-new-", "C2-new-", self._value_size, end=1)
        self.c2_cluster.load_all_buckets_from_generator(gen)

        self.c1_cluster.resume_all_replications()
        self.c2_cluster.resume_all_replications()

        self.verify_results()
Example #21
 def test_cluster_rebalance_in_prompt(self):
     gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     servers_in = self.servers[1:]
     for server in servers_in:
         self.secretmgmt_base_obj.setup_pass_node(server, self.password, startup_type='prompt')
     temp_result = self.cluster.rebalance(self.servers, servers_in, [])
     self.assertTrue(temp_result, 'Rebalance-in did not complete with password node setup')
Example #22
    def rebalance_in_with_failover(self):
        fail_over = self.input.param("fail_over", False)
        gen_update = BlobGenerator('mike', 'mike-', self.value_size, end=self.num_items)
        tasks = []
        tasks += self._async_load_all_buckets(self.master, gen_update, "update", 0)
        for task in tasks:
            task.result()
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        self._verify_stats_all_buckets(self.servers[:self.nodes_init], timeout=120)
        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
        self.sleep(20)
        prev_failover_stats = self.get_failovers_logs(self.servers[:self.nodes_init], self.buckets)
        prev_vbucket_stats = self.get_vbucket_seqnos(self.servers[:self.nodes_init], self.buckets)
        disk_replica_dataset, disk_active_dataset = self.get_and_compare_active_replica_data_set_all(self.servers[:self.nodes_init], self.buckets, path=None)
        self.rest = RestConnection(self.master)
        self.nodes = self.get_nodes(self.master)
        chosen = RebalanceHelper.pick_nodes(self.master, howmany=1)
        self.rest = RestConnection(self.master)
        self.rest.add_node(self.master.rest_username,
                           self.master.rest_password,
                           self.servers[self.nodes_init].ip,
                           self.servers[self.nodes_init].port)

        # Perform doc-mutation after add-node
        tasks = self._async_load_all_buckets(self.master, gen_update, "update", 0)
        for task in tasks:
            task.result()

        # Validate seq_no snap_start/stop values after rebalance
        self.check_snap_start_corruption()

        # Mark Node for failover
        self.rest.fail_over(chosen[0].id, graceful=fail_over)

        # Perform doc-mutation after node failover
        tasks = self._async_load_all_buckets(self.master, gen_update, "update", 0)
        for task in tasks:
            task.result()

        if fail_over:
            self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg="Graceful Failover Failed")
        self.nodes = self.rest.node_statuses()
        self.rest.rebalance(otpNodes=[node.id for node in self.nodes], ejectedNodes=[chosen[0].id])
        self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg="Rebalance Failed")

        # Validate seq_no snap_start/stop values after rebalance
        self.check_snap_start_corruption()

        # Verification
        new_server_list = self.add_remove_servers(self.servers, self.servers[:self.nodes_init],
                                                  [chosen[0]], [self.servers[self.nodes_init]])
        self._verify_stats_all_buckets(new_server_list, timeout=120)
        self.verify_cluster_stats(new_server_list, check_ep_items_remaining=True)
        self.compare_failovers_logs(prev_failover_stats, new_server_list, self.buckets)
        self.sleep(30)
        self.data_analysis_active_replica_all(disk_active_dataset, disk_replica_dataset, new_server_list, self.buckets, path=None)
        self.verify_unacked_bytes_all_buckets()
        nodes = self.get_nodes_in_cluster(self.master)
        self.vb_distribution_analysis(servers=nodes, buckets=self.buckets, std=1.0, total_vbuckets=self.total_vbuckets)
Example #23
    def setUp(self):
        super(CommunityBaseTest, self).setUp()
        self.product = self.input.param("product", "cb")
        self.vbuckets = self.input.param("vbuckets", 128)
        self.version = self.input.param("version", "2.5.1-1082")
        self.type = self.input.param('type', 'community')
        self.doc_ops = self.input.param("doc_ops", None)
        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")
        self.defaul_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
        self.couchbase_login = "******" % (self.input.membase_settings.rest_username,
                                          self.input.membase_settings.rest_password)
        self.backup_option = self.input.param("backup_option", '')
        #define the data that will be used to test
        self.blob_generator = self.input.param("blob_generator", True)
        self.cli_test = self.input.param("cli_test", False)
        self.rest = RestConnection(self.master)
        if self.rest.is_enterprise_edition():
            raise Exception("This couchbase server is not Community Edition."
                  "Tests require Community Edition to test")
        self.version = self.rest.get_nodes_version()[:5]

        if self.blob_generator:
            # gen_load is used to upload data before each test (1000 items by default)
            self.gen_load = BlobGenerator('test', 'test-', self.value_size, end=self.num_items)
            # gen_update is used to mutate the first half of the uploaded data
            self.gen_update = BlobGenerator('test', 'test-', self.value_size, end=(self.num_items // 2 - 1))
            # upload data before each test
            self._load_all_buckets(self.servers[0], self.gen_load, "create", 0)
        else:
            self._load_doc_data_all_buckets()
        self.remote = RemoteMachineShellConnection(self.master)
        type = self.remote.extract_remote_info().distribution_type
        self.backup_location = LINUX_BACKUP_PATH
        self.backup_c_location = LINUX_BACKUP_PATH
        self.bin_path = LINUX_COUCHBASE_BIN_PATH
        self.file_extension = ""
        if type.lower() == 'windows':
            self.is_linux = False
            self.backup_location = WIN_BACKUP_PATH
            self.backup_c_location = WIN_BACKUP_C_PATH
            self.bin_path = WIN_COUCHBASE_BIN_PATH
            self.file_extension = ".exe"
        else:
            self.is_linux = True
Example #24
 def setUp(self):
     super(AutoFailoverBaseTest, self).setUp()
     self._get_params()
     self.rest = RestConnection(self.orchestrator)
     self.task_manager = TaskManager("Autofailover_thread")
     self.task_manager.start()
     self.node_failure_task_manager = TaskManager(
         "Nodes_failure_detector_thread")
     self.node_failure_task_manager.start()
     self.initial_load_gen = BlobGenerator('auto-failover',
                                           'auto-failover-',
                                           self.value_size,
                                           end=self.num_items)
     self.update_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.update_items)
     self.delete_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          start=self.update_items,
                                          end=self.delete_items)
     # pre-load data only when skip_load is not set
     if not self.skip_load:
         self._load_all_buckets(self.servers[0], self.initial_load_gen,
                                "create", 0)
         self._async_load_all_buckets(self.orchestrator,
                                      self.update_load_gen, "update", 0)
         self._async_load_all_buckets(self.orchestrator,
                                      self.delete_load_gen, "delete", 0)
     self.server_index_to_fail = self.input.param("server_index_to_fail",
                                                  None)
     if self.server_index_to_fail is None:
         self.server_to_fail = self._servers_to_fail()
     else:
         if isinstance(self.server_index_to_fail, str):
             self.server_to_fail = [
                 self.servers[int(node_item)]
                 for node_item in self.server_index_to_fail.split(":")
             ]
         else:
             self.server_to_fail = [self.servers[self.server_index_to_fail]]
     self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                        self.nodes_in]
     self.servers_to_remove = self.servers[self.nodes_init -
                                           self.nodes_out:self.nodes_init]
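
setUp accepts server_index_to_fail either as a single integer index or as a colon-separated list of indices. A small sketch of how the parsing above maps a parameter value onto servers, using placeholder addresses:

    # Illustrative only: resolving a colon-separated server_index_to_fail.
    servers = ["10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4"]    # stand-ins for self.servers
    server_index_to_fail = "1:3"                                   # hypothetical test parameter
    server_to_fail = [servers[int(idx)] for idx in server_index_to_fail.split(":")]
    print(server_to_fail)   # ['10.0.0.2', '10.0.0.4']; a plain int selects a single server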
Example #25
    def rebalance_out_with_ops(self):
        tasks = list()
        gen_delete = BlobGenerator('mike',
                                   'mike-',
                                   self.value_size,
                                   start=self.num_items // 2,
                                   end=self.num_items)
        gen_create = BlobGenerator('mike',
                                   'mike-',
                                   self.value_size,
                                   start=self.num_items + 1,
                                   end=self.num_items * 3 // 2)
        servs_out = [
            self.servers[self.num_servers - i - 1]
            for i in range(self.nodes_out)
        ]
        # define which doc ops will be performed during rebalancing;
        # multiple ops are allowed, but they run one at a time
        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                tasks += self._async_load_all_buckets(self.master,
                                                      self.gen_update,
                                                      "update", 0)
            if "create" in self.doc_ops:
                tasks += self._async_load_all_buckets(self.master, gen_create,
                                                      "create", 0)
            if "delete" in self.doc_ops:
                tasks += self._async_load_all_buckets(self.master, gen_delete,
                                                      "delete", 0)
        rebalance = self.cluster.async_rebalance(
            self.servers[:1], [],
            servs_out,
            sleep_before_rebalance=self.sleep_before_rebalance,
            cluster_config=self.cluster_config)

        rebalance.result()
        for task in tasks:
            task.result()

        self.verify_cluster_stats(self.servers[:self.num_servers -
                                               self.nodes_out])
        self.verify_unacked_bytes_all_buckets()

        # Validate seq_no snap_start/stop values after rebalance
        self.check_snap_start_corruption()
Example #26
 def test_ns_server_with_rebalance_failover_with_redaction_enabled(self):
     kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
     rest = RestConnection(self.master)
     # load bucket and do some ops
     gen_create = BlobGenerator('logredac', 'logredac-', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, gen_create, "create", 0)
     gen_delete = BlobGenerator('logredac', 'logredac-', self.value_size, start=self.num_items // 2,
                                end=self.num_items)
     gen_update = BlobGenerator('logredac', 'logredac-', self.value_size, start=self.num_items + 1,
                                end=self.num_items * 3 // 2)
     self._load_all_buckets(self.master, gen_delete, "create", 0)
     self._load_all_buckets(self.master, gen_update, "create", 0)
     # set log redaction level, collect logs, verify log files exist and verify them for redaction
     self.set_redaction_level()
     self.start_logs_collection()
     services_in = ["kv"]
     to_add_nodes = [self.servers[self.nodes_init]]
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
     reached = RestHelper(rest).rebalance_reached()
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
     # failover a node
     server_failed_over = self.servers[self.nodes_init]
     fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[server_failed_over], graceful=True)
     fail_over_task.result()
     rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [server_failed_over])
     reached = RestHelper(rest).rebalance_reached()
     self.assertTrue(reached, "rebalance failed, stuck or did not complete")
     rebalance.result()
     result = self.monitor_logs_collection()
     log.info(result)
     try:
         logs_path = result["perNode"]["ns_1@" + str(self.master.ip)]["path"]
     except KeyError:
         logs_path = result["perNode"]["*****@*****.**"]["path"]
     redactFileName = logs_path.split('/')[-1]
     nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
     remotepath = logs_path[0:logs_path.rfind('/') + 1]
     self.verify_log_files_exist(remotepath=remotepath,
                                 redactFileName=redactFileName,
                                 nonredactFileName=nonredactFileName)
     self.verify_log_redaction(remotepath=remotepath,
                               redactFileName=redactFileName,
                               nonredactFileName=nonredactFileName,
                               logFileName="ns_server.debug.log")
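
The redacted and non-redacted file names are derived from the path returned for the master node by the log-collection task. A quick illustration of that string handling with a made-up collectinfo path:

    # Illustrative only: deriving the file names from a hypothetical collectinfo path.
    logs_path = "/opt/couchbase/var/lib/couchbase/tmp/collectinfo-2020-01-01-redacted.zip"
    redactFileName = logs_path.split('/')[-1]                    # 'collectinfo-2020-01-01-redacted.zip'
    nonredactFileName = redactFileName.replace('-redacted', '')  # 'collectinfo-2020-01-01.zip'
    remotepath = logs_path[0:logs_path.rfind('/') + 1]           # everything up to the last '/'
    print(redactFileName, nonredactFileName, remotepath)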
Example #27
 def setUp(self):
     super(MemorySanity, self).setUp()
     self.kv_verify = self.input.param('kv_verify', True)
     self.log.info("==============  MemorySanityTest setup was started for test #{0} {1}==============" \
                   .format(self.case_number, self._testMethodName))
     self.gen_create = BlobGenerator('loadOne',
                                     'loadOne_',
                                     self.value_size,
                                     end=self.num_items)
Example #28
class QueryWorkbenchTests(BaseTestCase):
    n1ql_port = 8093
    _input = TestInputSingleton.input
    num_items = _input.param("items", 100)
    _value_size = _input.param("value_size", 256)
    gen_create = BlobGenerator('loadOne',
                               'loadOne',
                               _value_size,
                               end=num_items)
    #bucket and ram quota
    buckets_ram = {
        "CUSTOMER": 100,
        "DISTRICT": 100,
        "HISTORY": 100,
        "ITEM": 100,
        "NEW_ORDER": 100,
        "ORDERS": 100,
        "ORDER_LINE": 100
    }

    #"default:": 100}

    def setUp(self):
        super(QueryWorkbenchTests, self).setUp()
        server = self.master
        if self.input.tuq_client and "client" in self.input.tuq_client:
            server = self.tuq_client
        self.rest = RestConnection(server)
        #self.rest.delete_bucket("default")
        time.sleep(20)
        # drop and recreate buckets
        for i, bucket_name in enumerate(self.buckets_ram.keys()):
            self.rest.create_bucket(bucket=bucket_name,
                                    ramQuotaMB=int(
                                        self.buckets_ram[bucket_name]),
                                    replicaNumber=0,
                                    proxyPort=11218 + i)
            self.log.info(self.servers[0])
            #bucket = self.src_cluster.get_bucket_by_name(bucket_name)
        time.sleep(20)
        #self.rest.create_bucket(bucket="default",
        #ramQuotaMB=int(self.buckets_ram["default"]),
        #replicaNumber=0,
        #proxyPort=11218)
        self._load_all_buckets(self.servers[0], self.gen_create,
                               "create", 0)
        #time.sleep(20)

    def tearDown(self):
        super(QueryWorkbenchTests, self).tearDown()

    def test_describe(self):
        for bucket_name in self.rest.get_buckets():
            query = "infer %s" % bucket_name
            self.log.info(query)
            result = self.rest.query_tool(query, self.n1ql_port)
            self.log.info(result)
Example #29
 def rebalance_out_with_ops(self):
     gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items // 2, end=self.num_items)
     gen_create = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items + 1, end=self.num_items * 3 // 2)
     servs_out = [self.servers[self.num_servers - i - 1] for i in range(self.nodes_out)]
     tasks = [self.cluster.async_rebalance(self.servers[:1], [], servs_out)]
     # define which doc ops will be performed during rebalancing;
     # multiple ops are allowed, but they run one at a time
     if(self.doc_ops is not None):
         if("update" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
         if("create" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_create, "create", 0)
         if("delete" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_delete, "delete", 0)
     for task in tasks:
         task.result()
     self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
     self.verify_unacked_bytes_all_buckets()
Example #30
 def test_data_analysis_disk_memory_comparison_all(self):
     """
          Method to compare disk+memory data against memory-only data using the cbtransfer functionality.
          The comparison is done at the cluster level.
     """
     create = BlobGenerator('loadOne', 'loadOne_', self.value_size, end=self.num_items)
     update = BlobGenerator('loadOne', 'loadOne-', self.value_size, end=(self.num_items // 2 - 1))
     self._load_all_buckets(self.master, create, "create", 0,
                            batch_size=10000, pause_secs=10, timeout_secs=60)
     self.num_items = self.num_items - self.num_items // 2
     self._verify_stats_all_buckets(self.servers, timeout=120)
     self._wait_for_stats_all_buckets(self.servers)
     self._async_load_all_buckets(self.master, update, "update", 0)
     self._verify_stats_all_buckets(self.servers, timeout=120)
     self._wait_for_stats_all_buckets(self.servers)
     self.sleep(60)
     self.data_analysis_all()
     self.verify_unacked_bytes_all_buckets()