Code Example #1
File: failovertests.py Project: mschoch/testrunner
 def tearDown(self):
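     # Re-open the network on every cleanup node (flush iptables, accept ports 1000-60000),
     # restart couchbase-server, then delete all buckets and clean up the cluster.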
     try:
         self._cluster_helper.shutdown()
         log = logger.Logger.get_logger()
         log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         RemoteUtilHelper.common_basic_setup(self._servers)
         log.info("10 seconds delay to wait for membase-server to start")
         time.sleep(10)
         for server in self._cleanup_nodes:
             shell = RemoteMachineShellConnection(server)
             o, r = shell.execute_command("iptables -F")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
             shell.log_command_output(o, r)
             o, r = shell.execute_command("/etc/init.d/couchbase-server start")
             shell.log_command_output(o, r)
             shell.disconnect()
         BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
         ClusterOperationHelper.cleanup_cluster(self._servers)
         ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
         log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
     finally:
         pass
Code Example #2
    def common_tearDown(servers, testcase):
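        # Restore basic connectivity on all nodes, wait for ns_server to come back,
        # flush every bucket, then delete all buckets and clean up the cluster.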
        log = logger.Logger.get_logger()
        log.info(
            "==============  common_tearDown was started for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
        RemoteUtilHelper.common_basic_setup(servers)

        log.info("10 seconds delay to wait for couchbase-server to start")
        time.sleep(10)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
        )
        try:
            rest = RestConnection(servers[0])
            buckets = rest.get_buckets()
            for bucket in buckets:
                MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
        except Exception:
            pass
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        log.info(
            "==============  common_tearDown was finished for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
Code Example #3
 def tearDown(self):
     if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
             and 'stop-on-failure' in TestInputSingleton.input.test_params and \
             str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
         # supported starting with python2.7
         self.log.warn("CLEANUP WAS SKIPPED")
         self.cluster.shutdown(force=True)
         self._log_finish(self)
     else:
         try:
             self.log.info("==============  tearDown was started for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
             RemoteUtilHelper.common_basic_setup(self.servers)
             BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
             for node in self.servers:
                 master = node
                 try:
                     ClusterOperationHelper.cleanup_cluster(self.servers,
                                                            master=master)
                 except:
                     continue
             self.log.info("==============  tearDown was finished for test #{0} {1} =============="\
                           .format(self.case_number, self._testMethodName))
         finally:
             super(FailoverBaseTest, self).tearDown()
Code Example #4
File: tuq_cluster_ops.py Project: lichia/testrunner
 def test_warmup(self):
     index_field = self.input.param("index_field", 'name')
     indexes = []
     try:
         indexes = self._create_multiple_indexes(index_field)
         num_srv_warm_up = self.input.param("srv_warm_up", 1)
         if self.input.tuq_client is None:
             self.fail("For this test external tuq server is requiered. " + \
                       "Please specify one in conf")
         self.test_union_all()
         for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             remote.start_server()
             remote.disconnect()
         #run query, result may not be as expected, but tuq shouldn't fail
         try:
             self.test_union_all()
         except:
             pass
         ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
         self.sleep(5)
         self.test_union_all()
     finally:
         self._delete_multiple_indexes(indexes)
Code Example #5
File: basetestcase.py Project: paul-guo-/appstack
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        self.buckets = {}

        self.default_bucket = self.input.param("default_bucket", True)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)

        if not self.input.param("skip_cleanup", False):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)

        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets['default'] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
Code Example #6
File: moxi.py Project: Boggypop/testrunner
 def cleanup(self):
     rest = RestConnection(self.master)
     rest.stop_rebalance()
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for server in self.servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
Code Example #7
 def cleanup_cluster(self):
     if not "skip_cleanup" in TestInputSingleton.input.test_params:
         BucketOperationHelper.delete_all_buckets_or_assert(
             self.servers, self.testcase)
         ClusterOperationHelper.cleanup_cluster(self.servers)
         ClusterOperationHelper.wait_for_ns_servers_or_assert(
             self.servers, self.testcase)
Code Example #8
File: uibasetest.py Project: DavidAlphaFox/couchbase
 def tearDown(self):
     try:
         if self.driver:
             path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
             full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
             self.log.info('screenshot is available: %s' % full_path)
             if not os.path.exists(path_screen):
                 os.mkdir(path_screen)
             self.driver.get_screenshot_as_file(os.path.abspath(full_path))
         rest = RestConnection(self.servers[0])
         if rest._rebalance_progress_status() == 'running':
             stopped = rest.stop_rebalance()
             self.assertTrue(stopped, msg="unable to stop rebalance")
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         for server in self.servers:
             ClusterOperationHelper.cleanup_cluster([server])
         ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
         if self.driver:
             self.driver.close()
     except Exception as e:
         raise e
     finally:
         if self.driver:
             self.shell.disconnect()
         self.cluster.shutdown()
Code Example #9
File: rebalanceXDCR.py Project: bcui6611/testrunner
    def tearDown(self):
#        super(Rebalance, self).tearDown()
        try:
            self.log.info("==============  XDCRbasetests stats for test #{0} {1} =============="\
                        .format(self.case_number, self._testMethodName))
            self._end_replication_flag = 1
            if hasattr(self, '_stats_thread1'): self._stats_thread1.join()
            if hasattr(self, '_stats_thread2'): self._stats_thread2.join()
            if hasattr(self, '_stats_thread3'): self._stats_thread3.join()
            if self._replication_direction_str in "bidirection":
                if hasattr(self, '_stats_thread4'): self._stats_thread4.join()
                if hasattr(self, '_stats_thread5'): self._stats_thread5.join()
                if hasattr(self, '_stats_thread6'): self._stats_thread6.join()
            if self._replication_direction_str in "bidirection":
                self.log.info("Type of run: BIDIRECTIONAL XDCR")
            else:
                self.log.info("Type of run: UNIDIRECTIONAL XDCR")
            self._print_stats(self.src_master)
            if self._replication_direction_str in "bidirection":
                self._print_stats(self.dest_master)
            self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
            self.log.info("==============  rebalanceXDCR cleanup was started for test #{0} {1} =============="\
                    .format(self.case_number, self._testMethodName))
            for nodes in [self.src_nodes, self.dest_nodes]:
                for node in nodes:
                    BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                    ClusterOperationHelper.cleanup_cluster([node], self)
                    ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
            self.log.info("==============  rebalanceXDCR cleanup was finished for test #{0} {1} =============="\
                    .format(self.case_number, self._testMethodName))
        finally:
            self.cluster.shutdown(force=True)
            self._log_finish(self)
Code Example #10
 def offline_cluster_upgrade_and_rebalance(self):
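     # Stop the last 'num_stoped_nodes' servers, upgrade them asynchronously while a
     # rebalance (expected to fail) runs, then rebalance out 'servs_out' and verify.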
     num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
     stoped_nodes = self.servers[self.nodes_init - num_stoped_nodes :self.nodes_init]
     servs_out = self.servers[self.nodes_init - num_stoped_nodes - self.nodes_out :self.nodes_init - num_stoped_nodes]
     servs_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
     self._install(self.servers)
     self.operations(self.servers[:self.nodes_init])
     if self.ddocs_num:
         self.create_ddocs_and_views()
     if self.during_ops:
         for opn in self.during_ops:
             getattr(self, opn)()
     for upgrade_version in self.upgrade_versions:
         self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                    format(upgrade_version))
         for server in stoped_nodes:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             remote.disconnect()
         upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
         try:
             self.cluster.rebalance(self.servers[:self.nodes_init], servs_in, servs_out)
         except RebalanceFailedException:
             self.log.info("rebalance failed as expected")
         for upgrade_thread in upgrade_threads:
             upgrade_thread.join()
         success_upgrade = True
         while not self.queue.empty():
             success_upgrade &= self.queue.get()
         if not success_upgrade:
             self.fail("Upgrade failed!")
         ClusterOperationHelper.wait_for_ns_servers_or_assert(stoped_nodes, self)
         self.cluster.rebalance(self.servers[:self.nodes_init], [], servs_out)
         self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
         self.verification(list(set(self.servers[:self.nodes_init] + servs_in) - set(servs_out)))
Code Example #11
File: cli_base.py Project: arod1987/testrunner
 def tearDown(self):
     if not self.input.param("skip_cleanup", True):
         if self.times_teardown_called > 1 :
             self.shell.disconnect()
     if self.input.param("skip_cleanup", True):
         if self.case_number > 1 or self.times_teardown_called > 1:
             self.shell.disconnect()
     self.times_teardown_called += 1
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     zones = rest.get_zone_names()
     for zone in zones:
         if zone != "Group 1":
             rest.delete_zone(zone)
     self.clusters_dic = self.input.clusters
     if self.clusters_dic:
         if len(self.clusters_dic) > 1:
             self.dest_nodes = self.clusters_dic[1]
             self.dest_master = self.dest_nodes[0]
             if self.dest_nodes and len(self.dest_nodes) > 1:
                 self.log.info("======== clean up destination cluster =======")
                 rest = RestConnection(self.dest_nodes[0])
                 rest.remove_all_remote_clusters()
                 rest.remove_all_replications()
                 BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                 ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
         elif len(self.clusters_dic) == 1:
             self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
     else:
         self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
     super(CliBaseTest, self).tearDown()
Code Example #12
File: setgettests.py Project: arod1987/testrunner
    def setUp_bucket(self, unittest):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        unittest.assertTrue(self.input, msg="input parameters missing...")
        self.test = unittest
        self.master = self.input.servers[0]
        rest = RestConnection(self.master)
        rest.init_cluster(username=self.master.rest_username, password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self.test)

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.memoryQuota)

        # Add built-in user
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
        RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
        time.sleep(10)
Code Example #13
    def test_insert_x_delete_y_docs_destroy_cluster(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
        msg = "description : have a cluster, insert {0} docs, delete "\
            "{1} docs while destroying the cluster into a single node "\
            "and query it"
        self.log.info(msg.format(num_docs, num_deleted_docs))
        design_name = "dev_test_delete_{0}_docs_destroy_cluster".format(
            num_deleted_docs)
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully clustered
        ClusterOperationHelper.add_and_rebalance(self.helper.servers)

        self.helper.create_index_fun(design_name, prefix)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Start destroying the cluster and rebalancing it without waiting
        # until it's finished
        ClusterOperationHelper.cleanup_cluster(self.helper.servers,
                                                    False)

        deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
        self._wait_for_rebalance()

        # Verify that the docs got delete and are no longer part of the
        # spatial view
        results = self.helper.get_results(design_name, num_docs)
        result_keys = self.helper.get_keys(results)
        self.assertEqual(len(result_keys), num_docs - len(deleted_keys))
        self.helper.verify_result(inserted_keys, deleted_keys + result_keys)
Code Example #14
File: failovertests.py Project: jchris/testrunner
 def common_setup(input, testcase):
     servers = input.servers
     RemoteUtilHelper.common_basic_setup(servers)
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     for server in servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
Code Example #15
File: expirytests.py Project: membase/testrunner
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 / 3

        # Add built-in user
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
        RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
        time.sleep(10)

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Code Example #16
 def offline_cluster_upgrade_and_reboot(self):
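     # Offline upgrade: stop a subset of nodes, upgrade them, restart couchbase on each
     # upgraded node and verify the cluster once the nodes finish warming up.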
     self._install(self.servers[:self.nodes_init])
     self.operations(self.servers[:self.nodes_init])
     if self.ddocs_num:
         self.create_ddocs_and_views()
     if self.during_ops:
         for opn in self.during_ops:
             getattr(self, opn)()
     num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
     stoped_nodes = self.servers[self.nodes_init - num_stoped_nodes :self.nodes_init]
     self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade")
     for upgrade_version in self.upgrade_versions:
         for server in stoped_nodes:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             remote.disconnect()
         self.sleep(self.sleep_time)
         upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
         for upgrade_thread in upgrade_threads:
             upgrade_thread.join()
         success_upgrade = True
         while not self.queue.empty():
             success_upgrade &= self.queue.get()
         if not success_upgrade:
             self.fail("Upgrade failed!")
         for server in stoped_nodes:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             self.sleep(5)
             remote.start_couchbase()
             remote.disconnect()
         ClusterOperationHelper.wait_for_ns_servers_or_assert(stoped_nodes, self)
         self.verification(self.servers[:self.nodes_init])
Code Example #17
    def common_setup(self, replica):
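        # Clean the cluster, add every server, create a replicated "default" bucket
        # and rebalance all nodes in before the test starts.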
        self._input = TestInputSingleton.input
        self._servers = self._input.servers
        first = self._servers[0]
        self.log = logger.Logger().get_logger()
        self.log.info(self._input)
        rest = RestConnection(first)
        for server in self._servers:
            RestHelper(RestConnection(server)).is_ns_server_running()

        ClusterOperationHelper.cleanup_cluster(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.add_all_nodes_or_assert(self._servers[0], self._servers, self._input.membase_settings, self)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        info = rest.get_nodes_self()
        bucket_ram = info.mcdMemoryReserved * 3 / 4
        rest.create_bucket(bucket="default",
                           ramQuotaMB=int(bucket_ram),
                           replicaNumber=replica,
                           proxyPort=rest.get_nodes_self().moxi)
        msg = "wait_for_memcached fails"
        ready = BucketOperationHelper.wait_for_memcached(first, "default")
        self.assertTrue(ready, msg)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(rebalanceStarted,
                        "unable to start rebalance on master node {0}".format(first.ip))
        self.log.info('started rebalance operation on master node {0}'.format(first.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        # without a bucket this seems to fail
        self.assertTrue(rebalanceSucceeded,
                        "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
        self.awareness = VBucketAwareMemcached(rest, "default")
Code Example #18
 def tearDown(self):
         try:
             if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                 and TestInputSingleton.input.param("stop-on-failure", False))\
                     or self.input.param("skip_cleanup", False):
                 self.log.warn("CLEANUP WAS SKIPPED")
             else:
                 self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                       .format(self.case_number, self._testMethodName))
                 rest = RestConnection(self.master)
                 alerts = rest.get_alerts()
                 if alerts is not None and len(alerts) != 0:
                     self.log.warn("Alerts were found: {0}".format(alerts))
                 if rest._rebalance_progress_status() == 'running':
                     self.log.warning("rebalancing is still running, test should be verified")
                     stopped = rest.stop_rebalance()
                     self.assertTrue(stopped, msg="unable to stop rebalance")
                 BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                 ClusterOperationHelper.cleanup_cluster(self.servers)
                 self.sleep(10)
                 ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                 self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                       .format(self.case_number, self._testMethodName))
         except BaseException:
             # increase case_number to retry tearDown in setup for the next test
             self.case_number += 1000
         finally:
             # stop all existing task manager threads
             self.cluster.shutdown()
             self._log_finish(self)
Code Example #19
File: createtests.py Project: jchris/testrunner
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.input = TestInputSingleton.input
     self.assertTrue(self.input, msg="input parameters missing...")
     self.servers = self.input.servers
     self.master = self.servers[0]
     rest = RestConnection(self.master)
     rest.init_cluster(username=self.master.rest_username,
                       password=self.master.rest_password)
     info = rest.get_nodes_self()
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
     rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
     BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
     ClusterOperationHelper.cleanup_cluster(servers=self.servers)
     credentials = self.input.membase_settings
     ClusterOperationHelper.add_all_nodes_or_assert(master=self.master, all_servers=self.servers, rest_settings=credentials, test_case=self)
     rest = RestConnection(self.master)
     nodes = rest.node_statuses()
     otpNodeIds = []
     for node in nodes:
         otpNodeIds.append(node.id)
     rebalanceStarted = rest.rebalance(otpNodeIds, [])
     self.assertTrue(rebalanceStarted,
                     "unable to start rebalance on master node {0}".format(self.master.ip))
     self.log.info('started rebalance operation on master node {0}'.format(self.master.ip))
     rebalanceSucceeded = rest.monitorRebalance()
Code Example #20
File: uibasetest.py Project: arod1987/testrunner
 def tearDown(self):
     try:
         test_failed = len(self._resultForDoCleanups.errors)
         if self.driver and test_failed:
             BaseHelper(self).create_screenshot()
         if self.driver:
             self.driver.close()
         if test_failed and TestInputSingleton.input.param("stop-on-failure", False):
             print "test fails, teardown will be skipped!!!"
             return
         rest = RestConnection(self.servers[0])
         try:
             reb_status = rest._rebalance_progress_status()
         except ValueError as e:
             if e.message == 'No JSON object could be decoded':
                 print "cluster not initialized!!!"
                 return
         if reb_status == 'running':
             stopped = rest.stop_rebalance()
             self.assertTrue(stopped, msg="unable to stop rebalance")
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
         for server in self.servers:
             ClusterOperationHelper.cleanup_cluster([server])
         ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
     except Exception as e:
         raise e
     finally:
         if self.driver:
             self.shell.disconnect()
Code Example #21
File: swaprebalance.py Project: jason-hou/testrunner
 def reset(self):
     self.log.info(
         "==============  SwapRebalanceBase cleanup was started for test #{0} {1} ==============".format(
             self.case_number, self._testMethodName
         )
     )
     self.log.info("Stopping load in Teardown")
     SwapRebalanceBase.stop_load(self.loaders)
     for server in self.servers:
         rest = RestConnection(server)
         if rest._rebalance_progress_status() == "running":
             self.log.warning("rebalancing is still running, test should be verified")
             stopped = rest.stop_rebalance()
             self.assertTrue(stopped, msg="unable to stop rebalance")
     BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
     for server in self.servers:
         ClusterOperationHelper.cleanup_cluster([server])
         if server.data_path:
             rest = RestConnection(server)
             rest.set_data_path(data_path=server.data_path)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
     self.log.info(
         "==============  SwapRebalanceBase cleanup was finished for test #{0} {1} ==============".format(
             self.case_number, self._testMethodName
         )
     )
Code Example #22
    def test_insert_x_docs_during_rebalance(self):
        num_docs = self.helper.input.param("num-docs", 100000)
        msg = "description : have a single node, insert {0} docs, "\
            "query it, add another node, start rebalancing, insert {0} "\
            "docs, finish rebalancing, keep on adding nodes..."
        self.log.info(msg.format(num_docs))
        design_name = "dev_test_insert_{0}_docs_during_rebalance".format(
            num_docs)
        prefix = str(uuid.uuid4())[:7]

        # Make sure we are fully de-clustered
        ClusterOperationHelper.cleanup_cluster(self.helper.servers)

        self.helper.create_index_fun(design_name)
        inserted_keys = self.helper.insert_docs(num_docs, prefix)

        # Add all servers to the master server one by one and start
        # rebalacing
        for server in self.helper.servers[1:]:
            ClusterOperationHelper.add_and_rebalance(
                [self.helper.master, server], False)
            # Docs with the same prefix are overwritten and not newly created
            prefix = str(uuid.uuid4())[:7]
            inserted_keys.extend(self.helper.insert_docs(
                    num_docs, prefix, wait_for_persistence=False))
            self._wait_for_rebalance()

        # Make sure data is persisted
        self.helper.wait_for_persistence()

        # Verify that all documents got inserted
        self.helper.query_index_for_verification(design_name, inserted_keys)
Code Example #23
File: rebalancingtests.py Project: jchris/testrunner
    def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
        log = logger.Logger.get_logger()
        servers = input.servers
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
        serverInfo = servers[0]

        log.info('picking server : {0} as the master'.format(serverInfo))
        #if all nodes are on the same machine let's have the bucket_ram_ratio as bucket_ram_ratio * 1/len(servers)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        if "ascii" in TestInputSingleton.input.test_params\
        and TestInputSingleton.input.test_params["ascii"].lower() == "true":
            BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                          howmany=1, sasl=False)
        else:
            BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                          howmany=1, sasl=True)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            testcase.assertTrue(ready, "wait_for_memcached failed")
Code Example #24
 def test_prepared_with_warmup(self):
     try:
         num_srv_warm_up = self.input.param("srv_warm_up", 1)
         if self.input.tuq_client is None:
             self.fail("For this test external tuq server is requiered. " +\
                       "Please specify one in conf")
         self.test_union_all()
         for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             remote.start_server()
             remote.disconnect()
             #run query, result may not be as expected, but tuq shouldn't fail
         try:
             self.test_union_all()
         except:
             pass
         ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)
         self.verify_cluster_stats(self.servers[:self.nodes_init])
         self.sleep(50)
         self.verify_cluster_stats(self.servers[:self.nodes_init])
         self.log.info("-"*100)
         self.log.info("Querying alternate query node to test the encoded_prepare ....")
         self.test_prepared_union()
         self.log.info("-"*100)
     finally:
         self.log.info("Done with encoded_prepare ....")
Code Example #25
    def install(self, params):
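        # Download the requested build and install it on the target server; Windows uses
        # the win-specific helpers, other platforms go through membase_install with an
        # optional custom data path and vbucket count.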
#        log = logger.new_logger("Installer")
        build = self.build_url(params)
        remote_client = RemoteMachineShellConnection(params["server"])
        info = remote_client.extract_remote_info()
        type = info.type.lower()
        server = params["server"]
        if "vbuckets" in params:
            vbuckets = int(params["vbuckets"][0])
        else:
            vbuckets = None
        if type == "windows":
            build = self.build_url(params)
            remote_client.download_binary_in_win(build.url, params["product"], params["version"])
            remote_client.membase_install_win(build, params["version"])
        else:
            downloaded = remote_client.download_build(build)
            if not downloaded:
                log.error('unable to download binaries : {0}'.format(build.url))
            #TODO: need separate methods in remote_util for couchbase and membase install
            path = server.data_path or '/tmp'
            remote_client.membase_install(build, path=path, vbuckets=vbuckets)
            log.info('wait 5 seconds for membase server to start')
            time.sleep(5)
        if "rest_vbuckets" in params:
            rest_vbuckets = int(params["rest_vbuckets"])
            ClusterOperationHelper.set_vbuckets(server, rest_vbuckets)
Code Example #26
    def _cluster_setup(self):
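        # Initialize the master node, rebalance all servers in, create one or more
        # buckets and load 'keys_count' items into each of them.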
        replicas = self.input.param("replicas", 1)
        keys_count = self.input.param("keys-count", 0)
        num_buckets = self.input.param("num-buckets", 1)

        bucket_name = "default"
        master = self.servers[0]
        credentials = self.input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
        ClusterOperationHelper.add_and_rebalance(self.servers, True)

        if num_buckets == 1:
            bucket_ram = info.memoryQuota * 2 / 3
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi)
        else:
            created = BucketOperationHelper.create_multiple_buckets(self.master, replicas, howmany=num_buckets)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
                ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
                self.assertTrue(ready, msg="wait_for_memcached failed")

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(self.master, bucket.name, keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))
Code Example #27
File: biXDCR.py Project: ashvindersingh/testrunner
    def replication_while_rebooting_a_non_master_destination_node(self):
        self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
        self._load_all_buckets(self.dest_master, self.gen_create2, "create", 0)
        self._async_update_delete_data()
        self.sleep(self._timeout)

        reboot_node_dest = self.dest_nodes[len(self.dest_nodes) - 1]
        shell = RemoteMachineShellConnection(reboot_node_dest)
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)
        reboot_node_src = self.src_nodes[len(self.src_nodes) - 1]
        shell = RemoteMachineShellConnection(reboot_node_src)
        if shell.extract_remote_info().type.lower() == 'windows':
            o, r = shell.execute_command("shutdown -r -f -t 0")
        elif shell.extract_remote_info().type.lower() == 'linux':
            o, r = shell.execute_command("reboot")
        shell.log_command_output(o, r)

        self.sleep(360)
        ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
        ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
        self.merge_buckets(self.src_master, self.dest_master, bidirection=True)
        self.verify_results(verify_src=True)
Code Example #28
    def setUp(self):
        self._cleanup_nodes = []
        self._failed_nodes = []
        super(FailoverBaseTest, self).setUp()
        self.bidirectional = self.input.param("bidirectional", False)
        self._value_size = self.input.param("value_size", 256)
        self.dgm_run = self.input.param("dgm_run", True)
        credentials = self.input.membase_settings
        self.add_back_flag = False
        self.during_ops = self.input.param("during_ops", None)

        self.log.info(
            "==============  FailoverBaseTest setup was started for test #{0} {1}==============".format(
                self.case_number, self._testMethodName
            )
        )
        try:
            rest = RestConnection(self.master)
            ClusterOperationHelper.add_all_nodes_or_assert(self.master, self.servers, credentials, self)
            nodes = rest.node_statuses()
            rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
            msg = "rebalance failed after adding these nodes {0}".format(nodes)
            self.assertTrue(rest.monitorRebalance(), msg=msg)
        except Exception, e:
            self.cluster.shutdown()
            self.fail(e)
Code Example #29
    def test_full_eviction_changed_to_value_eviction(self):
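        # Load data, switch each bucket to value-only eviction via couchbase-cli, wait for
        # the nodes to settle, then expect a subsequent memcached set to fail.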

        KEY_NAME = 'key1'

        gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
        gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen_create, "create", 0)

        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])
        remote = RemoteMachineShellConnection(self.master)
        for bucket in self.buckets:
            output, _ = remote.execute_couchbase_cli(cli_command='bucket-edit',
                                                         cluster_host="localhost",
                                                         user=self.master.rest_username,
                                                         password=self.master.rest_password,
                                                         options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
            self.assertTrue(' '.join(output).find('SUCCESS') != -1, 'Eviction policy wasn\'t changed')
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
                                            self.servers[:self.nodes_init], self,
                                            wait_time=self.wait_timeout, wait_if_warmup=True)
        self.sleep(10, 'Wait some time before next load')
        #self._load_all_buckets(self.master, gen_create2, "create", 0)
        #import pdb;pdb.set_trace()


        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        mcd = client.memcached(KEY_NAME)
        try:
            rc = mcd.set(KEY_NAME, 0,0, json.dumps({'value':'value2'}))
            self.fail('Bucket is incorrectly functional')
        except MemcachedError, e:
            pass   # this is the exception we are hoping for
Code Example #30
 def common_setUp(self, with_buckets):
     ClusterOperationHelper.cleanup_cluster(self.servers)
     server = self.servers[0]
     if with_buckets:
         BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
         ok = BucketOperationHelper.create_multiple_buckets(server, 1)
         if not ok:
             self.fail("unable to create multiple buckets on this node : {0}".format(server))
Code Example #31
    def replication_while_rebooting_a_non_master_src_dest_node(self):
        self.setup_xdcr_and_load()
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout)

        reboot_node_dest = self.dest_cluster.reboot_one_node(self)
        NodeHelper.wait_node_restarted(reboot_node_dest, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)

        reboot_node_src = self.src_cluster.reboot_one_node(self)
        NodeHelper.wait_node_restarted(reboot_node_src, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True)

        self.sleep(120)
        ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
        ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
        self.verify_results()
Code Example #32
 def test_expired_mutation(self):
     if self.non_default_collection:
         self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket.src_bucket.src_bucket", expiry=100, wait_for_loading=False)
     else:
         self.load_data_to_collection(self.docs_per_day * self.num_docs, "src_bucket._default._default", expiry=100, wait_for_loading=False)
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
     body = self.create_save_function_body(self.function_name, "handler_code/bucket_op_expired.js")
     self.deploy_function(body)
     # Wait for eventing to catch up with all the expiry mutations and verify results
     if self.non_default_collection:
         self.verify_doc_count_collections("src_bucket.src_bucket.src_bucket", 0)
     else:
         self.verify_doc_count_collections("src_bucket._default._default", 0)
     self.undeploy_and_delete_function(body)
Code Example #33
 def _reboot_node(self, node):
     self.log.info("Rebooting node '{0}'....".format(node.ip))
     shell = RemoteMachineShellConnection(node)
     if shell.extract_remote_info().type.lower() == 'windows':
         o, r = shell.execute_command("shutdown -r -f -t 0")
     elif shell.extract_remote_info().type.lower() == 'linux':
         o, r = shell.execute_command("reboot")
     shell.log_command_output(o, r)
     # wait for restart and warmup on all node
     self.sleep(self.wait_timeout * 5)
     # disable firewall on these nodes
     self.stop_firewall_on_node(node)
     # wait till node is ready after warmup
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         [node], self, wait_if_warmup=True)
Code Example #34
File: memcapable.py Project: rayleyva/testrunner
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.params = TestInputSingleton.input.test_params
     self.master = TestInputSingleton.input.servers[0]
     rest = RestConnection(self.master)
     rest.init_cluster(self.master.rest_username, self.master.rest_password)
     info = rest.get_nodes_self()
     rest.init_cluster_memoryQuota(self.master.rest_username, self.master.rest_password,
                                   memoryQuota=info.mcdMemoryReserved)
     ClusterOperationHelper.cleanup_cluster([self.master])
     ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master], self)
     self._create_default_bucket()
     self.keys_cleanup = []
     self.onenodemc = MemcachedClientHelper.direct_client(self.master, "default", timeout=600)
     self.onenodemoxi = MemcachedClientHelper.proxy_client(self.master, "default", timeout=600)
Code Example #35
 def test_warmup(self):
     self.run_async_data()
     for server in self.nodes_out_list:
         remote = RemoteMachineShellConnection(server)
         remote.stop_server()
         remote.start_server()
         remote.disconnect()
         self.sleep(120, "Wait for warmup")
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         self.servers, self)
     self.run_mutation_operations_for_situational_tests()
     for t in self.load_thread_list:
         if t.is_alive():
             if t is not None:
                 t.signal = False
Code Example #36
File: basetestcase.py Project: sumedhpb/Jython
 def _cluster_cleanup(self):
     rest = RestConnection(self.master)
     alerts = rest.get_alerts()
     if rest._rebalance_progress_status() == 'running':
         self.kill_memcached()
         log.warning(
             "rebalancing is still running, test should be verified")
         stopped = rest.stop_rebalance()
         self.assertTrue(stopped, msg="unable to stop rebalance")
     self.delete_all_buckets_or_assert(self.servers)
     ClusterOperationHelper.cleanup_cluster(self.servers,
                                            master=self.master)
     #         self.sleep(10)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         self.servers, self)
Code Example #37
    def _test_cluster_topology_change_body(self):
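        # Load and back up a bucket from every node, shrink the cluster, recreate the
        # bucket, restore the backups and verify the data.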
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             ram_load_ratio=1,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
        self.assertTrue(ready, "not all items persisted. see logs")
        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = []
        for i in range(0, len(self.servers) - 1):
            servers.append(self.servers[i])

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
Code Example #38
    def tearDown(self):
        try:
            test_failed = (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures or self._resultForDoCleanups.errors)) \
                or (hasattr(self, '_exc_info') and self._exc_info()[1] is not None)
            if test_failed and TestInputSingleton.input.param("stop-on-failure", False)\
                    or self.input.param("skip_cleanup", False):
                self.log.warn("CLEANUP WAS SKIPPED")
            else:
                if test_failed and self.input.param('BUGS', False):
                    self.log.warn(
                        "Test failed. Possible reason is: {0}".format(
                            self.input.param('BUGS', False)))

                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning(
                        "rebalancing is still running, test should be verified"
                    )
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(
                    self.servers, self)
                if self.input.param("forceEject", False):
                    for server in self.servers:
                        if server != self.servers[0]:
                            try:
                                rest = RestConnection(server)
                                rest.force_eject_node()
                            except BaseException, e:
                                self.log.error(e)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                self.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        except BaseException:
            # increase case_number to retry tearDown in setup for the next test
            self.case_number += 1000
        finally:
            # stop all existing task manager threads
            self.cluster.shutdown()
            self._log_finish(self)
Code Example #39
 def test_observe_with_warmup(self):
     self._load_doc_data_all_buckets('create', 0, self.num_items)
     # Persist all the loaded data item
     self.log.info("Nodes in cluster: %s" % self.servers[:self.nodes_init])
     for bucket in self.buckets:
         self.log.info('\n\nwaiting for persistence')
         RebalanceHelper.wait_for_persistence(self.master, bucket)
         self.log.info('\n\n_stats_befor_warmup')
         self._stats_befor_warmup(bucket.name)
         self.log.info('\n\n_restart_memcache')
         self._restart_memcache(bucket.name)
         # for bucket in self.buckets:
         self.log.info('\n\n_wait_warmup_completed')
         ClusterOperationHelper._wait_warmup_completed(
             self, self.servers[:self.nodes_init], bucket.name)
         self._run_observe(self)
Code Example #40
 def _online_upgrade(self,
                     update_servers,
                     extra_servers,
                     check_newmaster=True):
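     # Online upgrade: rebalance the new-version nodes in, verify the orchestrator
     # (optionally expecting it to move to a new node), then rebalance the old nodes out.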
     RestConnection(update_servers[0]).get_nodes_versions()
     added_versions = RestConnection(extra_servers[0]).get_nodes_versions()
     self.cluster.rebalance(update_servers + extra_servers, extra_servers,
                            [])
     self.log.info("Rebalance in all {0} nodes completed".format(
         added_versions[0]))
     RestConnection(update_servers[0]).get_nodes_versions()
     self.sleep(self.sleep_time)
     status, content = ClusterOperationHelper.find_orchestrator(
         update_servers[0])
     self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                     format(status, content))
     self.log.info("after rebalance in the master is {0}".format(content))
     if check_newmaster and not self.upgrade_same_version:
         FIND_MASTER = False
         for new_server in extra_servers:
             if content.find(new_server.ip) >= 0:
                 FIND_MASTER = True
                 self.log.info("{0} Node {1} becomes the master".format(
                     added_versions[0], new_server.ip))
                 break
         if not FIND_MASTER:
             raise Exception(
                 "After rebalance in {0} Nodes, one of them doesn't become the master"
                 .format(added_versions[0]))
     self.log.info("Rebalanced out all old version nodes")
     self.cluster.rebalance(update_servers + extra_servers, [],
                            update_servers)
     if self.upgrade_versions[0] >= "3.0.0":
         self._enable_xdcr_trace_logging(extra_servers)
Code Example #41
    def rebalance_in_with_warming_up(self):
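        # Restart couchbase on one initial node so it is warming up, then rebalance new
        # nodes in; if that rebalance fails, wait for warmup to complete and retry.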
        servs_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
        servs_init = self.servers[:self.nodes_init]
        warmup_node = servs_init[-1]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        self.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        try:
            rebalance = self.cluster.async_rebalance(
                servs_init, servs_in, [],
                sleep_before_rebalance=self.sleep_before_rebalance)
            rebalance.result()
        except RebalanceFailedException:
            self.log.info("rebalance was failed as expected")
            self.assertTrue(ClusterOperationHelper._wait_warmup_completed(self, [warmup_node], \
                            self.default_bucket_name, wait_time=self.wait_timeout * 10))

            self.log.info("second attempt to rebalance")
            rebalance = self.cluster.async_rebalance(
                servs_init + servs_in, [], [],
                sleep_before_rebalance=self.sleep_before_rebalance)
            rebalance.result()
        self.verify_cluster_stats(self.servers[:self.nodes_in + self.nodes_init])
        self.verify_unacked_bytes_all_buckets()
Code Example #42
 def setUp(self):
     super(EventingUpgrade, self).setUp()
     self.rest = RestConnection(self.master)
     self.server = self.master
     self.queue = queue.Queue()
     self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
     self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
     self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
     self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
     self.dst_bucket_curl = self.input.param('dst_bucket_curl', 'dst_bucket_curl')
     self.source_bucket_mutation = self.input.param('source_bucket_mutation', 'source_bucket_mutation')
     self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
     self.n1ql_op_dst=self.input.param('n1ql_op_dst', 'n1ql_op_dst')
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.upgrade_version = self.input.param("upgrade_version")
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 60, bucket=self.src_bucket_name)
Code Example #43
    def update_orchestrator(self, ref_node=None, retry=5):

        if len(self.nodes) > 0:

            if ref_node is None:
                ref_node = self.nodes[0]

            address = {'server_ip': ref_node.ip, 'port': ref_node.port}
            rest = create_rest(**address)

            status, content = ClusterOperationHelper.find_orchestrator_with_rest(
                rest)

            if status == True:
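                # content is an otpNode name such as 'ns_1@10.1.2.3'; strip the Erlang prefix and split off an optional port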
                content = re.sub(r".*@", "", content).strip("'").split(':')
                orchestrator_ip, orchestrator_port = \
                    content[0], content[1] if len(content) > 1 else cfg.COUCHBASE_PORT

                # look up matching node in self nodes
                for node in self.nodes:
                    if node.ip == orchestrator_ip and \
                        int(node.port) == int(orchestrator_port):
                        self.orchestrator = node
                        break
            elif retry > 0:
                # wait
                time.sleep(5)

                # select random node and retry
                ref_node = self.nodes[random.randint(0, len(self.nodes) - 1)]
                retry = retry - 1
                return self.update_orchestrator(ref_node, retry)
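
A minimal standalone sketch of the orchestrator-name parsing used above, assuming the REST layer reports an Erlang otpNode name such as "ns_1@10.1.2.3", optionally followed by ":port"; the default port of 8091 here is only an illustrative assumption, not taken from the source.

import re

def parse_orchestrator_node(content, default_port=8091):
    # Strip the Erlang node prefix (e.g. "ns_1@") and surrounding quotes,
    # then split off an optional ":port" suffix.
    host = re.sub(r".*@", "", content).strip("'").split(':')
    ip = host[0]
    port = int(host[1]) if len(host) > 1 else default_port
    return ip, port

# parse_orchestrator_node("ns_1@10.1.2.3") -> ("10.1.2.3", 8091)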
Code example #44
0
    def _failover_swap_rebalance(self):
        master = self.servers[0]
        rest = RestConnection(master)
        creds = self.input.membase_settings
        num_initial_servers = self.num_initial_servers
        initial_servers = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
        self.assertTrue(status, msg="Rebalance failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
        optNodesIds = [node.id for node in toBeEjectedNodes]
        if self.fail_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
            optNodesIds[0] = content

        self.log.info("FAILOVER PHASE")
        # Failover selected nodes
        for node in optNodesIds:
            self.log.info("failover node {0} and rebalance afterwards".format(node))
            rest.fail_over(node)
            self.assertTrue(rest.monitorRebalance(),
                msg="failed after failover of {0}".format(node))

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.failover_factor]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.fail_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
            ejectedNodes=optNodesIds)

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(new_swap_servers))

        SwapRebalanceBase.verification_phase(self, master)
Code example #45
0
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.servers = TestInputSingleton.input.servers
     self.params = TestInputSingleton.input.test_params
     master = self.servers[0]
     rest = RestConnection(self.servers[0])
     rest.init_cluster(master.rest_username, master.rest_password)
     rest.init_cluster_memoryQuota(master.rest_username,
                                   master.rest_password,
                                   memoryQuota=1000)
     ClusterOperationHelper.cleanup_cluster(self.servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         self.servers, self)
     self._create_default_bucket()
     self.smartclient = MemcachedClientHelper.direct_client(
         master, "default")
Code example #46
0
 def online_upgrade_with_failover(self, services=None):
     servers_in = self.servers[self.nodes_init:self.num_servers]
     self.cluster.rebalance(self.servers[:self.nodes_init],
                            servers_in, [],
                            services=services)
     log.info("Rebalanced in all {0} nodes" \
              .format(self.input.param("upgrade_version", "")))
     self.sleep(self.sleep_time)
     status, content = ClusterOperationHelper.find_orchestrator(self.master)
     self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}". \
                     format(status, content))
     FIND_MASTER = False
     for new_server in servers_in:
         if content.find(new_server.ip) >= 0:
             self._new_master(new_server)
             FIND_MASTER = True
             self.log.info("%s node %s becomes the master" \
                           % (self.input.param("upgrade_version", ""), new_server.ip))
             break
     if self.input.param("initial_version", "")[:5] in COUCHBASE_VERSION_2 \
             and not FIND_MASTER:
         raise Exception(
             "After rebalance in {0} nodes, no {0} node became the master"
             .format(self.input.param("upgrade_version", "")))
     servers_out = self.servers[:self.nodes_init]
     self._new_master(self.servers[self.nodes_init])
     log.info("failover and rebalance nodes")
     self.cluster.failover(self.servers[:self.num_servers],
                           failover_nodes=servers_out,
                           graceful=False)
     self.cluster.rebalance(self.servers[:self.num_servers], [],
                            servers_out)
     self.sleep(180)
Code example #47
0
 def tearDown(self):
     """ Some tests involve killing the couchbase server.  If a test step fails
         right after the erlang process is killed, we need to start couchbase server
         in tearDown so that the next test does not falsely fail """
     super(RackzoneBaseTest, self).tearDown()
     ClusterOperationHelper.cleanup_cluster(self.servers, master=self.master)
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         shell.start_couchbase()
         self.sleep(7, "Time needed for couchbase server to start completely.")
     serverInfo = self.servers[0]
     rest = RestConnection(serverInfo)
     zones = rest.get_zone_names()
     for zone in zones:
         if zone != "Group 1":
             rest.delete_zone(zone)
Code example #48
0
    def rebalance_out_with_warming_up(self):
        master_restart = self.input.param("master_restart", False)
        if master_restart:
            warmup_node = self.master
        else:
            warmup_node = self.servers[len(self.servers) - self.nodes_out - 1]
        servs_out = self.servers[len(self.servers) - self.nodes_out:]
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        self.sleep(20)
        shell.start_couchbase()
        shell.disconnect()
        try:
            rebalance = self.cluster.async_rebalance(self.servers, [],
                                                     servs_out)
            rebalance.result()
        except RebalanceFailedException:
            self.log.info("rebalance failed as expected")
            self.assertTrue(ClusterOperationHelper._wait_warmup_completed(self, [warmup_node], \
                            self.default_bucket_name, wait_time=self.wait_timeout * 10))

            self.log.info("second attempt to rebalance")
            rebalance = self.cluster.async_rebalance(self.servers, [],
                                                     servs_out)
            rebalance.result()
        self.verify_cluster_stats(self.servers[:len(self.servers) -
                                               self.nodes_out])
        self.verify_unacked_bytes_all_buckets()
Code example #49
0
    def _cluster_setup(self):
        log = logger.Logger.get_logger()

        replicas = self._input.param("replicas", 1)
        keys_count = self._input.param("keys-count", 0)
        num_buckets = self._input.param("num-buckets", 1)

        bucket_name = "default"
        master = self._servers[0]
        credentials = self._input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
        ClusterOperationHelper.add_all_nodes_or_assert(self.master,
                                                       self._servers,
                                                       credentials, self)
        bucket_ram = info.memoryQuota * 2 // 3  # integer MB; e.g. a 1536 MB quota yields a 1024 MB bucket

        if num_buckets == 1:
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi)
        else:
            created = BucketOperationHelper.create_multiple_buckets(
                self.master, replicas, howmany=num_buckets)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(
                self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(self.master, bucket.name,
                                               keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))

        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)
        self.assertTrue(ready, "wait_for_memcached failed")
Code example #50
0
 def rebalance(self):
     while not self.finished:
         ClusterOperationHelper.begin_rebalance_in(self.master,
                                                   self.servers)
         ClusterOperationHelper.end_rebalance(self.master)
         if not self.finished:
             ClusterOperationHelper.begin_rebalance_out(
                 self.master, self.servers[-1:])
             ClusterOperationHelper.end_rebalance(self.master)
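
A self-contained sketch of how a background rebalance loop like the one above is typically driven from a test; the RebalanceWorker class, its attribute names, and the sleep interval are hypothetical stand-ins rather than anything from the source.

import threading
import time

class RebalanceWorker:
    """Toy stand-in exposing the same finished/rebalance contract as the loop above."""
    def __init__(self):
        self.finished = False

    def rebalance(self):
        # Keep cycling rebalance work until the driver flips the flag.
        while not self.finished:
            # placeholder for the begin/end rebalance-in and rebalance-out calls
            time.sleep(1)

worker = RebalanceWorker()
thread = threading.Thread(target=worker.rebalance)
thread.start()
# ... run the actual test workload while rebalances cycle in the background ...
worker.finished = True
thread.join()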
Code example #51
0
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3

        # Add built-in user
        testuser = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'password': '******'
        }]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'roles': 'admin'
        }]
        RbacBase().add_user_role(role_list, RestConnection(self.master),
                                 'builtin')
        time.sleep(10)

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self._bucket_name, rest),
                        msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(
            serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Code example #52
0
 def test_warm_up_with_eviction(self):
     gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
     gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, gen_create, "create", 0)
     self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
     self._verify_stats_all_buckets(self.servers[:self.nodes_init])
     self.timeout = self.wait_timeout
     self.without_access_log = False
     self._stats_befor_warmup(self.buckets[0])
     self._restart_memcache(self.buckets[0])
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         self.servers[:self.nodes_init], self,
         wait_time=self.wait_timeout, wait_if_warmup=True)
     self.sleep(10, 'Wait some time before next load')
     self._load_all_buckets(self.master, gen_create2, "create", 0)
     self._wait_for_stats_all_buckets(self.servers[:self.nodes_init], timeout=self.wait_timeout * 5)
     self._verify_stats_all_buckets(self.servers[:self.nodes_init])
Code example #53
0
 def test_expiry_mutation_for_dcp_stream_boundary_from_beginning(self):
     self.load(load_gen=self.gens_load, bucket=self.src_bucket)
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master,
                                         "exp_pager_stime",
                                         1,
                                         bucket=self.src_bucket_name)
     body = self.create_save_function_body(
         self.function_name,
         HANDLER_CODE.BUCKET_OPS_ON_DELETE,
         worker_count=3)
     self.deploy_function(body)
     # Wait for eventing to catch up with all the expiry mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  self.docs_per_day * 2016,
                                  on_delete=True)
     self.undeploy_and_delete_function(body)
Code example #54
0
 def wait_node_restarted(self, server, wait_time=120, wait_if_warmup=False, check_service=False):
     now = time.time()
     if check_service:
         self.wait_service_started(server, wait_time)
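         # shrink the remaining wait budget by the time already spent waiting for the service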
         wait_time = now + wait_time - time.time()
     num = 0
     while num < wait_time / 10:
         try:
             ClusterOperationHelper.wait_for_ns_servers_or_assert(
                                         [server], self, wait_time=wait_time - num * 10, wait_if_warmup=wait_if_warmup)
             break
         except BaseException as e:
             if str(e).find('couchApiBase doesn') != -1 or str(e).find('unable to reach') != -1:
                 num += 1
                 self.sleep(10)
             else:
                 raise e
Code example #55
0
File: newupgradetests.py Project: saigon/testrunner
    def offline_cluster_upgrade_and_rebalance(self):
        num_stoped_nodes = self.input.param('num_stoped_nodes',
                                            self.nodes_init)
        stoped_nodes = self.servers[self.nodes_init -
                                    num_stoped_nodes:self.nodes_init]
        servs_out = self.servers[self.nodes_init - num_stoped_nodes -
                                 self.nodes_out:self.nodes_init -
                                 num_stoped_nodes]
        servs_in = self.servers[self.nodes_init:self.nodes_init +
                                self.nodes_in]
        self._install(self.servers)
        self.operations(self.servers[:self.nodes_init])
        if self.ddocs_num:
            self.create_ddocs_and_views()
        self.sleep(self.sleep_time,
                   "Pre-setup of old version is done. Wait for upgrade")
        if self.during_ops:
            for opn in self.during_ops:
                getattr(self, opn)()
        for upgrade_version in self.upgrade_versions:

            for server in stoped_nodes:
                remote = RemoteMachineShellConnection(server)
                remote.stop_server()
                remote.disconnect()
            upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
            try:
                self.cluster.rebalance(self.servers[:self.nodes_init],
                                       servs_in, servs_out)
            except RebalanceFailedException:
                self.log.info("rebalance failed as expected")
            for upgrade_thread in upgrade_threads:
                upgrade_thread.join()
            success_upgrade = True
            while not self.queue.empty():
                success_upgrade &= self.queue.get()
            if not success_upgrade:
                self.fail("Upgrade failed!")
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                stoped_nodes, self)
            self.cluster.rebalance(self.servers[:self.nodes_init], [],
                                   servs_out)
            self.verification(
                list(
                    set(self.servers[:self.nodes_init] + servs_in) -
                    set(servs_out)))
Code example #56
0
 def test_delete_create_bucket_and_query(self):
     # Initialization operation
     self.run_multi_operations(buckets = self.buckets,
         query_definitions = self.query_definitions,
         create_index = True, drop_index = False,
         query_with_explain = self.run_query_with_explain, query = self.run_query)
     #Remove bucket and recreate it
     for bucket in self.buckets:
         self.rest.delete_bucket(bucket.name)
     self.sleep(2)
     # Recreate the buckets
     self._bucket_creation()
     self.sleep(2)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
     # Verify that no indexes remain after the buckets were recreated
     self.verify_index_absence(query_definitions = self.query_definitions, buckets = self.buckets)
     index_map = self.get_index_stats()
     self.assertTrue(len(index_map) == 0, "Index Stats still show {0}".format(index_map))
Code example #57
0
    def setup_cluster(self):
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)
        self.rest.init_cluster(self.master.rest_username,
                               self.master.rest_password)
        self.rest.init_cluster_memoryQuota(self.master.rest_username,
                                           self.master.rest_password,
                                           memoryQuota=mem_quota)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master],
                                                             self.testcase)

        rebalanced = ClusterOperationHelper.add_and_rebalance(self.servers)
        self.testcase.assertTrue(rebalanced, "cluster is not rebalanced")

        self._create_default_bucket()
Code example #58
0
    def test_full_eviction_changed_to_value_eviction(self):

        KEY_NAME = 'key1'

        gen_create = BlobGenerator('eviction',
                                   'eviction-',
                                   self.value_size,
                                   end=self.num_items)
        gen_create2 = BlobGenerator('eviction2',
                                    'eviction2-',
                                    self.value_size,
                                    end=self.num_items)
        self._load_all_buckets(self.master, gen_create, "create", 0)

        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])
        remote = RemoteMachineShellConnection(self.master)
        for bucket in self.buckets:
            output, _ = remote.execute_couchbase_cli(
                cli_command='bucket-edit',
                cluster_host="localhost:8091",
                user=self.master.rest_username,
                password=self.master.rest_password,
                options='--bucket=%s --bucket-eviction-policy=valueOnly' %
                bucket.name)
            self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                            'Eviction policy wasn\'t changed')
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            self.servers[:self.nodes_init],
            self,
            wait_time=self.wait_timeout,
            wait_if_warmup=True)
        self.sleep(10, 'Wait some time before next load')
        #self._load_all_buckets(self.master, gen_create2, "create", 0)
        #import pdb;pdb.set_trace()

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        mcd = client.memcached(KEY_NAME)
        try:
            rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
            self.fail('Bucket is incorrectly functional')
        except MemcachedError:
            pass  # this is the exception we are hoping for
Code example #59
0
File: viewssecurity.py Project: couchbase/testrunner
    def test_view_ops_n2n_encryption_enabled(self):

        ntonencryptionBase().disable_nton_cluster([self.master])
        self.log.info("###### Generating x509 certificate#####")
        self.generate_x509_certs(self.servers)
        self.log.info("###### uploading x509 certificate#####")
        self.upload_x509_certs(self.servers)

        self._load_doc_data_all_buckets()
        for bucket in self.buckets:
            self._execute_ddoc_ops("create",
                                   self.test_with_view,
                                   self.num_ddocs,
                                   self.num_views_per_ddoc,
                                   bucket=bucket)

        self._wait_for_stats_all_buckets([self.master])
        ntonencryptionBase().setup_nton_cluster(
            [self.master], clusterEncryptionLevel="strict")
        self.x509enable = True

        encryption_result = ntonencryptionBase().setup_nton_cluster(
            self.servers, 'enable', self.ntonencrypt_level)
        self.assertTrue(encryption_result,
                        "Retries Exceeded. Cannot enable n2n encryption")

        self._verify_ddoc_ops_all_buckets()
        self._verify_ddoc_data_all_buckets()

        self._execute_ddoc_ops("update",
                               self.test_with_view,
                               self.num_ddocs // 2,
                               self.num_views_per_ddoc // 2,
                               bucket=bucket)
        self._wait_for_stats_all_buckets([self.master])

        ntonencryptionBase().disable_nton_cluster([self.master])

        self._verify_ddoc_ops_all_buckets()
        self._verify_ddoc_data_all_buckets()

        self._execute_ddoc_ops("delete",
                               self.test_with_view,
                               self.num_ddocs // 2,
                               self.num_views_per_ddoc // 2,
                               bucket=bucket)

        self._wait_for_stats_all_buckets([self.master])
        ntonencryptionBase().setup_nton_cluster(
            [self.master], clusterEncryptionLevel="strict")
        self._verify_ddoc_ops_all_buckets()
        self._verify_ddoc_data_all_buckets()

        assert ClusterOperationHelper.check_if_services_obey_tls(
            servers=[self.master
                     ]), "Port binding after enforcing TLS incorrect"
Code example #60
0
 def setUp(self):
     super(DiskAutoFailoverBasetest, self).bareSetUp()
     self.log.info(
         "=============Starting Diskautofailover base setup=============")
     self.original_data_path = self.rest.get_data_path()
     ClusterOperationHelper.cleanup_cluster(self.servers, True, self.master)
     self.targetMaster = True
     self.reset_cluster()
     self.disk_location = self.input.param("data_location", "/data")
     self.disk_location_size = self.input.param("data_location_size", 5120)
     self.data_location = "{0}/data".format(self.disk_location)
     self.disk_timeout = self.input.param("disk_timeout", 120)
     self.read_loadgen = self.input.param("read_loadgen", False)
     self.log.info(
         "Cleanup the cluster and set the data location to the one specified by the test."
     )
     for server in self.servers:
         self._create_data_locations(server)
         if server == self.master:
             master_services = self.get_services(self.servers[:1],
                                                 self.services_init,
                                                 start_node=0)
         else:
             master_services = None
         if master_services:
             master_services = master_services[0].split(",")
         self._initialize_node_with_new_data_location(
             server, self.data_location, master_services)
     self.services = self.get_services(self.servers[:self.nodes_init], None)
     self.cluster.rebalance(self.servers[:1],
                            self.servers[1:self.nodes_init], [],
                            services=self.services)
     self.add_built_in_server_user(node=self.master)
     if self.read_loadgen:
         self.bucket_size = 100
     # super(DiskAutoFailoverBasetest,self)._bucket_creation()
     self._load_all_buckets(self.servers[0], self.initial_load_gen,
                            "create", 0)
     self.failover_actions['disk_failure'] = self.fail_disk_via_disk_failure
     self.failover_actions['disk_full'] = self.fail_disk_via_disk_full
     self.loadgen_tasks = []
     self.log.info(
         "=============Finished Diskautofailover base setup=============")