def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.assertTrue(self.input, msg="input parameters missing...")
    self.servers = self.input.servers
    self.master = self.servers[0]
    rest = RestConnection(self.master)
    rest.init_cluster(username=self.master.rest_username,
                      password=self.master.rest_password)
    info = rest.get_nodes_self()
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
    ClusterOperationHelper.cleanup_cluster(servers=self.servers)
    credentials = self.input.membase_settings
    ClusterOperationHelper.add_all_nodes_or_assert(master=self.master,
                                                   all_servers=self.servers,
                                                   rest_settings=credentials,
                                                   test_case=self)
    rest = RestConnection(self.master)
    nodes = rest.node_statuses()
    otpNodeIds = []
    for node in nodes:
        otpNodeIds.append(node.id)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(self.master.ip))
    self.log.info('started rebalance operation on master node {0}'.format(self.master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
def common_setup(input, testcase):
    log.info("============== common_setup was started for test #{0} {1}=============="
             .format(testcase.case_number, testcase._testMethodName))
    servers = input.servers
    RemoteUtilHelper.common_basic_setup(servers)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
    # Add built-in user
    testuser = [{'id': 'cbadminbucket',
                 'name': 'cbadminbucket',
                 'password': '******'}]
    RbacBase().create_user_source(testuser, 'builtin', servers[0])
    # Assign user to role
    role_list = [{'id': 'cbadminbucket',
                  'name': 'cbadminbucket',
                  'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(servers[0]), 'builtin')
    log.info("============== common_setup was finished for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
def tearDown(self):
    if not self.input.param("skip_cleanup", True):
        if self.times_teardown_called > 1:
            self.shell.disconnect()
    if self.input.param("skip_cleanup", True):
        if self.case_number > 1 or self.times_teardown_called > 1:
            self.shell.disconnect()
    self.times_teardown_called += 1
    serverInfo = self.servers[0]
    rest = RestConnection(serverInfo)
    zones = rest.get_zone_names()
    for zone in zones:
        if zone != "Group 1":
            rest.delete_zone(zone)
    self.clusters_dic = self.input.clusters
    if self.clusters_dic:
        if len(self.clusters_dic) > 1:
            self.dest_nodes = self.clusters_dic[1]
            self.dest_master = self.dest_nodes[0]
            if self.dest_nodes and len(self.dest_nodes) > 1:
                self.log.info("======== clean up destination cluster =======")
                rest = RestConnection(self.dest_nodes[0])
                rest.remove_all_remote_clusters()
                rest.remove_all_replications()
                BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
        elif len(self.clusters_dic) == 1:
            self.log.error("=== XDCR tests need two clusters defined in the ini file ===")
    else:
        self.log.info("**** XDCR tests need a cluster config section in the ini file. ****")
    super(CliBaseTest, self).tearDown()
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.master = TestInputSingleton.input.servers[0]
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
    self._bucket_name = 'default'
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    self._bucket_port = info.moxi
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_ram = info.memoryQuota * 2 / 3
    rest.create_bucket(bucket=self._bucket_name,
                       ramQuotaMB=bucket_ram,
                       proxyPort=info.memcached)
    msg = 'create_bucket succeeded but bucket "default" does not exist'
    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
    ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
    self.assertTrue(ready, "wait_for_memcached failed")
    self._log_start()
def setUp_bucket(self, unittest):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    unittest.assertTrue(self.input, msg="input parameters missing...")
    self.test = unittest
    self.master = self.input.servers[0]
    rest = RestConnection(self.master)
    rest.init_cluster(username=self.master.rest_username,
                      password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self.test)
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.memoryQuota)
    # Add built-in user
    testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
    RbacBase().create_user_source(testuser, 'builtin', self.master)
    time.sleep(10)
    # Assign user to role
    role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
    time.sleep(10)
def _cleanup_cluster(self):
    BucketOperationHelper.delete_all_buckets_or_assert([self.servers[0]], test_case=self)
    ClusterOperationHelper.cleanup_cluster(self.servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
def test_insert_x_delete_y_docs_destroy_cluster(self):
    num_docs = self.helper.input.param("num-docs", 100000)
    num_deleted_docs = self.helper.input.param("num-deleted-docs", 10000)
    msg = "description : have a cluster, insert {0} docs, delete "\
          "{1} docs while destroying the cluster into a single node "\
          "and query it"
    self.log.info(msg.format(num_docs, num_deleted_docs))
    design_name = "dev_test_delete_{0}_docs_destroy_cluster".format(num_deleted_docs)
    prefix = str(uuid.uuid4())[:7]
    # Make sure we are fully clustered
    ClusterOperationHelper.add_and_rebalance(self.helper.servers)
    self.helper.create_index_fun(design_name, prefix)
    inserted_keys = self.helper.insert_docs(num_docs, prefix)
    # Start destroying the cluster and rebalancing it without waiting
    # until it's finished
    ClusterOperationHelper.cleanup_cluster(self.helper.servers, False)
    deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)
    self._wait_for_rebalance()
    # Verify that the docs got deleted and are no longer part of the
    # spatial view
    results = self.helper.get_results(design_name, num_docs)
    result_keys = self.helper.get_keys(results)
    self.assertEqual(len(result_keys), num_docs - len(deleted_keys))
    self.helper.verify_result(inserted_keys, deleted_keys + result_keys)
def test_insert_x_docs_during_rebalance(self):
    num_docs = self.helper.input.param("num-docs", 100000)
    msg = "description : have a single node, insert {0} docs, "\
          "query it, add another node, start rebalancing, insert {0} "\
          "docs, finish rebalancing, keep on adding nodes..."
    self.log.info(msg.format(num_docs))
    design_name = "dev_test_insert_{0}_docs_during_rebalance".format(num_docs)
    prefix = str(uuid.uuid4())[:7]
    # Make sure we are fully de-clustered
    ClusterOperationHelper.cleanup_cluster(self.helper.servers)
    self.helper.create_index_fun(design_name)
    inserted_keys = self.helper.insert_docs(num_docs, prefix)
    # Add all servers to the master server one by one and start
    # rebalancing
    for server in self.helper.servers[1:]:
        ClusterOperationHelper.add_and_rebalance([self.helper.master, server], False)
        # Docs with the same prefix are overwritten and not newly created
        prefix = str(uuid.uuid4())[:7]
        inserted_keys.extend(self.helper.insert_docs(
            num_docs, prefix, wait_for_persistence=False))
        self._wait_for_rebalance()
    # Make sure data is persisted
    self.helper.wait_for_persistence()
    # Verify that all documents got inserted
    self.helper.query_index_for_verification(design_name, inserted_keys)
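# The two rebalance tests above call a _wait_for_rebalance() helper that is not
# shown in this section. A minimal sketch, assuming the helper object exposes the
# master node and that RestConnection.monitorRebalance() blocks until the running
# rebalance completes, as the other snippets here use it:
def _wait_for_rebalance(self):
    rest = RestConnection(self.helper.master)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation was not successful")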
def tearDown(self):
    try:
        if self.driver:
            path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
            full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
            self.log.info('screenshot is available: %s' % full_path)
            if not os.path.exists(path_screen):
                os.mkdir(path_screen)
            self.driver.get_screenshot_as_file(os.path.abspath(full_path))
        rest = RestConnection(self.servers[0])
        if rest._rebalance_progress_status() == 'running':
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        if self.driver:
            self.driver.close()
    finally:
        if self.driver:
            self.shell.disconnect()
        self.cluster.shutdown()
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.master = TestInputSingleton.input.servers[0]
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.num_of_docs = self.input.param("num_of_docs", 1000)
    rest = RestConnection(self.master)
    for server in self.servers:
        rest.init_cluster(server.rest_username, server.rest_password)
    info = rest.get_nodes_self()
    for server in self.servers:
        rest.init_cluster_memoryQuota(server.rest_username,
                                      server.rest_password,
                                      memoryQuota=info.mcdMemoryReserved)
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    self._create_default_bucket()
    # Rebalance the nodes
    ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
    ClusterOperationHelper.end_rebalance(self.master)
    self._log_start()
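# Hedged sketch of the _create_default_bucket helper assumed above, following the
# bucket-creation pattern used by the other setUp snippets in this section; the
# 2/3 quota ratio is an assumption, not the original implementation:
def _create_default_bucket(self):
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    rest.create_bucket(bucket='default',
                       ramQuotaMB=info.memoryQuota * 2 / 3,
                       proxyPort=info.memcached)
    ready = BucketOperationHelper.wait_for_memcached(self.master, 'default')
    self.assertTrue(ready, "wait_for_memcached failed")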
def cleanup(self):
    rest = RestConnection(self.master)
    rest.stop_rebalance()
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
def common_setup(self, replica):
    self._input = TestInputSingleton.input
    self._servers = self._input.servers
    first = self._servers[0]
    self.log = logger.Logger().get_logger()
    self.log.info(self._input)
    rest = RestConnection(first)
    for server in self._servers:
        RestHelper(RestConnection(server)).is_ns_server_running()
    ClusterOperationHelper.cleanup_cluster(self._servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
    ClusterOperationHelper.add_all_nodes_or_assert(self._servers[0], self._servers,
                                                   self._input.membase_settings, self)
    nodes = rest.node_statuses()
    otpNodeIds = []
    for node in nodes:
        otpNodeIds.append(node.id)
    info = rest.get_nodes_self()
    bucket_ram = info.mcdMemoryReserved * 3 / 4
    rest.create_bucket(bucket="default",
                       ramQuotaMB=int(bucket_ram),
                       replicaNumber=replica,
                       proxyPort=rest.get_nodes_self().moxi)
    msg = "wait_for_memcached fails"
    ready = BucketOperationHelper.wait_for_memcached(first, "default")
    self.assertTrue(ready, msg)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(first.ip))
    self.log.info('started rebalance operation on master node {0}'.format(first.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    # without a bucket this seems to fail
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    self.awareness = VBucketAwareMemcached(rest, "default")
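# Usage sketch for the vBucket-aware client created above, assuming it exposes
# the usual memcached-style signature (key, expiry, flags, value) and routes
# each key to the node that owns its vBucket:
self.awareness.set("key-0", 0, 0, "value-0")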
def tearDown(self):
    self.times_teardown_called += 1
    serverInfo = self.servers[0]
    rest = RestConnection(serverInfo)
    self.clusters_dic = self.input.clusters
    if self.clusters_dic:
        if len(self.clusters_dic) > 1:
            self.dest_nodes = self.clusters_dic[1]
            self.dest_master = self.dest_nodes[0]
            if self.dest_nodes and len(self.dest_nodes) > 1:
                self.log.info("======== clean up destination cluster =======")
                rest = RestConnection(self.dest_nodes[0])
                rest.remove_all_remote_clusters()
                rest.remove_all_replications()
                BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
        elif len(self.clusters_dic) == 1:
            self.log.error("=== need 2 clusters to setup xdcr in ini file ===")
    else:
        self.log.info("**** XDCR tests need a cluster config section in the ini file. ****")
    super(AltAddrBaseTest, self).tearDown()
def tearDown(self):
    try:
        self._cluster_helper.shutdown()
        log = logger.Logger.get_logger()
        log.info("============== tearDown was started for test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        log.info("10 seconds delay to wait for membase-server to start")
        time.sleep(10)
        for server in self._cleanup_nodes:
            shell = RemoteMachineShellConnection(server)
            o, r = shell.execute_command("iptables -F")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/etc/init.d/couchbase-server start")
            shell.log_command_output(o, r)
            shell.disconnect()
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.cleanup_cluster(self._servers)
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        log.info("============== tearDown was finished for test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
    finally:
        pass
def tearDown(self):
    try:
        test_failed = len(self._resultForDoCleanups.errors)
        if self.driver and test_failed:
            BaseHelper(self).create_screenshot()
        if self.driver:
            self.driver.close()
        if test_failed and TestInputSingleton.input.param("stop-on-failure", False):
            print "test fails, teardown will be skipped!!!"
            return
        rest = RestConnection(self.servers[0])
        try:
            reb_status = rest._rebalance_progress_status()
        except ValueError as e:
            if e.message == 'No JSON object could be decoded':
                print "cluster not initialized!!!"
                return
        if reb_status == 'running':
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
    except Exception as e:
        raise e
    finally:
        if self.driver:
            self.shell.disconnect()
def common_setup(input, testcase):
    servers = input.servers
    RemoteUtilHelper.common_basic_setup(servers)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    for server in servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.cluster = Cluster()
    self.servers = self.input.servers
    self.buckets = {}
    self.default_bucket = self.input.param("default_bucket", True)
    self.standard_buckets = self.input.param("standard_buckets", 0)
    self.sasl_buckets = self.input.param("sasl_buckets", 0)
    self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
    self.num_servers = self.input.param("servers", len(self.servers))
    self.num_replicas = self.input.param("replicas", 1)
    self.num_items = self.input.param("items", 1000)
    self.dgm_run = self.input.param("dgm_run", False)
    if not self.input.param("skip_cleanup", False):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)
    self.quota = self._initialize_nodes(self.cluster, self.servers)
    if self.dgm_run:
        self.quota = 256
    self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
    if self.default_bucket:
        self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
        self.buckets['default'] = {1: KVStore()}
    self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
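# Hedged sketch of the _get_bucket_size helper assumed above: split the node
# quota evenly across buckets, keeping a little headroom. The 0.9 ratio and the
# exact signature are assumptions, not the original implementation:
def _get_bucket_size(self, quota, num_buckets, ratio=0.9):
    return int(ratio * quota / num_buckets)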
def tearDown(self):
    try:
        if (hasattr(self, '_resultForDoCleanups')
                and len(self._resultForDoCleanups.failures) > 0
                and TestInputSingleton.input.param("stop-on-failure", False)) \
                or self.input.param("skip_cleanup", False):
            self.log.warn("CLEANUP WAS SKIPPED")
        else:
            self.log.info("============== basetestcase cleanup was started for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
            rest = RestConnection(self.master)
            alerts = rest.get_alerts()
            if alerts is not None and len(alerts) != 0:
                self.log.warn("Alerts were found: {0}".format(alerts))
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            time.sleep(10)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            self.log.info("============== basetestcase cleanup was finished for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
    finally:
        # stop all existing task manager threads
        self.cluster.shutdown()
        self._log_finish(self)
def setUp_bucket(self, unittest):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    unittest.assertTrue(self.input, msg="input parameters missing...")
    self.test = unittest
    self.master = self.input.servers[0]
    rest = RestConnection(self.master)
    rest.init_cluster(username=self.master.rest_username,
                      password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self.test)
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.memoryQuota)
    # Add built-in user
    testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
    RbacBase().create_user_source(testuser, 'builtin', self.master)
    # Assign user to role
    role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
def cleanup_cluster(self):
    if "skip_cleanup" not in TestInputSingleton.input.test_params:
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self.testcase)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self.testcase)
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.master = TestInputSingleton.input.servers[0]
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
    self._bucket_name = 'default'
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    self._bucket_port = info.moxi
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_ram = info.memoryQuota * 2 / 3
    # Add built-in user
    testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
    RbacBase().create_user_source(testuser, 'builtin', self.master)
    time.sleep(10)
    # Assign user to role
    role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
    time.sleep(10)
    rest.create_bucket(bucket=self._bucket_name,
                       ramQuotaMB=bucket_ram,
                       proxyPort=info.memcached)
    msg = 'create_bucket succeeded but bucket "default" does not exist'
    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
    ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
    self.assertTrue(ready, "wait_for_memcached failed")
    self._log_start()
def common_tearDown(servers, testcase):
    log = logger.Logger.get_logger()
    log.info("============== common_tearDown was started for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
    RemoteUtilHelper.common_basic_setup(servers)
    log.info("10 seconds delay to wait for couchbase-server to start")
    time.sleep(10)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        servers, testcase,
        wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15,
        wait_if_warmup=True)
    try:
        rest = RestConnection(servers[0])
        buckets = rest.get_buckets()
        for bucket in buckets:
            MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
    except Exception:
        pass
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    log.info("============== common_tearDown was finished for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
def tearDown(self):
    # super(Rebalance, self).tearDown()
    try:
        self.log.info("============== XDCRbasetests stats for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
        self._end_replication_flag = 1
        if hasattr(self, '_stats_thread1'):
            self._stats_thread1.join()
        if hasattr(self, '_stats_thread2'):
            self._stats_thread2.join()
        if hasattr(self, '_stats_thread3'):
            self._stats_thread3.join()
        if self._replication_direction_str in "bidirection":
            if hasattr(self, '_stats_thread4'):
                self._stats_thread4.join()
            if hasattr(self, '_stats_thread5'):
                self._stats_thread5.join()
            if hasattr(self, '_stats_thread6'):
                self._stats_thread6.join()
        if self._replication_direction_str in "bidirection":
            self.log.info("Type of run: BIDIRECTIONAL XDCR")
        else:
            self.log.info("Type of run: UNIDIRECTIONAL XDCR")
        self._print_stats(self.src_master)
        if self._replication_direction_str in "bidirection":
            self._print_stats(self.dest_master)
        self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
        self.log.info("============== rebalanceXDCR cleanup was started for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
        for nodes in [self.src_nodes, self.dest_nodes]:
            for node in nodes:
                BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                ClusterOperationHelper.cleanup_cluster([node], self)
                ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
        self.log.info("============== rebalanceXDCR cleanup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
    finally:
        self.cluster.shutdown(force=True)
        self._log_finish(self)
def tearDown(self):
    try:
        super(AlternateAddressTests, self).tearDown()
    except Exception as e:
        print(e)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    ClusterOperationHelper.cleanup_cluster(self.servers, master=self.servers[0])
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.master = TestInputSingleton.input.servers[0]
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
    self._bucket_name = 'default'
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    self._bucket_port = info.moxi
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_ram = info.memoryQuota * 2 / 3
    # Add built-in user
    testuser = [{'id': 'cbadminbucket',
                 'name': 'cbadminbucket',
                 'password': '******'}]
    RbacBase().create_user_source(testuser, 'builtin', self.master)
    # Assign user to role
    role_list = [{'id': 'cbadminbucket',
                  'name': 'cbadminbucket',
                  'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
    rest.create_bucket(bucket=self._bucket_name,
                       ramQuotaMB=bucket_ram,
                       proxyPort=info.memcached)
    msg = 'create_bucket succeeded but bucket "default" does not exist'
    if (testconstants.TESTRUNNER_CLIENT in os.environ.keys()) and \
            os.environ[testconstants.TESTRUNNER_CLIENT] == testconstants.PYTHON_SDK:
        self.client = SDKSmartClient(serverInfo, self._bucket_name,
                                     compression=TestInputSingleton.input.param("sdk_compression", True))
    else:
        self.client = MemcachedClientHelper.direct_client(serverInfo, self._bucket_name)
    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
    ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
    self.assertTrue(ready, "wait_for_memcached failed")
    self._log_start()
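# Usage sketch for the client selected above; both SDKSmartClient and the direct
# memcached client are assumed to share the memcached-style signature
# (key, expiry, flags, value), which keeps the test client-agnostic:
self.client.set("test-key", 0, 0, "test-value")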
def reset(self):
    self.log.info("============== SwapRebalanceBase cleanup was started for test #{0} {1} =============="
                  .format(self.case_number, self._testMethodName))
    self.log.info("Stopping load in Teardown")
    SwapRebalanceBase.stop_load(self.loaders)
    for server in self.servers:
        rest = RestConnection(server)
        if rest._rebalance_progress_status() == "running":
            self.log.warning("rebalancing is still running, test should be verified")
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
        if server.data_path:
            rest = RestConnection(server)
            rest.set_data_path(data_path=server.data_path)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
    self.log.info("============== SwapRebalanceBase cleanup was finished for test #{0} {1} =============="
                  .format(self.case_number, self._testMethodName))
def tearDown(self):
    if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
            and 'stop-on-failure' in TestInputSingleton.input.test_params and \
            str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
        # supported starting with python2.7
        log.warn("CLEANUP WAS SKIPPED")
        self.cluster.shutdown(force=True)
        self._log_finish(self)
    else:
        try:
            self.log.info("============== tearDown was started for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
            RemoteUtilHelper.common_basic_setup(self.servers)
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for node in self.servers:
                master = node
                try:
                    ClusterOperationHelper.cleanup_cluster(self.servers, master=master)
                except:
                    continue
            self.log.info("============== tearDown was finished for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
        finally:
            super(FailoverBaseTest, self).tearDown()
def tearDown(self):
    try:
        if (hasattr(self, '_resultForDoCleanups')
                and len(self._resultForDoCleanups.failures) > 0
                and TestInputSingleton.input.param("stop-on-failure", False)) \
                or self.input.param("skip_cleanup", False):
            self.log.warn("CLEANUP WAS SKIPPED")
        else:
            self.log.info("============== basetestcase cleanup was started for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
            rest = RestConnection(self.master)
            alerts = rest.get_alerts()
            if alerts is not None and len(alerts) != 0:
                self.log.warn("Alerts were found: {0}".format(alerts))
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            self.sleep(10)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            self.log.info("============== basetestcase cleanup was finished for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
    except BaseException:
        # increase case_number to retry tearDown in setup for the next test
        self.case_number += 1000
    finally:
        # stop all existing task manager threads
        self.cluster.shutdown()
        self._log_finish(self)
def tearDown(self):
    # super(Rebalance, self).tearDown()
    try:
        self.log.info("============== XDCRbasetests stats for test #{0} {1} =============="
                      .format(self._case_number, self._testMethodName))
        self._end_replication_flag = 1
        if hasattr(self, '_stats_thread1'):
            self._stats_thread1.join()
        if hasattr(self, '_stats_thread2'):
            self._stats_thread2.join()
        if hasattr(self, '_stats_thread3'):
            self._stats_thread3.join()
        if self._replication_direction_str in "bidirection":
            if hasattr(self, '_stats_thread4'):
                self._stats_thread4.join()
            if hasattr(self, '_stats_thread5'):
                self._stats_thread5.join()
            if hasattr(self, '_stats_thread6'):
                self._stats_thread6.join()
        if self._replication_direction_str in "bidirection":
            self.log.info("Type of run: BIDIRECTIONAL XDCR")
        else:
            self.log.info("Type of run: UNIDIRECTIONAL XDCR")
        self._print_stats(self.src_master)
        if self._replication_direction_str in "bidirection":
            self._print_stats(self.dest_master)
        self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
        self.log.info("============== rebalanceXDCR cleanup was started for test #{0} {1} =============="
                      .format(self._case_number, self._testMethodName))
        for nodes in [self.src_nodes, self.dest_nodes]:
            for node in nodes:
                BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                ClusterOperationHelper.cleanup_cluster([node], self)
                ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
        self.log.info("============== rebalanceXDCR cleanup was finished for test #{0} {1} =============="
                      .format(self._case_number, self._testMethodName))
    finally:
        self.cluster.shutdown()
        self._log_finish(self)
def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
    log = logger.Logger.get_logger()
    servers = input.servers
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
    serverInfo = servers[0]
    log.info('picking server : {0} as the master'.format(serverInfo))
    # if all nodes are on the same machine, scale bucket_ram_ratio down by 1/len(servers)
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    if "ascii" in TestInputSingleton.input.test_params \
            and TestInputSingleton.input.test_params["ascii"].lower() == "true":
        BucketOperationHelper.create_multiple_buckets(serverInfo, replica,
                                                      node_ram_ratio * bucket_ram_ratio,
                                                      howmany=1, sasl=False)
    else:
        BucketOperationHelper.create_multiple_buckets(serverInfo, replica,
                                                      node_ram_ratio * bucket_ram_ratio,
                                                      howmany=1, sasl=True)
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
        testcase.assertTrue(ready, "wait_for_memcached failed")
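# Usage sketch (call site assumed, not from the original): driving the helper
# above from a unittest TestCase's setUp with the shared TestInputSingleton input.
common_setup(TestInputSingleton.input, self, bucket_ram_ratio=2.0 / 3.0, replica=1)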
def tearDown(self):
    try:
        if self.driver:
            path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
            full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
            self.log.info('screenshot is available: %s' % full_path)
            if not os.path.exists(path_screen):
                os.mkdir(path_screen)
            self.driver.get_screenshot_as_file(os.path.abspath(full_path))
        rest = RestConnection(self.servers[0])
        if rest._rebalance_progress_status() == 'running':
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        if self.driver:
            self.driver.close()
    except Exception as e:
        raise e
    finally:
        if self.driver:
            self.shell.disconnect()
        self.cluster.shutdown()
def _test_cluster_topology_change_body(self):
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
        servers=[self.master],
        ram_load_ratio=1,
        value_size_distribution=distribution,
        moxi=True,
        write_only=True,
        number_of_threads=2)
    self.log.info("Sleep after data load")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    # let's create a unique folder in the remote location
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    servers = []
    for i in range(0, len(self.servers) - 1):
        servers.append(self.servers[i])
    self.add_node_and_rebalance(servers[0], servers)
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder)
        time.sleep(10)
    BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
def _common_cleanup(self):
    rest = RestConnection(self.servers[0])
    if rest._rebalance_progress_status() == 'running':
        stopped = rest.stop_rebalance()
        self.assertTrue(stopped, msg="unable to stop rebalance")
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    ClusterOperationHelper.cleanup_cluster(self.servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
def common_setUp(self, with_buckets):
    ClusterOperationHelper.cleanup_cluster(self.servers)
    server = self.servers[0]
    if with_buckets:
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
        ok = BucketOperationHelper.create_multiple_buckets(server, 1)
        if not ok:
            self.fail("unable to create multiple buckets on this node : {0}".format(server))
def common_tearDown(self):
    if self.load_started:
        self.log.info("Stopping load in Teardown")
        SwapRebalanceBase.stop_load(self.loaders)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterHelper.wait_for_ns_servers_or_assert(self.servers, self)
def tearDown(self):
    # super(Upgrade_EpTests, self).tearDown()
    self.testcase = '2'
    if "skip_cleanup" not in TestInputSingleton.input.test_params:
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self.testcase)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self.testcase)
def tearDown(self):
    super(BucketConfig, self).tearDown()
    # The early return below leaves the manual cleanup unreachable; cleanup is
    # handled entirely by the base class tearDown.
    return
    if "skip_cleanup" not in TestInputSingleton.input.test_params:
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self.testcase)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self.testcase)
def cleanup_cluster(self):
    if "skip_cleanup" not in TestInputSingleton.input.test_params:
        # Cleanup all indexes that were created with this helper class
        for name in self._indexes:
            self.rest.delete_spatial(self.bucket, name)
            self.log.info("deleted spatial {0} from bucket {1}".format(name, self.bucket))
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self.testcase)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self.testcase)
def common_tearDown(servers, testcase):
    RemoteUtilHelper.common_basic_setup(servers)
    log = logger.Logger.get_logger()
    log.info("10 seconds delay to wait for membase-server to start")
    time.sleep(10)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
def _deinitialize_api(self):
    self.log.info("Cleaning up replications and remote clusters")
    for server in self.servers:
        rest = RestConnection(server)
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
    self.log.info("Sleeping for 10 seconds after cleaning up replications and remote clusters")
    time.sleep(10)
    ClusterOperationHelper.cleanup_cluster(self.servers, master=self.master)
def _deinitialize_api(self):
    self.log.info("Cleaning up replications and remote clusters")
    for server in self.servers:
        rest = RestConnection(server)
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
    self.log.info("Sleeping for 30 seconds after cleaning up replications and remote clusters")
    time.sleep(30)
    ClusterOperationHelper.cleanup_cluster(self.servers, master=self.master)
def reset(self):
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
        rest = RestConnection(server)
        if server.data_path:
            rest.set_data_path(data_path=server.data_path)
    self.log.info("Stopping load in Teardown")
    SwapRebalanceBase.stop_load(self.loaders)
    ClusterHelper.wait_for_ns_servers_or_assert(self.servers, self)
def common_setup(input, testcase):
    log.info("============== common_setup was started for test #{0} {1}=============="
             .format(testcase.case_number, testcase._testMethodName))
    servers = input.servers
    RemoteUtilHelper.common_basic_setup(servers)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
    log.info("============== common_setup was finished for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
def common_setup(input, testcase):
    log.info("============== common_setup was started for test #{0} {1}=============="
             .format(testcase.case_number, testcase._testMethodName))
    servers = input.servers
    RemoteUtilHelper.common_basic_setup(servers)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    log.info("============== common_setup was finished for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
def _cluster_cleanup(self, bucket_util):
    rest = RestConnection(self.cluster.master)
    alerts = rest.get_alerts()
    if rest._rebalance_progress_status() == 'running':
        self.kill_memcached()
        self.log.warning("rebalancing is still running, test should be verified")
        stopped = rest.stop_rebalance()
        self.assertTrue(stopped, msg="unable to stop rebalance")
    bucket_util.delete_all_buckets(self.cluster.servers)
    ClusterOperationHelper.cleanup_cluster(self.cluster.servers, master=self.cluster.master)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.cluster.servers, self)
def reset(self):
    rest = RestConnection(self.servers[0])
    if rest._rebalance_progress_status() == 'running':
        self.log.warning("rebalancing is still running, test should be verified")
        stopped = rest.stop_rebalance()
        self.assertTrue(stopped, msg="unable to stop rebalance")
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    self.log.info("Stopping load in Teardown")
    ClusterHelper.wait_for_ns_servers_or_assert(self.servers, self)