def tearDown(self):
    try:
        test_failed = len(self._resultForDoCleanups.errors)
        if self.driver and test_failed:
            BaseHelper(self).create_screenshot()
        if self.driver:
            self.driver.close()
        if test_failed and TestInputSingleton.input.param("stop-on-failure", False):
            print "test fails, teardown will be skipped!!!"
            return
        rest = RestConnection(self.servers[0])
        try:
            reb_status = rest._rebalance_progress_status()
        except ValueError as e:
            if e.message == 'No JSON object could be decoded':
                print "cluster not initialized!!!"
                return
        if reb_status == 'running':
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
    except Exception as e:
        raise e
    finally:
        if self.driver:
            self.shell.disconnect()
def tearDown(self):
    # super(Rebalance, self).tearDown()
    try:
        self.log.info("============== XDCRbasetests stats for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
        self._end_replication_flag = 1
        if hasattr(self, '_stats_thread1'):
            self._stats_thread1.join()
        if hasattr(self, '_stats_thread2'):
            self._stats_thread2.join()
        if hasattr(self, '_stats_thread3'):
            self._stats_thread3.join()
        if self._replication_direction_str in "bidirection":
            if hasattr(self, '_stats_thread4'):
                self._stats_thread4.join()
            if hasattr(self, '_stats_thread5'):
                self._stats_thread5.join()
            if hasattr(self, '_stats_thread6'):
                self._stats_thread6.join()
        if self._replication_direction_str in "bidirection":
            self.log.info("Type of run: BIDIRECTIONAL XDCR")
        else:
            self.log.info("Type of run: UNIDIRECTIONAL XDCR")
        self._print_stats(self.src_master)
        if self._replication_direction_str in "bidirection":
            self._print_stats(self.dest_master)
        self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
        self.log.info("============== rebalanceXDCR cleanup was started for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
        for nodes in [self.src_nodes, self.dest_nodes]:
            for node in nodes:
                BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                ClusterOperationHelper.cleanup_cluster([node], self)
                ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
        self.log.info("============== rebalanceXDCR cleanup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
    finally:
        self.cluster.shutdown(force=True)
        self._log_finish(self)
def replication_while_rebooting_a_non_master_src_dest_node(self):
    bucket_type = self._input.param("bucket_type", "membase")
    if bucket_type == "ephemeral":
        self.log.info("Test case does not apply to ephemeral")
        return
    self.setup_xdcr_and_load()
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout)

    reboot_node_dest = self.dest_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(reboot_node_dest, self,
                                   wait_time=self._wait_timeout * 4,
                                   wait_if_warmup=True)

    reboot_node_src = self.src_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(reboot_node_src, self,
                                   wait_time=self._wait_timeout * 4,
                                   wait_if_warmup=True)

    self.sleep(120)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
    self.verify_results()
def tearDown(self):
    try:
        self._cluster_helper.shutdown()
        log = logger.Logger.get_logger()
        log.info("============== tearDown was started for test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        log.info("10 seconds delay to wait for membase-server to start")
        time.sleep(10)
        for server in self._cleanup_nodes:
            shell = RemoteMachineShellConnection(server)
            o, r = shell.execute_command("iptables -F")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/etc/init.d/couchbase-server start")
            shell.log_command_output(o, r)
            shell.disconnect()
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.cleanup_cluster(self._servers)
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        log.info("============== tearDown was finished for test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
    finally:
        pass
def _cleanup_cluster(self):
    BucketOperationHelper.delete_all_buckets_or_assert([self.servers[0]], test_case=self)
    ClusterOperationHelper.cleanup_cluster(self.servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
def tearDown(self):
    try:
        if self.driver:
            path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
            full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
            self.log.info('screenshot is available: %s' % full_path)
            if not os.path.exists(path_screen):
                os.mkdir(path_screen)
            self.driver.get_screenshot_as_file(os.path.abspath(full_path))
        rest = RestConnection(self.servers[0])
        if rest._rebalance_progress_status() == 'running':
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        if self.driver:
            self.driver.close()
    finally:
        if self.driver:
            self.shell.disconnect()
        self.cluster.shutdown()
def common_setup(input, testcase):
    servers = input.servers
    RemoteUtilHelper.common_basic_setup(servers)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    for server in servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
def cleanup(self):
    rest = RestConnection(self.master)
    rest.stop_rebalance()
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
def test_prepared_with_warmup(self):
    try:
        num_srv_warm_up = self.input.param("srv_warm_up", 1)
        if self.input.tuq_client is None:
            self.fail("For this test an external tuq server is required. "
                      "Please specify one in conf")
        self.test_union_all()
        for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.start_server()
            remote.disconnect()
        # run query; the result may not be as expected, but tuq shouldn't fail
        try:
            self.test_union_all()
        except:
            pass
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)
        self.verify_cluster_stats(self.servers[:self.nodes_init])
        self.sleep(50)
        self.verify_cluster_stats(self.servers[:self.nodes_init])
        self.log.info("-" * 100)
        self.log.info("Querying alternate query node to test the encoded_prepare ....")
        self.test_prepared_union()
        self.log.info("-" * 100)
    finally:
        self.log.info("Done with encoded_prepare ....")
def replication_while_rebooting_a_non_master_destination_node(self):
    self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
    self._load_all_buckets(self.dest_master, self.gen_create2, "create", 0)
    self._async_update_delete_data()
    self.sleep(self._timeout)

    i = len(self.dest_nodes) - 1
    shell = RemoteMachineShellConnection(self.dest_nodes[i])
    if shell.extract_remote_info().type.lower() == 'windows':
        o, r = shell.execute_command("shutdown -r -f -t 0")
    elif shell.extract_remote_info().type.lower() == 'linux':
        o, r = shell.execute_command("reboot")
    shell.log_command_output(o, r)

    i = len(self.src_nodes) - 1
    shell = RemoteMachineShellConnection(self.src_nodes[i])
    if shell.extract_remote_info().type.lower() == 'windows':
        o, r = shell.execute_command("shutdown -r -f -t 0")
    elif shell.extract_remote_info().type.lower() == 'linux':
        o, r = shell.execute_command("reboot")
    shell.log_command_output(o, r)

    self.merge_buckets(self.src_master, self.dest_master, bidirection=True)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([self.dest_nodes[i]], self, wait_if_warmup=True)
    self.verify_results(verify_src=True)
def test_warmup(self):
    index_field = self.input.param("index_field", 'name')
    indexes = []
    try:
        indexes = self._create_multiple_indexes(index_field)
        num_srv_warm_up = self.input.param("srv_warm_up", 1)
        if self.input.tuq_client is None:
            self.fail("For this test an external tuq server is required. "
                      "Please specify one in conf")
        self.test_union_all()
        for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.start_server()
            remote.disconnect()
        # run query; the result may not be as expected, but tuq shouldn't fail
        try:
            self.test_union_all()
        except:
            pass
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)
        self.verify_cluster_stats(self.servers[:self.nodes_init])
        self.sleep(50)
        self.verify_cluster_stats(self.servers[:self.nodes_init])
        self.test_union_all()
    finally:
        self._delete_multiple_indexes(indexes)
def test_full_eviction_changed_to_value_eviction(self):
    KEY_NAME = 'key1'
    gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
    gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
    remote = RemoteMachineShellConnection(self.master)
    for bucket in self.buckets:
        output, _ = remote.execute_couchbase_cli(cli_command='bucket-edit',
                                                 cluster_host="localhost",
                                                 user=self.master.rest_username,
                                                 password=self.master.rest_password,
                                                 options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
        self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                        'Eviction policy wasn\'t changed')
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers[:self.nodes_init], self,
                                                         wait_time=self.wait_timeout,
                                                         wait_if_warmup=True)
    self.sleep(10, 'Wait some time before next load')
    # self._load_all_buckets(self.master, gen_create2, "create", 0)
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)
    try:
        rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        self.fail('Bucket is incorrectly functional')
    except MemcachedError as e:
        pass  # this is the exception we are hoping for
def test_warmup(self):
    index_field = self.input.param("index_field", 'name')
    indexes = []
    try:
        indexes = self._create_multiple_indexes(index_field)
        num_srv_warm_up = self.input.param("srv_warm_up", 1)
        if self.input.tuq_client is None:
            self.fail("For this test an external tuq server is required. "
                      "Please specify one in conf")
        self.test_union_all()
        for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.start_server()
            remote.disconnect()
        # run query; the result may not be as expected, but tuq shouldn't fail
        try:
            self.test_union_all()
        except:
            pass
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.sleep(5)
        self.test_union_all()
    finally:
        self._delete_multiple_indexes(indexes)
def cleanup_cluster(self):
    if "skip_cleanup" not in TestInputSingleton.input.test_params:
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self.testcase)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self.testcase)
def _reboot_server(self):
    try:
        for server in self.servers[:]:
            shell = RemoteMachineShellConnection(server)
            if shell.extract_remote_info().type.lower() == 'windows':
                o, r = shell.execute_command("shutdown -r -f -t 0")
                shell.log_command_output(o, r)
                shell.disconnect()
                self.log.info("Node {0} is being stopped".format(server.ip))
            elif shell.extract_remote_info().type.lower() == 'linux':
                o, r = shell.execute_command("reboot")
                shell.log_command_output(o, r)
                shell.disconnect()
                self.log.info("Node {0} is being stopped".format(server.ip))
            ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
            shell = RemoteMachineShellConnection(server)
            command = "/sbin/iptables -F"
            o, r = shell.execute_command(command)
            shell.log_command_output(o, r)
            shell.disconnect()
            self.log.info("Node {0} backup".format(server.ip))
    finally:
        self.log.info("Warming-up servers ..")
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)
def reboot_server(server, test_case, wait_timeout=120):
    """Reboot a server and wait for couchbase server to run.
    @param server: server object, which needs to be rebooted.
    @param test_case: test case object, since it has assert() function
                      which is used by wait_for_ns_servers_or_assert
                      to throw assertion.
    @param wait_timeout: timeout for the whole reboot operation.
    """
    # self.log.info("Rebooting server '{0}'....".format(server.ip))
    shell = RemoteMachineShellConnection(server)
    if shell.extract_remote_info().type.lower() == OS.WINDOWS:
        o, r = shell.execute_command("{0} -r -f -t 0".format(COMMAND.SHUTDOWN))
    elif shell.extract_remote_info().type.lower() == OS.LINUX:
        o, r = shell.execute_command(COMMAND.REBOOT)
    shell.log_command_output(o, r)
    # wait for restart and warmup on all servers
    if shell.extract_remote_info().type.lower() == OS.WINDOWS:
        time.sleep(wait_timeout * 5)
    else:
        time.sleep(wait_timeout / 6)
    while True:
        try:
            # disable firewall on these nodes
            NodeHelper.disable_firewall(server)
            break
        except BaseException:
            print "Node not reachable yet, will try after 10 secs"
            time.sleep(10)
    # wait till server is ready after warmup
    ClusterOperationHelper.wait_for_ns_servers_or_assert([server], test_case, wait_if_warmup=True)
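# A minimal usage sketch of the helper above (not from the original suite): it
# assumes the helper is exposed as a static method on NodeHelper, as its call
# to NodeHelper.disable_firewall suggests. The node choice and the 240-second
# timeout are illustrative values only.
def reboot_and_wait_example(self):
    node = self.servers[-1]
    NodeHelper.reboot_server(node, self, wait_timeout=240)
    # when the helper returns, the node has rebooted, its firewall rules are
    # cleared, and ns_server has finished warmup; a final cluster-wide check
    # mirrors how the teardown helpers in this suite verify node readiness
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)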
def common_tearDown(servers, testcase):
    log = logger.Logger.get_logger()
    log.info("============== common_tearDown was started for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
    RemoteUtilHelper.common_basic_setup(servers)
    log.info("10 seconds delay to wait for couchbase-server to start")
    time.sleep(10)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase,
                                                         wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15,
                                                         wait_if_warmup=True)
    try:
        # this is a module-level helper, so use the servers argument
        # (the original referenced an undefined self._servers here)
        rest = RestConnection(servers[0])
        buckets = rest.get_buckets()
        for bucket in buckets:
            MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
    except Exception:
        pass
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    log.info("============== common_tearDown was finished for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
def offline_cluster_upgrade_and_reboot(self):
    self._install(self.servers[:self.nodes_init])
    self.operations(self.servers[:self.nodes_init])
    if self.ddocs_num:
        self.create_ddocs_and_views()
    if self.during_ops:
        for opn in self.during_ops:
            getattr(self, opn)()
    num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
    stoped_nodes = self.servers[self.nodes_init - num_stoped_nodes:self.nodes_init]
    self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade")
    for upgrade_version in self.upgrade_versions:
        for server in stoped_nodes:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.disconnect()
        self.sleep(self.sleep_time)
        upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
        for upgrade_thread in upgrade_threads:
            upgrade_thread.join()
        success_upgrade = True
        while not self.queue.empty():
            success_upgrade &= self.queue.get()
        if not success_upgrade:
            self.fail("Upgrade failed!")
        for server in stoped_nodes:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            self.sleep(5)
            remote.start_couchbase()
            remote.disconnect()
        ClusterOperationHelper.wait_for_ns_servers_or_assert(stoped_nodes, self)
        self.verification(self.servers[:self.nodes_init])
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.cluster = Cluster()
    self.servers = self.input.servers
    self.buckets = {}
    self.default_bucket = self.input.param("default_bucket", True)
    self.standard_buckets = self.input.param("standard_buckets", 0)
    self.sasl_buckets = self.input.param("sasl_buckets", 0)
    self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
    self.num_servers = self.input.param("servers", len(self.servers))
    self.num_replicas = self.input.param("replicas", 1)
    self.num_items = self.input.param("items", 1000)
    self.dgm_run = self.input.param("dgm_run", False)
    if not self.input.param("skip_cleanup", False):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)
    self.quota = self._initialize_nodes(self.cluster, self.servers)
    if self.dgm_run:
        self.quota = 256
    self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
    if self.default_bucket:
        self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
        self.buckets['default'] = {1: KVStore()}
    self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
def tearDown(self):
    try:
        if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0
                and TestInputSingleton.input.param("stop-on-failure", False)) \
                or self.input.param("skip_cleanup", False):
            self.log.warn("CLEANUP WAS SKIPPED")
        else:
            self.log.info("============== basetestcase cleanup was started for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
            rest = RestConnection(self.master)
            alerts = rest.get_alerts()
            if alerts is not None and len(alerts) != 0:
                self.log.warn("Alerts were found: {0}".format(alerts))
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            self.sleep(10)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            self.log.info("============== basetestcase cleanup was finished for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
    except BaseException:
        # increase case_number to retry tearDown in setup for the next test
        self.case_number += 1000
    finally:
        # stop all existing task manager threads
        self.cluster.shutdown()
        self._log_finish(self)
def offline_cluster_upgrade_and_rebalance(self):
    num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
    stoped_nodes = self.servers[self.nodes_init - num_stoped_nodes:self.nodes_init]
    servs_out = self.servers[self.nodes_init - num_stoped_nodes - self.nodes_out:self.nodes_init - num_stoped_nodes]
    servs_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
    self._install(self.servers)
    self.operations(self.servers[:self.nodes_init])
    if self.ddocs_num:
        self.create_ddocs_and_views()
    if self.during_ops:
        for opn in self.during_ops:
            getattr(self, opn)()
    for upgrade_version in self.upgrade_versions:
        self.sleep(self.sleep_time,
                   "Pre-setup of old version is done. Wait for upgrade to {0} version".format(upgrade_version))
        for server in stoped_nodes:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.disconnect()
        upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
        try:
            self.cluster.rebalance(self.servers[:self.nodes_init], servs_in, servs_out)
        except RebalanceFailedException:
            self.log.info("rebalance failed as expected")
        for upgrade_thread in upgrade_threads:
            upgrade_thread.join()
        success_upgrade = True
        while not self.queue.empty():
            success_upgrade &= self.queue.get()
        if not success_upgrade:
            self.fail("Upgrade failed!")
        ClusterOperationHelper.wait_for_ns_servers_or_assert(stoped_nodes, self)
        self.cluster.rebalance(self.servers[:self.nodes_init], [], servs_out)
        self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
        self.verification(list(set(self.servers[:self.nodes_init] + servs_in) - set(servs_out)))
def reset(self):
    self.log.info("============== SwapRebalanceBase cleanup was started for test #{0} {1} =============="
                  .format(self.case_number, self._testMethodName))
    self.log.info("Stopping load in Teardown")
    SwapRebalanceBase.stop_load(self.loaders)
    for server in self.servers:
        rest = RestConnection(server)
        if rest._rebalance_progress_status() == "running":
            self.log.warning("rebalancing is still running, test should be verified")
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
        if server.data_path:
            rest = RestConnection(server)
            rest.set_data_path(data_path=server.data_path)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
    self.log.info("============== SwapRebalanceBase cleanup was finished for test #{0} {1} =============="
                  .format(self.case_number, self._testMethodName))
def tearDown(self):
    # super(Rebalance, self).tearDown()
    try:
        self.log.info("============== XDCRbasetests stats for test #{0} {1} =============="
                      .format(self._case_number, self._testMethodName))
        self._end_replication_flag = 1
        if hasattr(self, '_stats_thread1'):
            self._stats_thread1.join()
        if hasattr(self, '_stats_thread2'):
            self._stats_thread2.join()
        if hasattr(self, '_stats_thread3'):
            self._stats_thread3.join()
        if self._replication_direction_str in "bidirection":
            if hasattr(self, '_stats_thread4'):
                self._stats_thread4.join()
            if hasattr(self, '_stats_thread5'):
                self._stats_thread5.join()
            if hasattr(self, '_stats_thread6'):
                self._stats_thread6.join()
        if self._replication_direction_str in "bidirection":
            self.log.info("Type of run: BIDIRECTIONAL XDCR")
        else:
            self.log.info("Type of run: UNIDIRECTIONAL XDCR")
        self._print_stats(self.src_master)
        if self._replication_direction_str in "bidirection":
            self._print_stats(self.dest_master)
        self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
        self.log.info("============== rebalanceXDCR cleanup was started for test #{0} {1} =============="
                      .format(self._case_number, self._testMethodName))
        for nodes in [self.src_nodes, self.dest_nodes]:
            for node in nodes:
                BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                ClusterOperationHelper.cleanup_cluster([node], self)
                ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
        self.log.info("============== rebalanceXDCR cleanup was finished for test #{0} {1} =============="
                      .format(self._case_number, self._testMethodName))
    finally:
        self.cluster.shutdown()
        self._log_finish(self)
def wait_node_restarted(self, server, wait_time=120, wait_if_warmup=False, check_service=False):
    now = time.time()
    if check_service:
        self.wait_service_started(server, wait_time)
        wait_time = now + wait_time - time.time()
    num = 0
    while num < wait_time / 10:
        try:
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                [server], self,
                wait_time=wait_time - num * 10,
                wait_if_warmup=wait_if_warmup)
            break
        except BaseException as e:
            if e.message.find('couchApiBase doesn') != -1 or e.message.find('unable to reach') != -1:
                num += 1
                self.sleep(10)
            else:
                raise e
def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
    log = logger.Logger.get_logger()
    servers = input.servers
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
    serverInfo = servers[0]
    log.info('picking server : {0} as the master'.format(serverInfo))
    # if all nodes are on the same machine, scale bucket_ram_ratio down by 1/len(servers)
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    if "ascii" in TestInputSingleton.input.test_params \
            and TestInputSingleton.input.test_params["ascii"].lower() == "true":
        BucketOperationHelper.create_multiple_buckets(serverInfo, replica,
                                                      node_ram_ratio * bucket_ram_ratio,
                                                      howmany=1, sasl=False)
    else:
        BucketOperationHelper.create_multiple_buckets(serverInfo, replica,
                                                      node_ram_ratio * bucket_ram_ratio,
                                                      howmany=1, sasl=True)
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
        testcase.assertTrue(ready, "wait_for_memcached failed")
def common_setup(input, testcase):
    log.info("============== common_setup was started for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
    servers = input.servers
    RemoteUtilHelper.common_basic_setup(servers)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)

    # Add built-in user
    testuser = [{'id': 'cbadminbucket',
                 'name': 'cbadminbucket',
                 'password': '******'}]
    RbacBase().create_user_source(testuser, 'builtin', servers[0])

    # Assign user to role
    role_list = [{'id': 'cbadminbucket',
                  'name': 'cbadminbucket',
                  'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(servers[0]), 'builtin')

    log.info("============== common_setup was finished for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
def test_reproducer_MB_11698(self):
    gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
    gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
    remote = RemoteMachineShellConnection(self.master)
    for bucket in self.buckets:
        output, _ = remote.execute_couchbase_cli(cli_command='bucket-edit',
                                                 cluster_host="localhost:8091",
                                                 user=self.master.rest_username,
                                                 password=self.master.rest_password,
                                                 options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
        self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                        'Eviction policy wasn\'t changed')
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers[:self.nodes_init], self,
                                                         wait_time=self.wait_timeout,
                                                         wait_if_warmup=True)
    self.sleep(10, 'Wait some time before next load')
    self._load_all_buckets(self.master, gen_create2, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init], timeout=self.wait_timeout * 5)
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
def test_full_eviction_changed_to_value_eviction(self):
    KEY_NAME = 'key1'
    gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
    gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
    remote = RemoteMachineShellConnection(self.master)
    for bucket in self.buckets:
        output, _ = remote.execute_couchbase_cli(cli_command='bucket-edit',
                                                 cluster_host="localhost:8091",
                                                 user=self.master.rest_username,
                                                 password=self.master.rest_password,
                                                 options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
        self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                        'Eviction policy wasn\'t changed')
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers[:self.nodes_init], self,
                                                         wait_time=self.wait_timeout,
                                                         wait_if_warmup=True)
    self.sleep(10, 'Wait some time before next load')
    # self._load_all_buckets(self.master, gen_create2, "create", 0)
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)
    try:
        rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        self.fail('Bucket is incorrectly functional')
    except MemcachedError as e:
        pass  # this is the exception we are hoping for
def tearDown(self):
    try:
        if self.driver:
            path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
            full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
            self.log.info('screenshot is available: %s' % full_path)
            if not os.path.exists(path_screen):
                os.mkdir(path_screen)
            self.driver.get_screenshot_as_file(os.path.abspath(full_path))
        rest = RestConnection(self.servers[0])
        if rest._rebalance_progress_status() == 'running':
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        if self.driver:
            self.driver.close()
    except Exception as e:
        raise e
    finally:
        if self.driver:
            self.shell.disconnect()
        self.cluster.shutdown()
def tearDown(self):
    try:
        if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0
                and TestInputSingleton.input.param("stop-on-failure", False)) \
                or self.input.param("skip_cleanup", False):
            self.log.warn("CLEANUP WAS SKIPPED")
        else:
            self.log.info("============== basetestcase cleanup was started for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
            rest = RestConnection(self.master)
            alerts = rest.get_alerts()
            if alerts is not None and len(alerts) != 0:
                self.log.warn("Alerts were found: {0}".format(alerts))
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            time.sleep(10)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            self.log.info("============== basetestcase cleanup was finished for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
    finally:
        # stop all existing task manager threads
        self.cluster.shutdown()
        self._log_finish(self)
def replication_while_rebooting_a_non_master_destination_node(self):
    self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
    self._load_all_buckets(self.dest_master, self.gen_create2, "create", 0)
    self._async_update_delete_data()
    self.sleep(self._timeout)

    reboot_node_dest = self.dest_nodes[len(self.dest_nodes) - 1]
    shell = RemoteMachineShellConnection(reboot_node_dest)
    if shell.extract_remote_info().type.lower() == 'windows':
        o, r = shell.execute_command("shutdown -r -f -t 0")
    elif shell.extract_remote_info().type.lower() == 'linux':
        o, r = shell.execute_command("reboot")
    shell.log_command_output(o, r)

    reboot_node_src = self.src_nodes[len(self.src_nodes) - 1]
    shell = RemoteMachineShellConnection(reboot_node_src)
    if shell.extract_remote_info().type.lower() == 'windows':
        o, r = shell.execute_command("shutdown -r -f -t 0")
    elif shell.extract_remote_info().type.lower() == 'linux':
        o, r = shell.execute_command("reboot")
    shell.log_command_output(o, r)

    self.sleep(360)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
    self.merge_buckets(self.src_master, self.dest_master, bidirection=True)
    self.verify_results(verify_src=True)
def test_delete_create_bucket_and_query(self):
    # Initialization operation
    self.run_multi_operations(buckets=self.buckets,
                              query_definitions=self.query_definitions,
                              create_index=True, drop_index=False,
                              query_with_explain=self.run_query_with_explain,
                              query=self.run_query)
    # Remove all buckets
    for bucket in self.buckets:
        self.rest.delete_bucket(bucket.name)
    self.sleep(2)
    # Recreate the buckets and wait for the cluster to settle
    self._bucket_creation()
    self.sleep(2)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
    # Verify the result set is empty
    self.verify_index_absence(query_definitions=self.query_definitions, buckets=self.buckets)
    index_map = self.get_index_stats()
    self.assertTrue(len(index_map) == 0, "Index Stats still show {0}".format(index_map))
def test_warm_up_with_eviction(self):
    gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
    gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
    self.timeout = self.wait_timeout
    self.without_access_log = False
    self._stats_befor_warmup(self.buckets[0])
    self._restart_memcache(self.buckets[0])
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers[:self.nodes_init], self,
                                                         wait_time=self.wait_timeout,
                                                         wait_if_warmup=True)
    self.sleep(10, 'Wait some time before next load')
    self._load_all_buckets(self.master, gen_create2, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init], timeout=self.wait_timeout * 5)
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
def common_tearDown(self):
    if self.load_started:
        self.log.info("Stopping load in Teardown")
        SwapRebalanceBase.stop_load(self.loaders)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterHelper.wait_for_ns_servers_or_assert(self.servers, self)
def _restart_server(self, servers):
    for server in servers:
        shell = RemoteMachineShellConnection(server)
        shell.stop_couchbase()
        time.sleep(10)
        shell.start_couchbase()
        shell.disconnect()
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, self, wait_if_warmup=True)
def _common_clenup(self):
    rest = RestConnection(self.servers[0])
    if rest._rebalance_progress_status() == 'running':
        stopped = rest.stop_rebalance()
        self.assertTrue(stopped, msg="unable to stop rebalance")
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    ClusterOperationHelper.cleanup_cluster(self.servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
def test_warmup(self):
    for server in self.nodes_out_list:
        remote = RemoteMachineShellConnection(server)
        remote.stop_server()
        remote.start_server()
        remote.disconnect()
    self.test_mutation_operations()
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
    self.run_verification(self.buckets[0], self.kv_store)
def cleanup_cluster(self):
    if "skip_cleanup" not in TestInputSingleton.input.test_params:
        # Clean up all indexes that were created with this helper class
        for name in self._indexes:
            self.rest.delete_spatial(self.bucket, name)
            self.log.info("deleted spatial {0} from bucket {1}".format(name, self.bucket))
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self.testcase)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self.testcase)
def tearDown(self):
    # super(Upgrade_EpTests, self).tearDown()
    self.testcase = '2'
    if "skip_cleanup" not in TestInputSingleton.input.test_params:
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self.testcase)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self.testcase)
def common_tearDown(servers, testcase):
    RemoteUtilHelper.common_basic_setup(servers)
    log = logger.Logger.get_logger()
    log.info("10 seconds delay to wait for membase-server to start")
    time.sleep(10)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
def _recover_from_error(self, error_condition):
    if error_condition == "stop_server" or error_condition == "kill_erlang":
        self.start_server(self.servers[1])
    elif error_condition == "enable_firewall":
        self.stop_firewall_on_node(self.servers[1])
    elif error_condition == "reboot_server":
        # wait till node is ready after warmup
        ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[1]], self, wait_if_warmup=True)
def tearDown(self):
    super(BucketConfig, self).tearDown()
    return
    # unreachable: the early return above disables the legacy cleanup below
    if "skip_cleanup" not in TestInputSingleton.input.test_params:
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self.testcase)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self.testcase)
def reset(self):
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
        rest = RestConnection(server)
        if server.data_path:
            rest.set_data_path(data_path=server.data_path)
    self.log.info("Stopping load in Teardown")
    SwapRebalanceBase.stop_load(self.loaders)
    ClusterHelper.wait_for_ns_servers_or_assert(self.servers, self)
def common_setup(input, testcase):
    log.info("============== common_setup was started for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
    servers = input.servers
    RemoteUtilHelper.common_basic_setup(servers)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
    log.info("============== common_setup was finished for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
def common_setup(input, testcase):
    log.info("============== common_setup was started for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
    servers = input.servers
    RemoteUtilHelper.common_basic_setup(servers)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    log.info("============== common_setup was finished for test #{0} {1} =============="
             .format(testcase.case_number, testcase._testMethodName))
def tearDown(self):
    try:
        if hasattr(self, 'skip_buckets_handle') and self.skip_buckets_handle:
            return
        test_failed = (hasattr(self, '_resultForDoCleanups')
                       and len(self._resultForDoCleanups.failures or self._resultForDoCleanups.errors)) \
                      or (hasattr(self, '_exc_info') and self._exc_info()[1] is not None)
        if test_failed and TestInputSingleton.input.param("stop-on-failure", False) \
                or self.input.param("skip_cleanup", False):
            self.log.warn("CLEANUP WAS SKIPPED")
        else:
            if test_failed and TestInputSingleton.input.param('get_trace', None):
                for server in self.servers:
                    try:
                        shell = RemoteMachineShellConnection(server)
                        output, _ = shell.execute_command("ps -aef|grep %s" %
                                                          TestInputSingleton.input.param('get_trace', None))
                        output = shell.execute_command("pstack %s" % output[0].split()[1].strip())
                        print output[0]
                    except:
                        pass
            if test_failed and self.input.param('BUGS', False):
                self.log.warn("Test failed. Possible reason is: {0}".format(self.input.param('BUGS', False)))
            self.log.info("============== basetestcase cleanup was started for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
            rest = RestConnection(self.master)
            alerts = rest.get_alerts()
            if alerts is not None and len(alerts) != 0:
                self.log.warn("Alerts were found: {0}".format(alerts))
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            if self.input.param("forceEject", False):
                for server in self.servers:
                    if server != self.servers[0]:
                        try:
                            rest = RestConnection(server)
                            rest.force_eject_node()
                        except BaseException as e:
                            self.log.error(e)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            self.sleep(10)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            self.log.info("============== basetestcase cleanup was finished for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
    except BaseException:
        # increase case_number to retry tearDown in setup for the next test
        self.case_number += 1000
    finally:
        # stop all existing task manager threads
        self.cluster.shutdown(force=True)
        self._log_finish(self)
def reset(self):
    rest = RestConnection(self.servers[0])
    if rest._rebalance_progress_status() == 'running':
        self.log.warning("rebalancing is still running, test should be verified")
        stopped = rest.stop_rebalance()
        self.assertTrue(stopped, msg="unable to stop rebalance")
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    self.log.info("Stopping load in Teardown")
    ClusterHelper.wait_for_ns_servers_or_assert(self.servers, self)
def _verify_data(self, server):
    query = {"stale": "false", "full_set": "true"}
    self.sleep(60, "Node {0} should be warming up".format(server.ip))
    ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
    self._wait_for_stats_all_buckets(self.servers)
    for bucket, ddoc_view_map in self.bucket_ddoc_map.items():
        for ddoc_name, view_list in ddoc_view_map.items():
            for view in view_list:
                self.cluster.query_view(self.master, ddoc_name, view.name, query)
    self._verify_ddoc_ops_all_buckets()
    self._verify_ddoc_data_all_buckets()
def perform_warm_up(self):
    warmup_nodes = self.servers[-self.warmup_nodes:]
    for warmup_node in warmup_nodes:
        shell = RemoteMachineShellConnection(warmup_node)
        shell.stop_couchbase()
        shell.disconnect()
    self.sleep(20)
    for warmup_node in warmup_nodes:
        shell = RemoteMachineShellConnection(warmup_node)
        shell.start_couchbase()
        shell.disconnect()
    ClusterOperationHelper.wait_for_ns_servers_or_assert(warmup_nodes, self)