def test_failover_console(self, test_case, timeout=_TIMEOUT):
    """Tests failover console.

    test_case[in]    Test case tuple: (server, command, kill_console,
                     log_filename, comment, key_phrase, unregister).
    timeout[in]      Maximum number of wait iterations for each stage
                     (default: _TIMEOUT).

    Launches the failover console as a subprocess, kills the monitored
    master, waits for failover to complete, then scans the console log.

    Returns tuple - (comment, found_row); found_row is True if any of
    the searched phrases was found in the log.
    """
    server = test_case[0]
    cmd = test_case[1]
    kill_console = test_case[2]
    log_filename = test_case[3]
    comment = test_case[4]
    key_phrase = test_case[5]
    unregister = test_case[6]
    server_version = server.get_version()

    if unregister:
        # Unregister any failover instance from server.
        # Best-effort: ignore utility errors (table may not exist or
        # the server may reject the drop).
        try:
            server.exec_query("DROP TABLE IF EXISTS "
                              "mysql.failover_console")
        except UtilError:
            pass

    # Since this test case expects the console to stop, we can launch it
    # via a subprocess and wait for it to finish.
    if self.debug:
        print(comment)
        print("# COMMAND: {0}".format(cmd))

    # Cleanup in case previous test case failed.
    # NOTE(review): os.system() reports failure via its return value, so
    # this except OSError is unlikely to ever trigger — os.rmdir() would
    # make the error handling meaningful; confirm before changing.
    if os.path.exists(self.failover_dir):
        try:
            os.system("rmdir {0}".format(self.failover_dir))
        except OSError:
            pass

    # Launch the console in stealth mode
    proc, f_out = self.start_process(cmd)

    # Wait for console to load
    if self.debug:
        print("# Waiting for console to start.")
    i = 1
    time.sleep(1)
    # NOTE(review): Popen.poll() returns None while the process is still
    # running, so this loop only iterates while the process has already
    # exited — confirm the condition is intended (it may never loop for
    # a healthy console).
    while proc.poll() is not None:
        time.sleep(1)
        i += 1
        if i > timeout:
            if self.debug:
                print("# Timeout console to start.")
            raise MUTLibError("{0}: failed - timeout waiting for "
                              "console to start.".format(comment))

    # Wait for the failover console to register on master and start
    # its monitoring process
    phrase = "Failover console started"
    if self.debug:
        print("Waiting for failover console to register master and start "
              "its monitoring process")

    # Wait because of the warning message that may appear due to
    # mixing hostnames and IP addresses
    time.sleep(WARNING_SLEEP_TIME + 1)
    i = 0

    # Wait for logfile file to be created
    if self.debug:
        print("# Waiting for logfile to be created.")
    # for/else: the else branch runs only if the loop exhausts without
    # ever hitting break, i.e. the logfile never appeared.
    for i in range(timeout):
        if os.path.exists(log_filename):
            break
        else:
            time.sleep(1)
    else:
        raise MUTLibError("{0}: failed - timeout waiting for "
                          "logfile '{1}' to be "
                          "created.".format(comment, log_filename))

    # Scan the log until the startup phrase appears.
    # NOTE(review): 'i' keeps the value left by the loop above, so time
    # already spent waiting for the logfile counts against this budget —
    # confirm the shared budget is intended.
    with open(log_filename, 'r') as file_:
        # while/else: the else branch runs when the budget is exhausted
        # without the phrase having been found (no break).
        while i < timeout:
            line = file_.readline()
            if not line:
                # Reached EOF; wait for the console to write more.
                i += 1
                time.sleep(1)
            elif phrase in line:
                break
        else:
            if self.debug:
                print("# Timeout waiting for failover console to register "
                      "master and start its monitoring process")
            raise MUTLibError("{0}: failed - timeout waiting for console "
                              "to register master and start its "
                              "monitoring process".format(comment))

    # Now, kill the master - wha-ha-ha!
    # Read the master's pid from its pid file.
    res = server.show_server_variable('pid_file')
    pid_file = open(res[0][1])
    pid = int(pid_file.readline().strip('\n'))
    if self.debug:
        print("# Terminating server {0} via pid = {1}".format(server.port,
                                                              pid))
    pid_file.close()

    # Get server datadir to clean directory after kill.
    res = server.show_server_variable("datadir")
    datadir = res[0][1]

    # Stop the server
    server.disconnect()
    self.kill(pid)

    # Need to wait until the process is really dead.
    if self.debug:
        print("# Waiting for master to stop.")
    i = 0
    while self.is_process_alive(pid, int(server.port) - 1,
                                int(server.port) + 1):
        time.sleep(1)
        i += 1
        if i > timeout:
            if self.debug:
                print("# Timeout master to fail.")
            raise MUTLibError("{0}: failed - timeout waiting for "
                              "master to end.".format(comment))

    # Remove server from the list (and clean data directory).
    if self.debug:
        print("# Removing server name '{0}'.".format(server.role))
    delete_directory(datadir)
    self.servers.remove_server(server.role)

    # Now wait for interval to occur.
    # The console creates self.failover_dir via its exec-post-fail hook;
    # its appearance signals that failover ran.
    if self.debug:
        print("# Waiting for failover to complete.")
    i = 0
    while not os.path.isdir(self.failover_dir):
        time.sleep(5)
        i += 1
        if i > timeout:
            if self.debug:
                print("# Timeout console failover.")
            raise MUTLibError("{0}: failed - timeout waiting for "
                              "exec_post_fail.".format(comment))

    # Need to poll here and wait for console to really end.
    ret_val = self.stop_process(proc, f_out, kill_console)

    # Wait for console to end
    if self.debug:
        print("# Waiting for console to end.")
    i = 0
    while proc.poll() is None:
        time.sleep(1)
        i += 1
        if i > timeout:
            if self.debug:
                print("# Timeout console to end.")
            raise MUTLibError("{0}: failed - timeout waiting for "
                              "console to end.".format(comment))
    if self.debug:
        print("# Return code from console termination = "
              "{0}".format(ret_val))

    # Check result code from stop_process then read the log to find the
    # key phrase.
    found_row = False
    log_file = open(log_filename)
    rows = log_file.readlines()
    if self.debug:
        print("# Looking in log for: {0}".format(key_phrase))
    # No break here: every matching row is reported in debug mode.
    for row in rows:
        if key_phrase in row:
            found_row = True
            if self.debug:
                print("# Found in row = '{0}'.".format(row[:len(row) - 1]))

    # Find MySQL Utilities version in the log
    if self.debug:
        print("# Looking in log for: {0}"
              "".format(_UTILITIES_VERSION_PHRASE))
    for row in rows:
        if _UTILITIES_VERSION_PHRASE in row:
            found_row = True
            if self.debug:
                print("# Found in row = '{0}'.".format(row[:-1]))
            break

    # Find MySQL server version in the log
    host_port = "{host}:{port}".format(**get_connection_dictionary(server))
    key_phrase = MSG_MYSQL_VERSION.format(server=host_port,
                                          version=server_version)
    if self.debug:
        print("# Looking in log for: {0}".format(key_phrase))
    for row in rows:
        if key_phrase in row:
            found_row = True
            if self.debug:
                print("# Found in row = '{0}'.".format(row[:-1]))
            break

    log_file.close()

    if not found_row:
        # Dump the whole log to aid diagnosis (Python 2 print statement;
        # trailing comma suppresses the extra newline).
        print("# ERROR: Cannot find entry in log:")
        for row in rows:
            print row,

    # Cleanup after test case
    try:
        os.unlink(log_filename)
    except OSError:
        pass
    # NOTE(review): same dead except OSError as above — os.system() does
    # not raise on rmdir failure.
    if os.path.exists(self.failover_dir):
        try:
            os.system("rmdir {0}".format(self.failover_dir))
        except OSError:
            pass
    return comment, found_row
def run(self):
    """Exercise mysqlrpladmin log-file handling.

    Covers three scenarios: the log file is created from scratch, an
    existing log is reopened, and the utility fails when the log is
    read-only.  Afterwards the log is checked for the expected version
    banners and non-deterministic output is masked.

    Returns True on success; raises MUTLibError on any failed case.
    """
    conn_master = self.build_connection_string(self.server1).strip(' ')
    conn_slave = self.build_connection_string(self.server2).strip(' ')

    # Master and slave may legitimately refer to the same server here.
    command = ' '.join([
        "mysqlrpladmin.py",
        "--master={0}".format(conn_master),
        "--slave={0}".format(conn_slave),
        "--log={0}".format(_LOGNAME),
        "health",
    ])

    def run_case(expected_res, comment):
        # Execute one test case; abort the whole test on failure.
        if not mutlib.System_test.run_test_case(self, expected_res,
                                                command, comment):
            raise MUTLibError("{0}: failed".format(comment))

    # Case 1: the utility creates the log file from scratch.
    run_case(0, "Test Case 1 - Log file is newly created")

    # Case 2: the now-existing log file is reopened.
    run_case(0, "Test Case 2 - Log file is reopened")

    # Case 3: the utility must error out (result 2) when the log is
    # not writable.
    comment = "Test Case 3 - Log file can not be written to"
    os.chmod(_LOGNAME, stat.S_IREAD)  # strip write permission
    run_case(2, comment)

    # The log must contain the utilities version banner...
    self.find_stop_phrase(
        _LOGNAME, comment,
        MSG_UTILITIES_VERSION.format(utility="mysqlrpladmin",
                                     version=VERSION_STRING))

    # ...the master's MySQL server version...
    addr = "{host}:{port}".format(
        **get_connection_dictionary(conn_master))
    self.find_stop_phrase(
        _LOGNAME, comment,
        MSG_MYSQL_VERSION.format(server=addr,
                                 version=self.server1.get_version()))

    # ...and the slave's MySQL server version.
    addr = "{host}:{port}".format(
        **get_connection_dictionary(conn_slave))
    self.find_stop_phrase(
        _LOGNAME, comment,
        MSG_MYSQL_VERSION.format(server=addr,
                                 version=self.server2.get_version()))

    # Replace non-deterministic output with fixed masks.
    rpl_admin.test.do_masks(self)

    self.remove_result("NOTE: Log file 'temp_log.txt' does not exist. "
                       "Will be created.")
    return True
def run(self):
    """Run the mysqlrpladmin logging test cases.

    Verifies log creation, log reopening, and the error path when the
    log file is read-only, then asserts that the utility and server
    version banners were written to the log.

    Returns True on success; raises MUTLibError on any failed case.
    """
    master_conn = self.build_connection_string(self.server1).strip(' ')
    slave_conn = self.build_connection_string(self.server2).strip(' ')

    # It is acceptable for master and slave to be the same server.
    command = ' '.join(("mysqlrpladmin.py",
                        "--master={0}".format(master_conn),
                        "--slave={0}".format(slave_conn),
                        "--log={0}".format(_LOGNAME),
                        "health"))

    # (expected result, description, optional setup callable) per case.
    cases = [
        (0, "Log file is newly created", None),
        (0, "Log file is reopened", None),
        (2, "Log file can not be written to",
         lambda: os.chmod(_LOGNAME, stat.S_IREAD)),
    ]
    comment = ""
    for num, (exp_res, description, setup) in enumerate(cases, 1):
        comment = "Test Case {0} - {1}".format(num, description)
        if setup is not None:
            setup()  # e.g. make the log read-only for the last case
        if not mutlib.System_test.run_test_case(self, exp_res, command,
                                                comment):
            raise MUTLibError("{0}: failed".format(comment))

    # Confirm the utilities version banner reached the log.
    self.find_stop_phrase(
        _LOGNAME, comment,
        MSG_UTILITIES_VERSION.format(utility="mysqlrpladmin",
                                     version=VERSION_STRING))

    # Confirm master and slave server versions were logged, in order.
    for conn, server in ((master_conn, self.server1),
                         (slave_conn, self.server2)):
        host_port = "{host}:{port}".format(
            **get_connection_dictionary(conn))
        self.find_stop_phrase(
            _LOGNAME, comment,
            MSG_MYSQL_VERSION.format(server=host_port,
                                     version=server.get_version()))

    # Normalize non-deterministic output.
    rpl_admin.test.do_masks(self)

    self.remove_result("NOTE: Log file 'temp_log.txt' does not exist. "
                       "Will be created.")
    return True
def test_rplms(self, cmd, logfile, comment, kill_process=True,
               stop_phrase=None):
    """Test multi-source replication.

    cmd[in]             Command to be executed.
    logfile[in]         Log filename.
    comment[in]         Test comment.
    kill_process[in]    True if the process is to be killed.
    stop_phrase[in]     Stop phrase to be searched in the log.

    Launches the command as a subprocess, waits until the round-robin
    scheduler has switched through all the masters, then compares the
    replicated databases and checks the log for the expected phrases.
    """
    # Since this test case expects the process to stop, launch it as a
    # subprocess and wait for it to finish.
    if self.debug:
        print(comment)
        print("# COMMAND: {0}".format(cmd))

    # Start the utility in the background.
    proc, f_out = self.start_process(cmd)

    if self.debug:
        print("# Waiting for process to start.")
    attempt = 1
    time.sleep(1)
    while proc.poll() is not None:
        time.sleep(1)
        attempt += 1
        if attempt > _TIMEOUT:
            if self.debug:
                print("# Timeout process to start.")
            raise MUTLibError("{0}: failed - timeout waiting for "
                              "process to start.".format(comment))

    # Poll until the utility creates its log file (for/else: the else
    # fires only when the loop exhausts without a break).
    if self.debug:
        print("# Waiting for logfile to be created.")
    for _ in range(_TIMEOUT):
        if os.path.exists(logfile):
            break
        time.sleep(1)
    else:
        raise MUTLibError("{0}: failed - timeout waiting for "
                          "logfile '{1}' to be "
                          "created.".format(comment, logfile))

    # Let the round-robin scheduler cycle through every master.
    self.wait_for_switching_all_masters(logfile, comment)

    # All masters replicated; the databases must now match.
    self.compare_databases(comment)

    if kill_process:
        self.kill_process(proc, f_out, comment)

    # Optional caller-supplied phrase to verify in the log.
    if stop_phrase:
        self.find_stop_phrase(logfile, comment, stop_phrase)

    # The log must record the MySQL Utilities version...
    self.find_stop_phrase(
        logfile, comment,
        MSG_UTILITIES_VERSION.format(utility="mysqlrplms",
                                     version=VERSION_STRING))

    # ...and the version of every participating server.
    for server in (self.server1, self.server2, self.server3):
        address = "{host}:{port}".format(
            **get_connection_dictionary(server))
        self.find_stop_phrase(
            logfile, comment,
            MSG_MYSQL_VERSION.format(server=address,
                                     version=server.get_version()))

    # Cleanup after test case; ignore a missing log file.
    try:
        os.unlink(logfile)
    except OSError:
        pass
def test_failover_console(self, test_case):
    """Tests failover console.

    test_case[in]    Test case tuple: (server, command, kill_console,
                     log_filename, comment, key_phrase, unregister).

    Launches the failover console as a subprocess, kills the monitored
    master, waits for failover to complete, then scans the console log.

    Returns tuple - (comment, found_row); found_row is True if any of
    the searched phrases was found in the log.
    """
    server = test_case[0]
    cmd = test_case[1]
    kill_console = test_case[2]
    log_filename = test_case[3]
    comment = test_case[4]
    key_phrase = test_case[5]
    unregister = test_case[6]
    server_version = server.get_version()

    if unregister:
        # Unregister any failover instance from server.
        # Best-effort: utility errors are deliberately ignored.
        try:
            server.exec_query("DROP TABLE IF EXISTS "
                              "mysql.failover_console")
        except UtilError:
            pass

    # Since this test case expects the console to stop, we can launch it
    # via a subprocess and wait for it to finish.
    if self.debug:
        print(comment)
        print("# COMMAND: {0}".format(cmd))

    # Cleanup in case previous test case failed.
    # NOTE(review): os.system() signals failure through its return
    # value, so the except OSError below is effectively dead code —
    # os.rmdir() would make it meaningful; confirm before changing.
    if os.path.exists(self.failover_dir):
        try:
            os.system("rmdir {0}".format(self.failover_dir))
        except OSError:
            pass

    # Launch the console in stealth mode
    proc, f_out = self.start_process(cmd)

    # Wait for console to load
    if self.debug:
        print("# Waiting for console to start.")
    i = 1
    time.sleep(1)
    # NOTE(review): Popen.poll() is None while the process is running,
    # so this loop only iterates if the process already exited —
    # confirm the condition is intended.
    while proc.poll() is not None:
        time.sleep(1)
        i += 1
        if i > _TIMEOUT:
            if self.debug:
                print("# Timeout console to start.")
            raise MUTLibError("{0}: failed - timeout waiting for "
                              "console to start.".format(comment))

    # Wait for the failover console to register on master and start
    # its monitoring process
    phrase = "Failover console started"
    if self.debug:
        print("Waiting for failover console to register master and start "
              "its monitoring process")

    # Wait because of the warning message that may appear due to
    # mixing hostnames and IP addresses
    time.sleep(WARNING_SLEEP_TIME + 1)
    i = 0

    # Wait for logfile file to be created
    if self.debug:
        print("# Waiting for logfile to be created.")
    # for/else: the else branch runs only when the logfile never
    # appeared within the budget (loop exhausted without break).
    for i in range(_TIMEOUT):
        if os.path.exists(log_filename):
            break
        else:
            time.sleep(1)
    else:
        raise MUTLibError("{0}: failed - timeout waiting for "
                          "logfile '{1}' to be "
                          "created.".format(comment, log_filename))

    # Scan the log until the startup phrase appears.
    # NOTE(review): 'i' retains the value left by the loop above, so
    # time spent waiting for the logfile shares this budget — confirm
    # that is intended.
    with open(log_filename, 'r') as file_:
        # while/else: the else branch fires if the budget runs out
        # without the phrase being found (no break).
        while i < _TIMEOUT:
            line = file_.readline()
            if not line:
                # At EOF; give the console time to write more.
                i += 1
                time.sleep(1)
            elif phrase in line:
                break
        else:
            if self.debug:
                print("# Timeout waiting for failover console to register "
                      "master and start its monitoring process")
            raise MUTLibError("{0}: failed - timeout waiting for console "
                              "to register master and start its "
                              "monitoring process".format(comment))

    # Now, kill the master - wha-ha-ha!
    # Read the master's pid from its pid file.
    res = server.show_server_variable('pid_file')
    pid_file = open(res[0][1])
    pid = int(pid_file.readline().strip('\n'))
    if self.debug:
        print("# Terminating server {0} via pid = {1}".format(server.port,
                                                              pid))
    pid_file.close()

    # Get server datadir to clean directory after kill.
    res = server.show_server_variable("datadir")
    datadir = res[0][1]

    # Stop the server
    server.disconnect()
    self.kill(pid)

    # Need to wait until the process is really dead.
    if self.debug:
        print("# Waiting for master to stop.")
    i = 0
    while self.is_process_alive(pid, int(server.port) - 1,
                                int(server.port) + 1):
        time.sleep(1)
        i += 1
        if i > _TIMEOUT:
            if self.debug:
                print("# Timeout master to fail.")
            raise MUTLibError("{0}: failed - timeout waiting for "
                              "master to end.".format(comment))

    # Remove server from the list (and clean data directory).
    if self.debug:
        print("# Removing server name '{0}'.".format(server.role))
    delete_directory(datadir)
    self.servers.remove_server(server.role)

    # Now wait for interval to occur.
    # The appearance of self.failover_dir signals that the console's
    # exec-post-fail hook ran, i.e. failover completed.
    if self.debug:
        print("# Waiting for failover to complete.")
    i = 0
    while not os.path.isdir(self.failover_dir):
        time.sleep(5)
        i += 1
        if i > _TIMEOUT:
            if self.debug:
                print("# Timeout console failover.")
            raise MUTLibError("{0}: failed - timeout waiting for "
                              "exec_post_fail.".format(comment))

    # Need to poll here and wait for console to really end.
    ret_val = self.stop_process(proc, f_out, kill_console)

    # Wait for console to end
    if self.debug:
        print("# Waiting for console to end.")
    i = 0
    while proc.poll() is None:
        time.sleep(1)
        i += 1
        if i > _TIMEOUT:
            if self.debug:
                print("# Timeout console to end.")
            raise MUTLibError("{0}: failed - timeout waiting for "
                              "console to end.".format(comment))
    if self.debug:
        print("# Return code from console termination = "
              "{0}".format(ret_val))

    # Check result code from stop_process then read the log to find the
    # key phrase.
    found_row = False
    log_file = open(log_filename)
    rows = log_file.readlines()
    if self.debug:
        print("# Looking in log for: {0}".format(key_phrase))
    # No break: every matching row is reported in debug mode.
    for row in rows:
        if key_phrase in row:
            found_row = True
            if self.debug:
                print("# Found in row = '{0}'.".format(row[:len(row) - 1]))

    # Find MySQL Utilities version in the log
    if self.debug:
        print("# Looking in log for: {0}"
              "".format(_UTILITIES_VERSION_PHRASE))
    for row in rows:
        if _UTILITIES_VERSION_PHRASE in row:
            found_row = True
            if self.debug:
                print("# Found in row = '{0}'.".format(row[:-1]))
            break

    # Find MySQL server version in the log
    host_port = "{host}:{port}".format(**get_connection_dictionary(server))
    key_phrase = MSG_MYSQL_VERSION.format(server=host_port,
                                          version=server_version)
    if self.debug:
        print("# Looking in log for: {0}".format(key_phrase))
    for row in rows:
        if key_phrase in row:
            found_row = True
            if self.debug:
                print("# Found in row = '{0}'.".format(row[:-1]))
            break

    log_file.close()

    if not found_row:
        # Dump the entire log to aid diagnosis (Python 2 print
        # statement; trailing comma suppresses the extra newline).
        print("# ERROR: Cannot find entry in log:")
        for row in rows:
            print row,

    # Cleanup after test case
    try:
        os.unlink(log_filename)
    except OSError:
        pass
    # NOTE(review): same dead except OSError pattern as above.
    if os.path.exists(self.failover_dir):
        try:
            os.system("rmdir {0}".format(self.failover_dir))
        except OSError:
            pass
    return comment, found_row