def _flush_messages(self):
    while len(self._log_queue) > 0:
        mtype, text, detail = self._log_queue.popleft()
        if "\n" in detail:
            detail = "\n " + ("\n ".join(detail.split("\n")))
        if mtype == "INFO":
            if detail:
                self._log_text.append_text_and_scroll("%s: %s\n" % (text, detail), True)
            else:
                self._log_text.append_text_and_scroll("%s\n" % (text), True)
        elif mtype == "OUTPUT":
            # output text must already come with newlines
            if detail:
                self._log_text.append_text_and_scroll("%s: %s" % (text, detail), True)
            else:
                self._log_text.append_text_and_scroll("%s" % (text), True)
        elif mtype == "PROGRESS":
            pct, y, text = text.partition(":")
            try:
                self.update_progress(locale.atof(pct), text)
            except Exception:
                grt.log_debug('Wizard', 'Exception raised when converting "%s" to float using locale.atof(). Exception ignored\n' % pct)
        else:
            if mtype == "ERROR":
                self._errors += 1
                self._task_errors.append(text)
            elif mtype == "WARNING":
                self._warnings += 1
                self._task_warnings.append(text)

            if detail:
                self._log_text.append_text_and_scroll("%s: %s: %s\n" % (mtype, text, detail), True)
            else:
                self._log_text.append_text_and_scroll("%s: %s\n" % (mtype, text), True)

def reader(self, ssh_session):
    what = None
    out = ""
    timeouts = 12
    while self.running[0]:  # running is passed in a list, by "reference"
        try:
            ch = ssh_session.recv(1)
            timeouts = 12
            if ch == "C":
                what = self.parse_cpu
            elif ch == "\r" or ch == "\n":
                if what is not None:
                    what(out)
                what = None
                out = ""
            elif ch in "0123456789. ":
                out += ch
            elif ch == ",":
                out += "."
            else:
                what = None
                out = ""
        except socket.timeout:
            timeouts -= 1
            if timeouts <= 0:
                ssh_session.close()
                raise Exception("Can't read from remote Windows script")

    log_debug('%s:%s.reader()' % (_this_file, self.__class__.__name__), 'Leaving monitor thread which polls remote windows\n')

def poll_sources(self):
    while self.running[0] and self.ctrl_be.running:  # sleep here
        for cmd in self.sources:
            cmd.poll()
        time.sleep(self.interval)
    log_debug('%s:%s.poll_sources()' % (_this_file, self.__class__.__name__), 'Exiting monitor thread...\n')

def wait_connection(self, port):
    tunnel = self.tunnel_by_port.get(port)
    if not tunnel:
        return 'Could not find a tunnel for port %d' % port
    error = None
    if tunnel.isAlive():
        while True:
            # Process any message in queue. Every retrieved message is printed.
            # If an error is detected in the queue, exit returning its message:
            try:
                msg_type, msg = tunnel.q.get_nowait()
            except Queue.Empty:
                pass
            else:
                _msg = msg
                if type(msg) is tuple:
                    msg = '\n' + ''.join(traceback.format_exception(*msg))
                    _msg = str(_msg[1])
                log_debug(_this_file, "%s: %s\n" % (msg_type, msg))
                if msg_type == 'ERROR':
                    error = _msg
                    break  # Exit returning the error message
            if not tunnel.is_connecting() or not tunnel.isAlive():
                break
            time.sleep(0.3)
    log_debug(_this_file, "returning from wait_connection(%s): %s\n" % (port, error))
    return error

def save_file_content_and_backup(self, path, content, backup_extension, as_admin = False, admin_password = None):
    # Check if dir, where config file will be stored, is writable
    dirname, filename = splitpath(path)

    if not as_admin and not self.is_dir_writable(dirname.strip(" \r\t\n")):
        raise PermissionDeniedError("Cannot write to directory %s" % dirname)

    if self.ssh is not None:
        ## Get temp dir for using as tmpdir
        tmpdir, status = self.process_ops.get_cmd_output("echo %temp%")
        if type(tmpdir) is unicode:
            tmpdir = tmpdir.encode("utf8")
        if type(tmpdir) is str:
            tmpdir = tmpdir.strip(" \r\t\n")
            if tmpdir[1] == ":":
                tmpdir = tmpdir[2:]
            else:
                log_debug(_this_file, '%s: Temp directory path "%s" is not in expected form. The expected form is something like "C:\\Windows\\Temp"\n' % (self.__class__.__name__, tmpdir))
                tmpdir = None
            log_debug2(_this_file, '%s: Got temp dir: "%s"\n' % (self.__class__.__name__, tmpdir))
        else:
            tmpdir = None

        if not tmpdir:
            tmpdir = dirname

        tmpfilename = tmpdir + r"\workbench-temp-file.ini"

        log_debug(_this_file, '%s: Remotely writing contents to temporary file "%s"\n' % (self.__class__.__name__, tmpfilename))
        log_debug3(_this_file, '%s: %s\n' % (self.__class__.__name__, content))
        self.ssh.set_contents(tmpfilename, content)

        if backup_extension:
            log_debug(_this_file, '%s: Backing up "%s"\n' % (self.__class__.__name__, path))
            backup_cmd = "copy /y " + quote_path_win(path) + " " + quote_path_win(path + backup_extension)
            msg, code = self.process_ops.get_cmd_output(backup_cmd)
            if code != 0:
                print backup_cmd, "->", msg
                log_error(_this_file, '%s: Error backing up file: %s\n' % (self.__class__.__name__, backup_cmd + '->' + msg))
                raise RuntimeError("Error backing up file: %s" % msg)

        copy_to_dest = "copy /y " + quote_path_win(tmpfilename) + " " + quote_path_win(path)
        delete_tmp = "del " + quote_path_win(tmpfilename)

        log_debug(_this_file, '%s: Copying file to final destination: "%s"\n' % (self.__class__.__name__, copy_to_dest))
        msg, code = self.process_ops.get_cmd_output(copy_to_dest)
        if code != 0:
            print copy_to_dest, "->", msg
            log_error(_this_file, '%s: Error copying temporary file over destination file: %s\n%s to %s\n' % (self.__class__.__name__, msg, tmpfilename, path))
            raise RuntimeError("Error copying temporary file over destination file: %s\n%s to %s" % (msg, tmpfilename, path))

        log_debug(_this_file, '%s: Deleting tmp file: "%s"\n' % (self.__class__.__name__, delete_tmp))
        msg, code = self.process_ops.get_cmd_output(delete_tmp)
        if code != 0:
            print "Could not delete temporary file %s: %s" % (tmpfilename, msg)
            log_info(_this_file, '%s: Could not delete temporary file "%s": %s\n' % (self.__class__.__name__, tmpfilename, msg))
    else:
        raise Exception("No SSH session active, cannot save file remotely")

def fetch_windows_shell_info(self):
    # get some info from the remote shell
    result, code = self.get_cmd_output("chcp.com")
    if code == 0:
        result = result.strip(" .\r\n").split()
        if len(result) > 0:
            self.cmd_output_encoding = "cp" + result[-1]
    else:
        print "WARNING: Unable to determine codepage from shell: %s" % result
        log_warning(_this_file, '%s.fetch_windows_shell_info(): WARNING: Unable to determine codepage from shell: "%s"\n' % (self.__class__.__name__, str(result)))

    result, code = self.get_cmd_output("echo %PROCESSOR_ARCHITECTURE%")
    if result:
        result = result.strip()

    ProgramFilesVar = None
    x86var = None
    if result != "x86":  # we are on x64 win in x64 mode
        x86var = WIN_PROGRAM_FILES_X86_VAR
        ProgramFilesVar = WIN_PROGRAM_FILES_VAR
    else:
        result, code = self.get_cmd_output("echo %PROCESSOR_ARCHITEW6432%")
        if result:
            result = result.strip()
        if result == "%PROCESSOR_ARCHITEW6432%":  # we are on win 32
            x86var = WIN_PROGRAM_FILES_VAR
            ProgramFilesVar = WIN_PROGRAM_FILES_VAR
        else:  # 32bit app on x64 win
            x86var = WIN_PROGRAM_FILES_VAR
            ProgramFilesVar = WIN_PROGRAM_FILES_X64_VAR

    result, code = self.get_cmd_output("echo " + ProgramFilesVar)
    if code == 0:
        self.target_shell_variables["%ProgramFiles%"] = result.strip("\r\n")
        if ProgramFilesVar != "%ProgramFiles%":
            self.target_shell_variables[ProgramFilesVar] = result.strip("\r\n")
    else:
        print "WARNING: Unable to fetch ProgramFiles value in Windows machine: %s" % result
        log_warning(_this_file, '%s.fetch_windows_shell_info(): WARNING: Unable to fetch ProgramFiles value in Windows machine: "%s"\n' % (self.__class__.__name__, str(result)))

    # this one only exists in 64bit windows
    result, code = self.get_cmd_output("echo " + x86var)
    if code == 0:
        self.target_shell_variables["%ProgramFiles(x86)%"] = result.strip("\r\n")
    else:
        print "WARNING: Unable to fetch ProgramFiles(x86) value in local Windows machine: %s" % result
        log_warning(_this_file, '%s.fetch_windows_shell_info(): WARNING: Unable to fetch ProgramFiles(x86) value in local Windows machine: "%s"\n' % (self.__class__.__name__, str(result)))

    log_debug(_this_file, '%s.fetch_windows_shell_info(): Encoding: "%s", Shell Variables: "%s"\n' % (self.__class__.__name__, self.cmd_output_encoding, str(self.target_shell_variables)))

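# The architecture detection above is easier to follow in isolation. A minimal sketch,
# assuming placeholder values for the WIN_PROGRAM_FILES_* constants (in the real module
# they hold the corresponding "%...%" environment-variable names; the exact values used
# here are illustrative, not taken from the source):
WIN_PROGRAM_FILES_VAR = "%ProgramFiles%"
WIN_PROGRAM_FILES_X86_VAR = "%ProgramFiles(x86)%"
WIN_PROGRAM_FILES_X64_VAR = "%ProgramW6432%"

def pick_program_files_vars(processor_architecture, processor_architew6432):
    # 64-bit shell on 64-bit Windows: PROCESSOR_ARCHITECTURE is not "x86"
    if processor_architecture != "x86":
        return WIN_PROGRAM_FILES_VAR, WIN_PROGRAM_FILES_X86_VAR
    # 32-bit Windows: PROCESSOR_ARCHITEW6432 is undefined, so "echo" returns the literal name
    if processor_architew6432 == "%PROCESSOR_ARCHITEW6432%":
        return WIN_PROGRAM_FILES_VAR, WIN_PROGRAM_FILES_VAR
    # 32-bit shell (e.g. a 32-bit sshd) on 64-bit Windows
    return WIN_PROGRAM_FILES_X64_VAR, WIN_PROGRAM_FILES_VAR

# Returns (ProgramFilesVar, x86var), mirroring the branches in fetch_windows_shell_info():
print pick_program_files_vars("AMD64", "")                        # native 64-bit shell
print pick_program_files_vars("x86", "%PROCESSOR_ARCHITEW6432%")  # 32-bit Windows
print pick_program_files_vars("x86", "AMD64")                     # 32-bit process on 64-bit Windows
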
def poll(self):
    output = StringIO.StringIO()
    if self.ctrl_be.server_helper.execute_command("/usr/bin/uptime", output_handler=output.write) == 0:
        data = output.getvalue().strip(" \r\t\n,:.")
        # in some systems, the format is x.xx x.xx x.xx and in others, it's x.xx, x.xx, x.xx
        load_value = data.split()[-3]
        load_value = load_value.rstrip(",")
        try:
            result = float(load_value.replace(',', '.'))
        except (ValueError, TypeError):
            log_error(_this_file, "Shell source %s returned wrong value. Expected int or float but got %s\n" % (self.name, load_value))
            result = 0
        if self.widget is not None:
            self.widget.set_value(self.calc_cb(result) if self.calc_cb else result)
        if self.label_cb is not None:
            self.ctrl_be.uitask(self.label.set_text, self.label_cb(result))
    else:
        log_debug(_this_file, "CPU stat command returned error: %s\n" % output.getvalue())

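# For reference, the same parsing step applied to a sample uptime line outside the class;
# the sample string is illustrative, not captured from a real server:
data = " 16:02:11 up 12 days,  3:04,  2 users,  load average: 0.15, 0.30, 0.25".strip(" \r\t\n,:.")
load_value = data.split()[-3]              # third field from the end is the 1-minute load average
load_value = load_value.rstrip(",")        # handles the "x.xx, x.xx, x.xx" variant
print float(load_value.replace(',', '.'))  # 0.15; also tolerates locales using ',' as decimal separator
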
def reverseEngineer(cls, connection, catalog_name, schemata_list, context):
    from grt.modules import MysqlSqlFacade

    grt.send_progress(0, "Reverse engineering catalog information")
    cls.check_interruption()
    catalog = cls.reverseEngineerCatalog(connection, catalog_name)

    # calculate total workload 1st
    grt.send_progress(0.1, 'Preparing...')
    get_tables = context.get("reverseEngineerTables", True)

    # 10% of the progress is for preparation
    total = 1e-10  # total should not be zero to avoid DivisionByZero exceptions
    accumulated_progress = 0.1
    total += len(cls.getTableNames(connection, catalog_name, '')) if get_tables else 0
    grt.send_progress(0.1, "Gathered stats")

    # Now the second pass for reverse engineering tables:
    if get_tables:
        idx = 0
        for object_type, name, tbl_name, _, sql in cls.execute_query(connection, "SELECT * FROM sqlite_master"):
            if object_type in ('view', 'trigger') or not sql or tbl_name.startswith('sqlite_'):
                continue
            sql = sql.replace('[', '').replace(']', '')
            grt.log_debug('SQLiteReverseEngineering', 'Processing this sql:\n%s;' % sql)
            MysqlSqlFacade.parseSqlScriptString(catalog, sql)
            cls.check_interruption()
            grt.send_progress(0.1 + idx / total, 'Object %s reverse engineered!' % name)
            idx += 1

    grt.send_progress(1.0, 'Reverse engineering completed!')
    return catalog

def testInstanceSettingByName(what, connection, server_instance):
    global test_ssh_connection
    log_debug(_this_file, "Test %s in %s\n" % (what, connection.name))

    profile = ServerProfile(connection, server_instance)
    if what == "connect_to_host":
        if test_ssh_connection:
            test_ssh_connection = None

        log_info(_this_file, "Instance test: Connecting to %s\n" % profile.ssh_hostname)

        try:
            test_ssh_connection = wb_admin_control.WbAdminControl(profile, connect_sql=False)
            test_ssh_connection.init()
            grt.send_info("connected.")
        except Exception, exc:
            import traceback
            traceback.print_exc()
            return "ERROR " + str(exc)
        except:

def save_file_content_and_backup(self, filename, content, backup_extension, as_admin = False, admin_password = None):
    log_debug(_this_file, '%s: Saving file "%s" with backup (sudo="%s")\n' % (self.__class__.__name__, filename, as_admin))
    if as_admin:
        # The delete argument is only available starting from py 2.6
        # (NamedTemporaryFile deletes files on close in all cases, unless you pass delete=False)
        tmp = tempfile.NamedTemporaryFile(dir=self.tempdir)
        tmp_name = tmp.name
        try:
            log_debug(_this_file, '%s: Writing file contents to tmp file "%s"\n' % (self.__class__.__name__, tmp_name))
            tmp.write(content)
            tmp.flush()

            if backup_extension and os.path.exists(filename):
                log_debug(_this_file, '%s: Creating backup of "%s" to "%s"\n' % (self.__class__.__name__, filename, filename + backup_extension))
                self._copy_file(source = filename, dest = filename + backup_extension,
                                as_admin = as_admin, admin_password = admin_password)

            log_debug(_this_file, '%s: Copying over tmp file to final filename using sudo: %s -> %s\n' % (self.__class__.__name__, tmp_name, filename))
            self._copy_file(source = tmp_name, dest = filename,
                            as_admin = as_admin, admin_password = admin_password)
            log_debug(_this_file, '%s: Copying file done\n' % self.__class__.__name__)
            tmp.close()
        except Exception, exc:
            log_error(_this_file, '%s: Exception caught: %s\n' % (self.__class__.__name__, str(exc)))
            if tmp:
                tmp.close()
            raise

def save_file_content_and_backup(self, path, content, backup_extension, as_admin = False, admin_password = None):
    # Check if dir, where config file will be stored, is writable
    dirname, filename = splitpath(path)

    if not as_admin and not self.is_dir_writable(dirname.strip(" \r\t\n")):
        raise PermissionDeniedError("Cannot write to directory %s" % dirname)

    if self.ssh is not None:
        ## Get home dir for using as tmpdir
        homedir, status = self.process_ops.get_cmd_output("echo ~")
        if type(homedir) is unicode:
            homedir = homedir.encode("utf8")
        if type(homedir) is str:
            homedir = homedir.strip(" \r\t\n")
        else:
            homedir = None
        log_debug2(_this_file, '%s: Got home dir: "%s"\n' % (self.__class__.__name__, homedir))
        if not homedir:
            raise Exception("Unable to get path for remote home directory")

        tmpfilename = homedir + "/.wba.temp"

        log_debug(_this_file, '%s: Remotely writing contents to temporary file "%s"\n' % (self.__class__.__name__, tmpfilename))
        log_debug3(_this_file, '%s: %s\n' % (self.__class__.__name__, content))
        self.ssh.set_contents(tmpfilename, content)

        if backup_extension:
            log_debug(_this_file, '%s: Backing up %s\n' % (self.__class__.__name__, path))
            backup_cmd = "/bin/cp " + quote_path(path) + " " + quote_path(path + backup_extension)
            self.process_ops.exec_cmd(backup_cmd, as_admin, admin_password)

        copy_to_dest = "/bin/cp " + quote_path(tmpfilename) + " " + quote_path(path)
        delete_tmp = "/bin/rm " + quote_path(tmpfilename)

        log_debug(_this_file, '%s: Copying file to final destination: "%s"\n' % (self.__class__.__name__, copy_to_dest))
        self.process_ops.exec_cmd(copy_to_dest, as_admin, admin_password)
        log_debug(_this_file, '%s: Deleting tmp file: "%s"\n' % (self.__class__.__name__, delete_tmp))
        self.process_ops.exec_cmd(delete_tmp)
    else:
        raise Exception("No SSH session active, cannot save file remotely")

def send_warning(self, msg):
    grt.log_debug("Wizard", "WARNING: %s\n" % msg)
    self._handle_task_output("WARNING", msg, "")

def send_error(self, msg):
    grt.log_debug("Wizard", "ERROR: %s\n" % msg)
    self._handle_task_output("ERROR", msg, "")

def send_info(self, msg):
    grt.log_debug("Wizard", msg + "\n")
    self._handle_task_output("INFO", msg, "")

# Continuation of save_file_content_and_backup() above: tail of the as_admin branch,
# followed by the non-admin path.
            self._copy_file(source = filename, dest = filename + backup_extension,
                            as_admin = as_admin, admin_password = admin_password)

            log_debug(_this_file, '%s: Copying over tmp file to final filename using sudo: %s -> %s\n' % (self.__class__.__name__, tmp_name, filename))
            self._copy_file(source = tmp_name, dest = filename,
                            as_admin = as_admin, admin_password = admin_password)
            log_debug(_this_file, '%s: Copying file done\n' % self.__class__.__name__)
            tmp.close()
        except Exception, exc:
            log_error(_this_file, '%s: Exception caught: %s\n' % (self.__class__.__name__, str(exc)))
            if tmp:
                tmp.close()
            raise
    else:
        target_dir = splitpath(filename)[0]
        if not os.path.exists(target_dir):
            log_debug(_this_file, '%s: Target directory "%s" does not exist\n' % (self.__class__.__name__, target_dir))
            raise InvalidPathError("The directory %s does not exist" % target_dir)
        if not self.is_dir_writable(target_dir):
            log_debug(_this_file, '%s: Target directory "%s" is not writable\n' % (self.__class__.__name__, target_dir))
            raise PermissionDeniedError("Cannot write to target directory")

        if os.path.exists(filename) and backup_extension:
            log_debug(_this_file, '%s: Target file "%s" exists, creating backup\n' % (self.__class__.__name__, filename))
            # backup config file
            self._copy_file(filename, filename + backup_extension)

        try:
            f = open(filename, 'w')
        except OSError, err:
            if err.errno == errno.EACCES:
                raise PermissionDeniedError("Could not open file %s for writing" % filename)

def get_log_destination(self):
    dest = {}
    if self.ctrl_be.is_sql_connected():  # If server is up, query the destination from there
        if self.ctrl_be.server_version < (5, 1, 6):  # Logging to TABLE was introduced in v5.1.6
            try:
                result = self.ctrl_be.exec_query("SHOW VARIABLES LIKE 'log'")
                if not result.nextRow():
                    return dest
            except:
                return dest
            self.server_profile.log_output = 'FILE' if result.stringByName("Value") == 'ON' else 'NONE'
            log_debug(_this_file, '%s: log_output = %s\n' % (self.__class__.__name__, self.server_profile.log_output))
        else:
            try:
                result = self.ctrl_be.exec_query("SHOW VARIABLES LIKE 'log_output'")
                if not result.nextRow():
                    return dest
            except:
                return dest
            self.server_profile.log_output = result.stringByName("Value")

            if 'FILE' in self.server_profile.log_output and 'TABLE' in self.server_profile.log_output:
                def open_remote_file(path):
                    import wb_admin_ssh, wb_server_control
                    ssh = wb_admin_ssh.WbAdminSSH()
                    ssh.wrapped_connect(self.server_profile, wb_server_control.PasswordHandler(self.server_profile))
                    sftp = ssh.client.open_sftp()
                    if not ssh.is_connected():
                        raise IOError, ''
                    sftp.open(path)

                # Can't read logs from files if admin is disabled:
                if not self.server_profile.admin_enabled:
                    dest['general_log'] = 'TABLE'
                    dest['slow_log'] = 'TABLE'
                    log_debug(_this_file, '%s: log_output = %s\n' % (self.__class__.__name__, dest))
                    return dest

                # Try to prioritize the files if they are readable
                if not getattr(self, 'stored_general_log_source_choice', None):
                    if self.server_profile.general_log_file_path:
                        try:
                            open(self.server_profile.general_log_file_path) if self.server_profile.is_local else open_remote_file(self.server_profile.general_log_file_path)
                            dest['general_log'] = 'FILE'
                        except:
                            dest['general_log'] = 'TABLE'
                    else:
                        dest['general_log'] = 'TABLE'
                    self.stored_general_log_source_choice = dest['general_log']
                else:
                    dest['general_log'] = self.stored_general_log_source_choice

                if not getattr(self, 'stored_slow_log_source_choice', None):
                    if self.server_profile.slow_log_file_path:
                        try:
                            open(self.server_profile.slow_log_file_path) if self.server_profile.is_local else open_remote_file(self.server_profile.slow_log_file_path)
                            dest['slow_log'] = 'FILE'
                        except:
                            dest['slow_log'] = 'TABLE'
                    else:
                        dest['slow_log'] = 'TABLE'
                    self.stored_slow_log_source_choice = dest['slow_log']
                else:
                    dest['slow_log'] = self.stored_slow_log_source_choice

    log_debug(_this_file, '%s: log_output = %s\n' % (self.__class__.__name__, dest))
    return dest

def save_file_content_and_backup(self, filename, content, backup_extension, as_admin = False, admin_password = None):
    log_debug(_this_file, '%s: Saving file "%s" with backup (sudo="%s")\n' % (self.__class__.__name__, filename, str(as_admin)))
    if as_admin:
        tmp_name = self.tempdir + "\\wbfilesavetmp"
        tmp = open(tmp_name, "w+b")
        try:
            log_debug(_this_file, '%s: Writing file contents to tmp file "%s"\n' % (self.__class__.__name__, tmp_name))
            tmp.write(content)
            tmp.close()

            if backup_extension and os.path.exists(filename):
                #dprint_ex(1, "Creating backup of %s to %s" % (filename, filename+backup_extension))
                #self._copy_file(source = filename, dest = filename + backup_extension,
                #                as_admin = as_admin, admin_password = admin_password)

                # Create backup and copy over file to final destination in a single command.
                # This is done because running copy twice would bring up the UAC dialog twice.
                script = "copy /Y %s %s && copy /Y %s %s" % (quote_path_win(filename), quote_path_win(filename + backup_extension),
                                                             quote_path_win(tmp_name), quote_path_win(filename))
                log_debug(_this_file, '%s: Creating backup and committing tmp file: "%s"\n' % (self.__class__.__name__, script))
                output = []
                res = self.process_ops.exec_cmd(script,
                                                as_admin = True,
                                                admin_password = admin_password,
                                                output_handler = lambda line, l = output: l.append(line))
                if res != 0:
                    output = "\n".join(output)
                    raise RuntimeError("Error while executing '%s'. Output = '%s'" % (script, output))
            else:
                log_debug(_this_file, '%s: Copying over tmp file to final filename using sudo: %s -> %s\n' % (self.__class__.__name__, tmp_name, filename))
                self._copy_file(source = tmp_name, dest = filename,
                                as_admin = as_admin, admin_password = admin_password)

            log_debug(_this_file, '%s: Delete tmp file "%s"\n' % (self.__class__.__name__, tmp_name))
            # delete tmp file
            ## BIZARRE STUFF GOING ON HERE
            # Commenting out the following line makes something in committing the config file
            # change fail, even though the copies happen before this line.
            # os.remove(tmp_name)

            log_debug(_this_file, '%s: Done.\n' % self.__class__.__name__)
        except Exception, exc:
            log_error(_this_file, '%s: Exception caught: %s\n' % (self.__class__.__name__, str(exc)))
            if tmp:
                tmp.close()
            raise

def __init__(self, ctrl_be, server_profile, running, cpu_widget):
    self.ctrl_be = ctrl_be
    self.ssh = None
    self.cpu = 0
    self.mtx = threading.Lock()
    self.running = running
    self.cpu_widget = cpu_widget
    self.settings = server_profile
    self.remote_admin_enabled = self.settings.uses_ssh

    if not self.remote_admin_enabled:
        return

    self.ctrl_be.add_me_for_event("shutdown", self)

    # Upload script. Get local name, open ftp session and upload to the directory
    # where mysql.ini is.
    self.script = None
    self.ssh = ctrl_be.open_ssh_session_for_monitoring()

    # %APPDATA% is n/a for LocalService, which is a user sshd may run as
    (dirpath, code) = self.ssh.exec_cmd("cmd /C echo %USERPROFILE%")
    dirpath = dirpath.strip(" \r\t\n")

    if code == 0 and dirpath is not None and dirpath != "%USERPROFILE%":
        script_path = App.get().get_resource_path("mysql_system_status_rmt.vbs")
        filename = "\"" + dirpath + "\\mysql_system_status_rmt.vbs\""
        log_debug('%s:%s.__init__()' % (_this_file, self.__class__.__name__), 'Script local path is "%s". Will be uploaded to "%s"\n' % (script_path, filename))

        if script_path is not None and script_path != "":
            #print "Uploading file to ", filename
            try:
                f = open(script_path)
                self.ssh.exec_cmd("cmd /C echo. > " + filename)
                maxsize = 1800
                cmd = ""
                for line in f:
                    line = line.strip("\r\n")
                    tline = line.strip(" \t")
                    if len(tline) > 0:
                        if tline[0] != "'":
                            if len(cmd) > maxsize:
                                self.ssh.exec_cmd("cmd /C " + cmd.strip(" &"))
                                self.ssh.exec_cmd("cmd /C echo " + line + " >> " + filename)
                                cmd = ""
                            else:
                                cmd += "echo " + line + " >> " + filename
                                cmd += " && "
                if len(cmd) > 0:
                    self.ssh.exec_cmd("cmd /C " + cmd.strip(" &"))
                    cmd = ""

                self.script = "cscript //NoLogo " + filename + " /DoStdIn"

                # run ssh in a thread
                log_debug2('%s:%s.__init__()' % (_this_file, self.__class__.__name__), 'About to run "%s"\n' % self.script)
                self.chan = None
                self.out = ""

                self.read_thread = threading.Thread(target=self.ssh.exec_cmd,
                                                    args=(self.script, Users.CURRENT, None, self.reader, 1, self.save_channel))
                self.read_thread.setDaemon(True)
                self.read_thread.start()
            except IOError, e:
                self.ssh.close()
                self.ssh = None
                raise e

def __del__(self):
    log_debug(_this_file, "Closing SSH connection")
    self.close()

def log_debug(msg):
    tb = traceback.extract_stack(limit=2)
    grt.log_debug("%s:%s:%s" % (os.path.basename(tb[-2][0]), tb[-2][2], tb[-2][1]), msg)

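# A standalone sketch of what this wrapper produces; grt.log_debug is replaced with a plain
# print because the grt module is only available inside Workbench (an assumption made for
# illustration only):
import os, traceback

def log_debug(msg):
    # limit=2 keeps just the caller's frame and this one; tb[-2] is the caller
    tb = traceback.extract_stack(limit=2)
    tag = "%s:%s:%s" % (os.path.basename(tb[-2][0]), tb[-2][2], tb[-2][1])  # file:function:line
    print tag, msg.rstrip("\n")  # stand-in for grt.log_debug(tag, msg)

def connect():
    log_debug("opening connection\n")

connect()  # prints something like "example.py:connect:12 opening connection"
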
def send_info(self, msg):
    grt.log_debug("Wizard", msg)
    self._handle_task_output("INFO", msg, "")

def send_error(self, msg):
    grt.log_debug("Wizard", "ERROR: " + msg)
    self._handle_task_output("ERROR", msg, "")

def send_warning(self, msg):
    grt.log_debug("Wizard", "WARNING: " + msg)
    self._handle_task_output("WARNING", msg, "")