def _execute_now():
    for ip in ips:
        Common.create_thread(func=self.exec_start_impl,
                             args=(ip, uploads[ip], params[ip]))
        logger.info(
            "execute_start {} for {}, params:{}, uploads:{}".format(
                self.task, ip, params[ip], uploads[ip]))
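# Common.create_thread is not defined in this section. The sketch below shows the
# behaviour it is assumed to have here (spawn a fire-and-forget daemon thread);
# the project's real helper may differ, so treat this as illustration only.
import threading


def _example_create_thread(func, args=()):
    # Daemon thread so a hung task cannot block interpreter shutdown.
    worker = threading.Thread(target=func, args=args)
    worker.daemon = True
    worker.start()
    return worker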
def ssh_keepalive(self, args=None):
    def ssh_check():
        for t in range(1, retry_times + 1):
            ret1 = SSHUtil.user_login(ssh, user)[0]
            ret2 = SSHUtil.user_login(ssh, 'root')[0]
            ret3 = SSHUtil.exec_ret(ssh, 'echo')[0]
            ret4 = SSHUtil.upload_file(ssh, Global.G_SETTINGS_FILE,
                                       remote_file)[0]
            if all([ret1, ret2, ret3, ret4]):
                return True
        return False

    logger.debug('[timer] ssh keepalive timer start...')
    remote_file = "{0}/__SSH__/1".format(
        model_gate.settings_data.server_dir)
    retry_times = model_gate.settings_data.retry_times
    remind_dict, success_ip_ssh, failed_ip_ssh = {}, {}, {}
    login_state_data = model_gate.login_state_data.get_data()
    for ip, data in login_state_data.items():
        ssh = data['SSH']
        user, upwd, rpwd = data['PWD']
        if not ssh_check():
            if (ip not in self.remind_dict) or (not self.remind_dict[ip]):
                logger.warn(
                    "(keepalive) ssh instance of {0} is invalid, rebuild now"
                    .format(ip))
                model_gate.insert_text_data.set_data([
                    "{0} is disconnected, Re-login now".format(ip), 'WARN'
                ])
                self.remind_dict[ip] = True
            # try to rebuild the ssh connection
            del ssh
            ssh = SSH(ip, user, upwd, rpwd)
            if ssh_check():
                logger.info(
                    "(keepalive) rebuild ssh instance of {0} success".
                    format(ip))
                model_gate.insert_text_data.set_data(
                    ["{0} Re-login success".format(ip), 'INFO'])
                self.remind_dict[ip] = False
                success_ip_ssh[ip] = ssh
            else:
                failed_ip_ssh[ip] = ssh
        else:
            if login_state_data[ip]['STATE'] != 'SUCCESS':
                success_ip_ssh[ip] = ssh
    # refresh the ssh instances and login states
    for ip, ssh in success_ip_ssh.items():
        login_state_data[ip]['SSH'] = ssh
        login_state_data[ip]['STATE'] = 'SUCCESS'
    for ip, ssh in failed_ip_ssh.items():
        login_state_data[ip]['SSH'] = ssh
        login_state_data[ip]['STATE'] = 'FAILED'
    if success_ip_ssh or failed_ip_ssh:
        # only push an update when something changed, to avoid frequent refreshes
        model_gate.login_state_data.set_data(login_state_data)
    logger.debug('[timer] ssh keepalive end, login_state_data: {}'.format(
        login_state_data))
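# The ssh_timer / json_timer / cache_timer / file_timer objects referenced in
# this class expose a `period` attribute and an `update_period()` method, but
# their implementation is not part of this section. The class below is a minimal
# sketch of such a periodic timer, given as an assumption for illustration only.
import threading


class _ExamplePeriodicTimer(object):
    def __init__(self, period, callback):
        self.period = period            # seconds between callback invocations
        self._callback = callback
        self._stopped = threading.Event()

    def update_period(self, period):
        # The new period takes effect on the next wait; no restart required.
        self.period = period

    def start(self):
        def _run():
            # Event.wait() returns True once stop() is called, ending the loop.
            while not self._stopped.wait(self.period):
                self._callback()
        threading.Thread(target=_run, daemon=True).start()

    def stop(self):
        self._stopped.set()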
def execute_stop(self, data):
    ips, script = data
    self.init(script, True, True)
    self.is_break = True
    self._mutex('DELAY', False)
    self._mutex('LOOP', False)
    for ip in ips:
        self.kill_shell(self.get_ssh(ip))
        self._mutex(ip, False)
        self.insert_text_info(ip, 0, "kill success")
        self.return_exec_start_result(ip, 100, '', False)
        logger.info("execute_stop {} for {}".format(self.task, ip))
def check_file(self):
    Global.G_PID_DIR = "{}\\{}".format(Global.G_RUN_DIR, Common.get_pid())
    Common.mkdir(Global.G_RUN_DIR)
    Common.mkdir(Global.G_DOWNLOAD_DIR)
    Common.mkdir(Global.G_PID_DIR)
    logger.info(Global.G_TEXT_LOGO)
    for path in [
            Global.G_RESOURCE_DIR, Global.G_DEPENDENCE_FILE,
            Global.G_SETTINGS_FILE, Global.G_SCRIPTS_DIR
    ]:
        if not Common.is_exists(path):
            model_gate.exception_data.set_data(
                "{} does not exist".format(path))
            logger.error("{} does not exist".format(path))
            return False
    return True
def refresh_json_data(self, args=None):
    def record_change(key, last, curr):
        if last != curr:
            logger.info('[change] {} changed to {}'.format(key, curr))

    logger.debug('[timer] refresh json data start...')
    last_settings = deepcopy(model_gate.settings_data)
    last_widgets = deepcopy(model_gate.dependence_data.widget_data)
    last_trees = deepcopy(model_gate.dependence_data.tree_data)
    if not loader.json_parser(True):
        model_gate.settings_data = deepcopy(last_settings)
        model_gate.dependence_data.widget_data = deepcopy(last_widgets)
        model_gate.dependence_data.tree_data = deepcopy(last_trees)
        del last_settings
        del last_widgets
        del last_trees
        return
    curr_settings = model_gate.settings_data
    curr_dependence = model_gate.dependence_data
    try:
        logger.change_level(curr_settings.log_level)
        record_change('log_level', last_settings.log_level,
                      curr_settings.log_level)
        record_change('tool_alias', last_settings.tool_alias,
                      curr_settings.tool_alias)
        record_change('tool_version', last_settings.tool_version,
                      curr_settings.tool_version)
        record_change('keepalive_period', self.ssh_timer.period,
                      curr_settings.keepalive_period)
        record_change('refresh_json_period', self.json_timer.period,
                      curr_settings.refresh_json_period)
        record_change("refresh_cache['period']", self.cache_timer.period,
                      curr_settings.refresh_cache['period'])
        record_change("refresh_cache['scripts']",
                      last_settings.refresh_cache['scripts'],
                      curr_settings.refresh_cache['scripts'])
        record_change("refresh_file['period']", self.file_timer.period,
                      curr_settings.refresh_file['period'])
        record_change("refresh_file['scripts']",
                      last_settings.refresh_file['scripts'],
                      curr_settings.refresh_file['scripts'])
        self.ssh_timer.update_period(curr_settings.keepalive_period)
        self.json_timer.update_period(curr_settings.refresh_json_period)
        self.cache_timer.update_period(
            curr_settings.refresh_cache['period'])
        self.file_timer.update_period(curr_settings.refresh_file['period'])
        if last_settings.tool_alias != curr_settings.tool_alias or \
                last_settings.tool_version != curr_settings.tool_version:
            title = '{} v{}'.format(curr_settings.tool_alias,
                                    curr_settings.tool_version)
            model_gate.app_title_data.set_data((title, None))
            logger.info('[change] tool title changed to {}'.format(title))
        if last_trees != curr_dependence.tree_data:
            model_gate.app_trees_data.set_data(curr_dependence.tree_data)
            logger.info('[change] tool trees changed')
        if last_widgets != curr_dependence.widget_data:
            model_gate.app_widgets_data.set_data(
                curr_dependence.widget_data)
            logger.info('[change] tool widgets changed')
    except Exception:
        logger.error('Exception apply: {}'.format(traceback.format_exc()))
    del last_settings
    del last_widgets
    del last_trees
    logger.debug('[timer] refresh json data end')
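# Illustrative only: a settings object carrying the fields this refresh path
# actually reads (log_level, tool_alias, tool_version, the timer periods,
# refresh_cache, refresh_file), plus server_dir and retry_times used elsewhere.
# The concrete values and the real JSON schema produced by loader.json_parser
# are assumptions, not project data.
_EXAMPLE_SETTINGS = {
    'log_level': 'INFO',
    'tool_alias': 'ops-tool',                       # assumed name
    'tool_version': '1.0.0',                        # assumed version
    'keepalive_period': 30,                         # seconds (assumed unit)
    'refresh_json_period': 10,
    'refresh_cache': {'period': 60, 'scripts': []},
    'refresh_file': {'period': 60, 'scripts': []},
    'server_dir': '/opt/ops-tool',                  # assumed remote path
    'retry_times': 3,
}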
def execute_enter(self, data):
    ips, script = data
    self.init(script, True, False)
    for ip in ips:
        Common.create_thread(func=self.exec_enter_impl, args=(ip, ))
        logger.info("execute_enter {} for {}".format(self.task, ip))
def login_server(self, ip_data, pack_path):
    def ssh_login():
        ret, err = SSHUtil.user_login(ssh, user)
        if not ret:
            raise Exception(
                "{} {} login failed\nRetry count: {}\nDetails: {}".format(
                    ip, user, times, err))
        ret, err = SSHUtil.user_login(ssh, 'root')
        if not ret:
            raise Exception(
                "{} root login failed\nRetry count: {}\nDetails: {}".format(
                    ip, times, err))

    def init_server():
        # If the previous login used a different user, the later unzip would
        # fail; clear and recreate the server directory on every login.
        cmd = "rm -rf {0}/*; mkdir -p {0}; chmod 777 {0}".format(server_dir)
        SSHUtil.exec_ret(ssh, cmd, root=True)

    def upload_package():
        remote_path = "{0}/{1}".format(server_dir, Global.G_PACK_ZIP)
        unzip_cmd = "cd {0} && unzip -o {1} && chmod 777 {0}/*".format(
            server_dir, Global.G_PACK_ZIP)
        ret, err = SSHUtil.upload_file(ssh, pack_path, remote_path)
        if not ret:
            raise Exception(
                "{} login failed\nRetry count: {}\nUpload package failed: {}"
                .format(ip, times, err))
        ret, err = SSHUtil.exec_ret(ssh, unzip_cmd, root=True)
        if not ret:
            raise Exception(
                "{} login failed\nRetry count: {}\nDecompression failed: {}"
                .format(ip, times, err))

    def post_handle():
        # dos2unix: strip CR characters from the uploaded shell scripts
        dos2unix_cmd = '''
        for file in {0}/*.sh
        do
            cp $file ${{file}}_tmp
            cat ${{file}}_tmp | tr -d "\\r" >$file
            rm ${{file}}_tmp &
        done
        '''.format(server_dir)
        SSHUtil.exec_ret(ssh, dos2unix_cmd)

    def update_login_data(state):
        login_state_data[ip] = {
            'PWD': [user, upwd, rpwd],
            'SSH': ssh,
            'STATE': state
        }
        model_gate.login_state_data.set_data(login_state_data)

    ip, user, upwd, rpwd = ip_data
    login_state_data = model_gate.login_state_data.get_data()
    retry_times_limit = model_gate.settings_data.retry_times
    server_dir = model_gate.settings_data.server_dir
    for times in range(1, retry_times_limit + 1):
        ssh = SSH(ip, user, upwd, rpwd)
        update_login_data('LOGGING')
        try:
            ssh_login()
            init_server()
            upload_package()
            post_handle()
        except Exception as e:
            logger.warn(e)
            if times == retry_times_limit:
                update_login_data('FAILED')
                return False, str(e)
            logger.info('{} retry login times: {}'.format(ip, times))
            continue
        # login success
        update_login_data('SUCCESS')
        logger.info('{} login success'.format(ip))
        break
    return True, None
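# Illustrative usage only: logging in to several hosts in parallel with the same
# thread helper used above. The host tuples and the local package path are
# made-up placeholders, not values from this project.
#
#   hosts = [
#       ('192.0.2.10', 'user1', 'user_pwd', 'root_pwd'),
#       ('192.0.2.11', 'user2', 'user_pwd', 'root_pwd'),
#   ]
#   pack_path = 'C:/path/to/pack.zip'     # hypothetical local zip path
#   for ip_data in hosts:
#       Common.create_thread(func=self.login_server, args=(ip_data, pack_path))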