def run_cmd(self, command, pattern):
    """Run a shell command on self.host_list via the runner and stream
    per-host results back over the websocket as colored HTML spans.

    :param command: shell command string to execute remotely.
    :param pattern: host pattern -- currently unused; kept for interface
        compatibility (TODO confirm whether it should be passed to run()).
    """
    res_play = self.runner.run(self.host_list, 'shell', command)
    # Persist an execution log entry.
    # BUG FIX: the row was only add()-ed, never committed; every other
    # write in this module wraps changes in se.begin()/se.commit().
    execlog = ExecLog(host=self.asset_name_str, cmd=command,
                      user=self.user.username, remote_ip=self.remote_ip,
                      result=self.runner.results)
    se = get_dbsession()
    try:
        se.begin()
        se.add(execlog)
        se.commit()
    except Exception:
        logger.error(traceback.format_exc())
        se.rollback()
    newline_pattern = re.compile(r'\n')
    for status, host_outputs in res_play.items():
        for host, output in host_outputs.items():
            # Browser-facing output: turn newlines into <br /> tags.
            output = newline_pattern.sub('<br />', output)
            if status == 'success':
                header = "<span style='color: green'>[ %s => %s]</span>\n" % (host, 'Ok')
            else:
                header = "<span style='color: red'>[ %s => %s]</span>\n" % (host, 'failed')
            self.write_message(header)
            self.write_message(output)
    self.write_message('\n~o~ Task finished ~o~\n')
def open(self):
    """Websocket open handler for the remote-exec console.

    Validates that the requesting user holds the requested role, builds
    the list of assets/hosts the user may operate on, and reports the
    operable hostnames back to the client.
    """
    logger.info('Websocket: Open exec request')
    role_name = self.get_argument('role', 'sb')
    self.remote_ip = self.request.headers.get("X-Real-IP")
    if not self.remote_ip:
        self.remote_ip = self.request.remote_ip
    logger.info('Web exec cmd: request user %s' % role_name)
    # 1. Look up the requested role.
    # BUG FIX: .all() returned a *list*, so the membership test below was
    # always True and every request was rejected; use .first() so
    # self.role is a single PermRole comparable against the perm keys.
    se = get_dbsession()
    self.role = se.query(PermRole).filter_by(name=role_name).first()
    # 2. Permissions granted to this user.
    self.perm = get_group_user_perm(se, self.user)
    # 3. Verify the user actually holds the requested role.
    roles = self.perm.get('role').keys()
    if self.role not in roles:
        self.write_message('No perm that role %s' % role_name)
        self.close()
        # BUG FIX: without this return the handler continued running
        # (and crashed) after rejecting the connection.
        return
    self.assets = self.perm.get('role').get(self.role).get('asset')
    # 4. Build the usable resource list for this user + role.
    res = gen_resource(se, {'user': self.user, 'asset': self.assets, 'role': self.role})
    for r in res:
        self.host_list.append(r['ip'])
    # 5. Report operable assets back to the client.
    self.runner = MyWSRunner(res)
    message = ', '.join([asset.hostname for asset in self.assets])
    self.__class__.clients.append(self)
    self.write_message(message)
def get(self, *args, **kwargs):
    """Return all TtyLog rows, optionally filtered by the ``log_id``
    query argument, newest first. 404s when nothing matches."""
    try:
        log_id = self.get_argument('log_id', None)
        # Build the query, narrowing by log_id when one was supplied.
        se = get_dbsession()
        query = se.query(TtyLog)
        if log_id:
            query = query.filter_by(log_id=log_id)
        rows = query.order_by(TtyLog.id.desc()).all()
        if not rows:
            raise HTTPError(404, 'Ttylog Not Found')
        self.set_status(200)
        self.finish({'message': 'success',
                     'data': [row.to_dict() for row in rows]})
    except ValueError:
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def get(self, *args, **kwargs):
    """Render the recorded tty session (script + timing file pair) for
    the Log row addressed by the ``log_id`` URL kwarg."""
    try:
        log_id = kwargs.get('log_id')
        # Guard clauses instead of nested if/else.
        if not log_id:
            raise HTTPError(400)
        se = get_dbsession()
        log = se.query(Log).filter_by(id=log_id).first()
        if not log:
            raise HTTPError(404)
        # The session is stored as a scriptreplay-style file pair.
        script_file = log.log_path + '.log'
        timing_file = log.log_path + '.time'
        if not (os.path.isfile(script_file) and os.path.isfile(timing_file)):
            raise HTTPError(404)
        self.set_status(200)
        self.finish({'content': renderJSON(script_file, timing_file)})
    except ValueError:
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def save(self, path=LOG_DIR):
    """Persist the recorded terminal session to a zip under
    <path>/tty/<YYYYMMDD>/ and register it in the TermLog table.

    Falls back to storing the log JSON inline in the row
    (logpath='locale') when writing the zip fails.

    :param path: base log directory (defaults to LOG_DIR).
    """
    date = datetime.datetime.now().strftime('%Y%m%d')
    filename = str(uuid.uuid4())
    self.filename = filename
    day_dir = os.path.join(path, 'tty', date)
    filepath = os.path.join(day_dir, filename + '.zip')
    if not os.path.isdir(day_dir):
        os.makedirs(day_dir, mode=0o777)
    # UUID collision guard (vanishingly unlikely, but cheap).
    while os.path.isfile(filepath):
        filename = str(uuid.uuid4())
        filepath = os.path.join(day_dir, filename + '.zip')
    password = str(uuid.uuid4())
    try:
        se = get_dbsession()
        se.begin()
        try:
            zf = zipfile.ZipFile(filepath, 'w', zipfile.ZIP_DEFLATED)
            # NOTE(review): ZipFile.setpassword() only sets the password
            # used for *extraction*; the stdlib cannot create encrypted
            # archives, so this zip is NOT protected -- confirm intent.
            zf.setpassword(password)
            zf.writestr(filename, json.dumps(self.log))
            zf.close()
            record = TermLog(logpath=filepath, logpwd=password,
                             filename=filename,
                             history=json.dumps(self.CMD),
                             timestamp=int(self.recoderStartTime),
                             user_id=self.user)
        except Exception:
            # Zip creation failed: keep the session by embedding the log
            # JSON directly in the row. (Was a silent bare "except:";
            # now narrowed and logged.)
            logger.error(traceback.format_exc())
            record = TermLog(logpath='locale', logpwd=password,
                             log=json.dumps(self.log), filename=filename,
                             history=json.dumps(self.CMD),
                             timestamp=int(self.recoderStartTime),
                             user_id=self.user)
        se.add(record)
        se.flush()
        se.commit()
    except Exception:
        logger.error(traceback.format_exc())
        se.rollback()
def get(self):
    """Kill the live web terminal whose log id matches the ``id`` query
    argument, then return the matching Log row."""
    try:
        ws_id = self.get_argument('id')
        se = get_dbsession()
        found = False
        # Walk the open websocket clients looking for the target id.
        for client in WebTerminalHandler.clients:
            if client.id == int(ws_id):
                logger.info("Kill log id %s" % ws_id)
                found = True
                client.on_close()
        if not found:
            raise HTTPError(404, 'WebTerminal Not Found')
        log = se.query(Log).get(ws_id).to_dict()
        self.set_status(200)
        self.finish({'message': 'success', 'data': log})
    except ValueError:
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def get(self):
    """Serve a single-use file download.

    Looks up a FileDownload row by the ``link_id`` query argument; when
    found and still active, streams the file to the client and
    deactivates the link so it cannot be reused.

    NOTE(review): ``yield self.flush()`` implies this method runs under a
    tornado coroutine decorator applied where it is defined -- confirm
    in the full file.
    """
    # Validate that a link id was supplied at all.
    link_id = self.get_argument('link_id', None)
    if not link_id:
        self.set_status(404)
        self.write("""<html> <head> <title> 下载文件结果 </title> </head> <body> 文件下载失败,请检查下载连接是否正确或者重新进行下载。 </body> </html>""")
        self.finish()
        return
    # The link must exist and still be active (status == True).
    se = get_dbsession()
    fd = se.query(FileDownload).filter(
        FileDownload.id == link_id,
        FileDownload.status == True).first()
    if not fd:
        self.set_status(404)
        self.write("""<html> <head> <title> 下载文件结果 </title> </head> <body> 文件下载失败,请检查下载连接是否正确或者重新进行下载。 </body> </html>""")
        self.finish()
        return
    ftp_file_path = fd.link
    file_name = fd.link.split(os.sep)[-1]
    # Deactivate the link: downloads are one-shot.
    se.begin()
    fd.status = False
    se.commit()
    content_length = self.get_content_size(ftp_file_path)
    self.set_header("Content-Length", content_length)
    self.set_header("Content-Type", "application/octet-stream")
    # Set the client-facing file name.
    self.set_header(
        "Content-Disposition",
        "attachment;filename=\"{0}\"".format(file_name))
    content = self.get_content(ftp_file_path)
    if isinstance(content, bytes):
        content = [content]
    # Stream chunk by chunk, yielding to the IOLoop after each flush;
    # stop quietly if the client disconnects mid-download.
    for chunk in content:
        try:
            self.write(chunk)
            yield self.flush()
        except iostream.StreamClosedError:
            break
    return
def get(self):
    """Serve a single-use file download (duplicate of the other download
    handler in this file -- candidate for consolidation).

    Looks up a FileDownload row by the ``link_id`` query argument; when
    found and still active, streams the file to the client and
    deactivates the link so it cannot be reused.

    NOTE(review): ``yield self.flush()`` implies this method runs under a
    tornado coroutine decorator applied where it is defined -- confirm
    in the full file.
    """
    # Validate that a link id was supplied at all.
    link_id = self.get_argument('link_id', None)
    if not link_id:
        self.set_status(404)
        self.write("""<html> <head> <title> 下载文件结果 </title> </head> <body> 文件下载失败,请检查下载连接是否正确或者重新进行下载。 </body> </html>""")
        self.finish()
        return
    # The link must exist and still be active (status == True).
    se = get_dbsession()
    fd = se.query(FileDownload).filter(FileDownload.id == link_id,
                                       FileDownload.status == True).first()
    if not fd:
        self.set_status(404)
        self.write("""<html> <head> <title> 下载文件结果 </title> </head> <body> 文件下载失败,请检查下载连接是否正确或者重新进行下载。 </body> </html>""")
        self.finish()
        return
    ftp_file_path = fd.link
    file_name = fd.link.split(os.sep)[-1]
    # Deactivate the link: downloads are one-shot.
    se.begin()
    fd.status = False
    se.commit()
    content_length = self.get_content_size(ftp_file_path)
    self.set_header("Content-Length", content_length)
    self.set_header("Content-Type", "application/octet-stream")
    # Set the client-facing file name.
    self.set_header("Content-Disposition",
                    "attachment;filename=\"{0}\"".format(file_name))
    content = self.get_content(ftp_file_path)
    if isinstance(content, bytes):
        content = [content]
    # Stream chunk by chunk, yielding to the IOLoop after each flush;
    # stop quietly if the client disconnects mid-download.
    for chunk in content:
        try:
            self.write(chunk)
            yield self.flush()
        except iostream.StreamClosedError:
            break
    return
def __init__(self, *args, **kwargs):
    """Initialise per-connection state, then defer to tornado's handler
    constructor."""
    # Terminal / logging state.
    self.term = None
    self.log = None
    self.log_file_f = None
    self.log_time_f = None
    self.id = 0
    self.sendlog2browser = False
    # SSH session state.
    self.user = None
    self.ssh = None
    self.channel = None
    self.threads = []
    # Each websocket connection owns its own DB session.
    self.se = get_dbsession()
    super(WebTerminalHandler, self).__init__(*args, **kwargs)
def get_log(self):
    """
    Logging user command and output.

    Creates the per-session tty log files under LOG_DIR/tty/<date>/ and
    inserts a Log row describing the session.

    Returns:
        (log_file_f, log_time_f, log_id) -- open '.log' and '.time' file
        handles and the id of the persisted Log row (None if the DB
        insert failed).
    Raises:
        ServerError: when the log directory or files cannot be created.
    """
    tty_log_dir = os.path.join(LOG_DIR, 'tty')
    date_today = datetime.datetime.now()
    date_start = date_today.strftime('%Y%m%d')
    time_start = date_today.strftime('%H%M%S')
    today_connect_log_dir = os.path.join(tty_log_dir, date_start)
    log_file_path = os.path.join(today_connect_log_dir, '%s_%s_%s' % (self.user_id, self.node.id, time_start))
    try:
        mkdir(os.path.dirname(today_connect_log_dir), mode=0777)
        mkdir(today_connect_log_dir, mode=0777)
    except OSError:
        logger.debug(u'创建目录 %s 失败,请修改%s目录权限' % (today_connect_log_dir, tty_log_dir))
        raise ServerError(u'创建目录 %s 失败,请修改%s目录权限' % (today_connect_log_dir, tty_log_dir))
    try:
        # Append mode: the same path may be reopened within a session.
        log_file_f = open(log_file_path + '.log', 'a')
        log_time_f = open(log_file_path + '.time', 'a')
    except IOError:
        logger.debug(u'创建tty日志文件失败, 请修改目录%s权限' % today_connect_log_dir)
        raise ServerError(u'创建tty日志文件失败, 请修改目录%s权限' % today_connect_log_dir)
    if self.login_type == 'ssh':
        # ssh login: record connect.py's pid; a web terminal records the
        # log id instead.
        pid = os.getpid()
        # NOTE(review): bare name `remote_ip` -- presumably a module-level
        # global populated for ssh sessions; NameError otherwise. Confirm.
        self.remote_ip = remote_ip  # remote client IP
    else:
        pid = 0
    log = Log(user_id=self.user_id, node_id=self.node.id, remote_ip=self.remote_ip, login_type=self.login_type, log_path=log_file_path, start_time=date_today, pid=pid)
    log_id = None
    se = get_dbsession()
    if self.login_type == 'web':
        # Use the websocket's log id as pid so "kill" can target the
        # websocket. NOTE(review): log.id is not populated until the row
        # is flushed, so this likely assigns None here -- confirm intent.
        log.pid = log.id
    try:
        se.begin()
        se.add(log)
        se.commit()
        log_id = log.id
    except:
        se.rollback()
    finally:
        se.flush()
        se.close()
    log_file_f.write('Start at %s\r\n' % datetime.datetime.now())
    return log_file_f, log_time_f, log_id
def get(self, *args, **kwargs):
    """Return the stored result of a scheduler task.

    URL kwarg ``task_id`` addresses an Apscheduler_Task row; its
    ``result`` field is returned as ``content``.

    BUG FIX: a missing task previously raised an uncaught AttributeError
    (None.result) and surfaced as a 500; it now answers 404 through the
    existing HTTPError handler. Also renamed the local away from the
    builtin ``id``.
    """
    try:
        task_id = kwargs.get('task_id')
        se = get_dbsession()
        task = se.query(Apscheduler_Task).get(task_id)
        if not task:
            raise HTTPError(404, 'Task Not Found')
        self.set_status(200)
        self.finish({'content': task.result})
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def wrapper(**kwargs):
    """Decorator body: bracket ``func(**kwargs)`` with Apscheduler_Task
    bookkeeping.

    Inserts a task row before running the wrapped callable, then updates
    it with the result (status 'complete') or, on any exception, the
    traceback (status 'failed').
    """
    se = None
    task_id = None
    try:
        # Insert the start-of-task record.
        logger.info('task [{0}] start'.format(kwargs.get('job_id')))
        se = get_dbsession()
        se.begin()
        ap_task = Apscheduler_Task(job_id=kwargs.get('job_id'))
        se.add(ap_task)
        se.flush()
        se.commit()
        task_id = ap_task.id
        # Execute the wrapped task.
        result = func(**kwargs)
        # Record completion.
        logger.info('task [{0}] end'.format(kwargs.get('job_id')))
        uap_task = Apscheduler_Task(id=task_id,
                                    end_time=datetime.datetime.now(),
                                    is_finished=True,
                                    status='complete',
                                    result=result)
        se.begin()
        se.merge(uap_task)
        se.flush()
        se.commit()
    except Exception:
        logger.error(traceback.format_exc())
        # BUG FIX: se is None when get_dbsession() itself failed; the
        # unguarded rollback previously raised AttributeError here.
        if se:
            se.rollback()
            if task_id:
                # Record the failure with the traceback as the result.
                uap_task = Apscheduler_Task(id=task_id,
                                            end_time=datetime.datetime.now(),
                                            is_finished=True,
                                            status='failed',
                                            result=traceback.format_exc())
                se.begin()
                se.merge(uap_task)
                se.flush()
                se.commit()
    finally:
        if se:
            se.close()
def put(self, *args, **kwargs):
    """Modify a server.

    Applies the request params to the system identified by the
    ``system_id`` URL kwarg, then mirrors the (single) IP address into
    the local Node table. Responds with ``update_db`` indicating whether
    the mirror write succeeded.
    """
    try:
        system_id = kwargs.get('system_id')
        params = json.loads(self.request.body)
        interfaces = params.get("interfaces", {})
        system = System()
        system.modify(system_id, params)
        id_unique = params.pop("id_unique")
        ip = None
        # Multiple NICs are not supported yet -- keep only one IP.
        for k, inter_params in interfaces.items():
            ip = inter_params.get('ip_address')
        # Mirror the change into the local DB.
        update_db = True
        if ip:
            se = None
            try:
                se = get_dbsession()
                se.begin()
                node = Node(id=id_unique, ip=ip)
                se.merge(node)
                se.commit()
            except Exception:
                update_db = False
                logger.error(traceback.format_exc())
                # BUG FIX: guard -- se stays None when get_dbsession()
                # fails, and rollback/flush/close on None raised
                # AttributeError out of the cleanup path.
                if se:
                    se.rollback()
            finally:
                if se:
                    se.flush()
                    se.close()
        self.set_status(200, 'success')
        self.finish({'messege': 'success', 'update_db': update_db})
    except ValueError:
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def post(self, *args, **kwargs):
    """Schedule a playbook-run job for a previously uploaded app.

    Expects a JSON body with ``app_uuid``, ``host_list``, ``group_vars``,
    ``groups`` and an optional ``job_id``; the playbook directory and
    playbook list come from the stored App row.
    """
    try:
        task_kwargs = {}
        params = json.loads(self.request.body)
        app_uuid = task_kwargs['app_uuid'] = params.get('app_uuid')
        # Validate request parameters.
        self._check_params(params)
        se = get_dbsession()
        app = se.query(App).get(app_uuid)
        if not app:
            self.set_status(404)
            self.finish({'messege': 'app not exist, please upload app.'})
            # BUG FIX: previously fell through after finish(), hitting
            # AttributeError on app.basedir and a double-finish.
            return
        task_kwargs['host_list'] = params.get('host_list')
        task_kwargs['group_vars'] = params.get('group_vars')
        task_kwargs['groups'] = params.get('groups')
        # Playbook base dir and playbook list from the App row.
        task_kwargs['playbook_basedir'] = app.basedir
        task_kwargs['playbooks'] = app.playbooks.strip(',').split(',')
        job_id = params.get('job_id', None)
        # Generate a job id locally so downstream processing has a unique
        # handle; honour a caller-supplied id when given.
        job_id = job_id if job_id else str(uuid.uuid1())
        task_kwargs['job_id'] = job_id
        logger.info("add job:\n id-[{0}]\n ".format(job_id))
        job = SCHEDULER.add_job(TASK['playbooks'], 'date',
                                kwargs=task_kwargs,
                                id=task_kwargs['job_id'], )
        self.finish(
            {'message': 'add success', 'job': self._get_job_info(job)})
    except ValueError as error:
        logger.error(traceback.format_exc())
        self.set_status(400, error.message)
        self.finish({'messege': error.message})
def delete(self, *args, **kwargs):
    """Delete a server and its mirror row in the local Node table.

    Responds with ``delete_db`` indicating whether the local row was
    removed successfully.
    """
    se = None
    try:
        params = json.loads(self.request.body)
        system = System()
        system_names = params.get('names', None)
        id_unique = params.pop("id_unique")
        error_info = system.delete(system_names)
        self.set_status(200)
        if error_info:
            self.finish({'messege': error_info})
        else:
            delete_db = True
            try:
                se = get_dbsession()
                se.begin()
                # BUG FIX: se.delete() on a transient Node(id=...) raises
                # InvalidRequestError in SQLAlchemy, so the row was never
                # actually removed; load the persistent instance first.
                node = se.query(Node).get(id_unique)
                if node:
                    se.delete(node)
                se.commit()
            except Exception:
                logger.error(traceback.format_exc())
                # BUG FIX: guard -- se is None when get_dbsession() fails.
                if se:
                    se.rollback()
                delete_db = False
            finally:
                if se:
                    se.flush()
                    se.close()
            self.finish({'messege': 'success', 'delete_db': delete_db})
    except ValueError:
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def get(self, *args, **kwargs):
    """Fetch one uploaded app's metadata by its uuid (URL kwarg)."""
    try:
        app_uuid = kwargs.get('app_uuid')
        # The uuid is mandatory.
        if not app_uuid:
            self.set_status(400, 'app_uuid required')
            self.finish({"message": 'app_uuid required'})
            return
        se = get_dbsession()
        record = se.query(App).filter(App.uuid == app_uuid).first()
        if not record:
            self.set_status(404)
            self.finish({"message": 'app not exists'})
            return
        self.set_status(200)
        self.finish({"message": "ok", "app": record.to_dict()})
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def get(self, *args, **kwargs):
    """Return a single ExecLog entry addressed by the ``log_id`` URL
    kwarg; 404 when it does not exist."""
    try:
        log_id = kwargs.get('log_id', '')
        se = get_dbsession()
        if log_id:
            record = se.query(ExecLog).get(log_id)
            if not record:
                raise HTTPError(404, "Log Not Found")
            self.set_status(200)
            self.finish({'messege': 'success', 'data': record.to_dict()})
    except ValueError:
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
    except:
        # Catch-all kept from the original: any other failure is a 500.
        logger.error(traceback.format_exc())
        self.set_status(500, 'failed')
        self.finish({'messege': 'failed'})
def get(self, *args, **kwargs):
    """Log listing endpoint.

    Three modes: a single row by the ``log_id`` URL kwarg; just the total
    row count when ``count`` is passed; otherwise a page of rows, newest
    first (``pageno``/``pagesize`` query args).
    """
    try:
        log_id = kwargs.get('log_id', None)
        se = get_dbsession()
        # Mode 1: single log entry.
        if log_id:
            record = se.query(Log).get(log_id)
            if not record:
                raise HTTPError(404, "Log Not Found")
            self.set_status(200)
            self.finish({'message': 'success', 'data': record.to_dict()})
            return
        # Mode 2: total count only.
        if self.get_argument('count', False):
            self.set_status(200)
            self.finish({'message': 'success', 'count': se.query(Log).count()})
            return
        # Mode 3: paged listing.
        pageno = int(self.get_argument('pageno', 1))
        pagesize = int(self.get_argument('pagesize', 10))
        page = (se.query(Log).order_by(Log.id.desc())
                .offset((pageno - 1) * pagesize).limit(pagesize).all())
        self.set_status(200)
        self.finish({'message': 'success',
                     'data': [row.to_dict() for row in page]})
    except ValueError:
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def post(self, *args, **kwargs):
    """Create a server.

    Records a Node row (id + single IP) locally so the user can ssh in,
    then creates the node on the backing system; the DB row and the
    system-side creation commit or roll back together.
    """
    se = None
    try:
        params = json.loads(self.request.body)
        # Local record used for ssh login.
        se = get_dbsession()
        se.begin()
        id_unique = params.pop("id_unique")
        interfaces = params.get("interfaces", {})
        # Multiple NICs are not supported yet -- keep only one IP.
        ip = None
        for k, inter_params in interfaces.items():
            ip = inter_params.get('ip_address')
        if not id_unique or not ip:
            raise ValueError("id_unique and ip is mandatory ")
        node = Node(id=id_unique, ip=ip)
        se.add(node)
        # Create the node on the backing system.
        system = System()
        system.create(params)
        se.commit()
        self.set_status(200, 'success')
        self.finish({'messege': 'created'})
    except ValueError:
        # BUG FIX: se is still None when json.loads() (or get_dbsession)
        # fails, so the unguarded rollback raised AttributeError and
        # masked the real 400 response.
        if se:
            se.rollback()
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError as http_error:
        if se:
            se.rollback()
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def _upload_action(self, *args, **kwargs):
    """Handle a deploy-package upload.

    1. save the uploaded file into the upload directory;
    2. unpack the zip into the playbook directory for this app;
    3. record the app metadata in the database.
    """
    filepath = None
    # Files posted under the form field named 'file'.
    for meta in self.request.files['file']:
        filename = meta['filename'].split(os.path.sep)[-1]
        filepath = os.path.join(UPLOAD_PATH, filename)
        with open(filepath, 'wb') as up:
            up.write(meta['body'])
    app_uuid = self.get_argument('app_uuid')
    if not zipfile.is_zipfile(filepath):
        raise HTTPError(status_code=400, log_message='file type must be zip')
    # Unpack into the per-app playbook directory.
    uuid_path = os.sep.join([ANSIBLE_PLAYBOOK_PATH, app_uuid])
    archive = ZFile(filepath)
    archive.extract_to(uuid_path)
    archive.close()
    # Persist / update the app record.
    se = get_dbsession()
    se.begin()
    app = App(uuid=app_uuid,
              desc=self.get_argument('desc'),
              type=self.get_argument('type'),
              basedir=os.sep.join([app_uuid, self.get_argument('basedir')]),
              playbooks=self.get_argument('playbooks'))
    se.merge(app)
    se.commit()
def post(self, *args, **kwargs):
    """File-transfer endpoint with three actions.

    * JSON body, action == 'download': fetch a file from an FTP server
      and register a single-use download link;
    * JSON body, action == 'download_ansible': fetch a file from a
      remote host via the ansible 'fetch' module and register a link;
    * non-JSON (multipart) body: plain file upload into UPLOAD_PATH.
    """
    try:
        try:
            # JSON body => a download request.
            params = json.loads(self.request.body)
        except ValueError:
            # Non-JSON body => an upload.
            action = self.get_argument("action")
            params = {'action': action}
        logger.info("params => {0}".format(params))
        if params.get('action') == 'download':
            ftp_file_path = params.get('file_path')
            file_name = os.sep.join([DOWNLOAD_PATH, params.get('file_name')])
            # Open the FTP connection and pull the file down.
            ftp = MyFTP(host=params.get('ftp_host'), port=int(params.get('ftp_port', 21)), user=params.get('ftp_user'), passwd=params.get('ftp_pwd'), timeout=2000)
            ftp.download(ftp_file_path, file_name)
            ftp.close()
            # Register the download link.
            fd = FileDownload(link=file_name)
            se = get_dbsession()
            se.begin()
            se.add(fd)
            se.commit()
            self.set_status(200, 'ok')
            self.finish({'link': fd.id})
        elif params.get('action') == 'download_ansible':
            """resource/host_list"""
            logger.info("download_ansible params ==> {0}".format(params))
            params['job_id'] = str(uuid.uuid1())
            path = params.get('path')
            module_name = 'fetch'
            module_args = 'src=%s dest=%s' % (path, DOWNLOAD_PATH)
            host_list = params.get('host_list')
            # On success the fetched file lands under this path.
            link = os.sep.join([DOWNLOAD_PATH, host_list[0], path])
            runner = AnsibleRunner(**params)
            result = json.loads(runner.run_play(host_list, module_name, module_args))
            result = sorted(result.items(), key=lambda x: x[0])
            rsx = ""
            for t, line in result:
                # Concatenate the output fragments in key order.
                rsx += line
            # Parse "ip | status => payload" out of the combined output.
            rs = re.findall(r'([\d\.]*) \| ([\w]*) => (.*)', rsx, re.S)
            ip, rs1, info = rs[0]
            msg = "success"
            info = json.loads(info.replace('\r\n', ''))
            if info.get('msg'):
                # ansible reported an error for this host.
                msg = info.get('msg')
                self.set_status(404, 'Not Found')
                self.finish({'message': msg})
            else:
                # Register the download link for the fetched file.
                fd = FileDownload(link=link)
                se = get_dbsession()
                se.begin()
                se.add(fd)
                se.commit()
                self.set_status(200, 'ok')
                self.finish({'link': fd.id, 'message': msg})
        else:
            # Multipart upload: file metadata posted under form field 'file'.
            file_metas = self.request.files['file']
            for meta in file_metas:
                filename = meta['filename'].split(os.path.sep)[-1]
                filepath = os.path.join(UPLOAD_PATH, filename)
                # Stored in binary mode; adjust here if text is required.
                with open(filepath, 'wb') as up:
                    up.write(meta['body'])
            self.set_status(200, 'ok')
            self.finish({'fp': filepath})
    except ValueError:
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError, http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def post(self, *args, **kwargs):
    """File-transfer endpoint (duplicate of the other file handler in
    this file -- candidate for consolidation). Three actions:

    * JSON body, action == 'download': fetch a file from an FTP server
      and register a single-use download link;
    * JSON body, action == 'download_ansible': fetch a file from a
      remote host via the ansible 'fetch' module and register a link;
    * non-JSON (multipart) body: plain file upload into UPLOAD_PATH.
    """
    try:
        try:
            # JSON body => a download request.
            params = json.loads(self.request.body)
        except ValueError:
            # Non-JSON body => an upload.
            action = self.get_argument("action")
            params = {'action': action}
        logger.info("params => {0}".format(params))
        if params.get('action') == 'download':
            ftp_file_path = params.get('file_path')
            file_name = os.sep.join(
                [DOWNLOAD_PATH, params.get('file_name')])
            # Open the FTP connection and pull the file down.
            ftp = MyFTP(host=params.get('ftp_host'), port=int(params.get('ftp_port', 21)), user=params.get('ftp_user'), passwd=params.get('ftp_pwd'), timeout=2000)
            ftp.download(ftp_file_path, file_name)
            ftp.close()
            # Register the download link.
            fd = FileDownload(link=file_name)
            se = get_dbsession()
            se.begin()
            se.add(fd)
            se.commit()
            self.set_status(200, 'ok')
            self.finish({'link': fd.id})
        elif params.get('action') == 'download_ansible':
            """resource/host_list"""
            logger.info("download_ansible params ==> {0}".format(params))
            params['job_id'] = str(uuid.uuid1())
            path = params.get('path')
            module_name = 'fetch'
            module_args = 'src=%s dest=%s' % (path, DOWNLOAD_PATH)
            host_list = params.get('host_list')
            # On success the fetched file lands under this path.
            link = os.sep.join([DOWNLOAD_PATH, host_list[0], path])
            runner = AnsibleRunner(**params)
            result = json.loads(
                runner.run_play(host_list, module_name, module_args))
            result = sorted(result.items(), key=lambda x: x[0])
            rsx = ""
            for t, line in result:
                # Concatenate the output fragments in key order.
                rsx += line
            # Parse "ip | status => payload" out of the combined output.
            rs = re.findall(r'([\d\.]*) \| ([\w]*) => (.*)', rsx, re.S)
            ip, rs1, info = rs[0]
            msg = "success"
            info = json.loads(info.replace('\r\n', ''))
            if info.get('msg'):
                # ansible reported an error for this host.
                msg = info.get('msg')
                self.set_status(404, 'Not Found')
                self.finish({'message': msg})
            else:
                # Register the download link for the fetched file.
                fd = FileDownload(link=link)
                se = get_dbsession()
                se.begin()
                se.add(fd)
                se.commit()
                self.set_status(200, 'ok')
                self.finish({'link': fd.id, 'message': msg})
        else:
            # Multipart upload: file metadata posted under form field 'file'.
            file_metas = self.request.files['file']
            for meta in file_metas:
                filename = meta['filename'].split(os.path.sep)[-1]
                filepath = os.path.join(UPLOAD_PATH, filename)
                # Stored in binary mode; adjust here if text is required.
                with open(filepath, 'wb') as up:
                    up.write(meta['body'])
            self.set_status(200, 'ok')
            self.finish({'fp': filepath})
    except ValueError:
        logger.error(traceback.format_exc())
        self.set_status(400, 'value error')
        self.finish({'messege': 'value error'})
    except HTTPError, http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})
def get_log(self):
    """
    Logging user command and output.

    (Duplicate of the other get_log in this file -- candidate for
    consolidation.) Creates the per-session tty log files under
    LOG_DIR/tty/<date>/ and inserts a Log row describing the session.

    Returns:
        (log_file_f, log_time_f, log_id) -- open '.log' and '.time' file
        handles and the id of the persisted Log row (None if the DB
        insert failed).
    Raises:
        ServerError: when the log directory or files cannot be created.
    """
    tty_log_dir = os.path.join(LOG_DIR, 'tty')
    date_today = datetime.datetime.now()
    date_start = date_today.strftime('%Y%m%d')
    time_start = date_today.strftime('%H%M%S')
    today_connect_log_dir = os.path.join(tty_log_dir, date_start)
    log_file_path = os.path.join(
        today_connect_log_dir,
        '%s_%s_%s' % (self.user_id, self.node.id, time_start))
    try:
        mkdir(os.path.dirname(today_connect_log_dir), mode=0777)
        mkdir(today_connect_log_dir, mode=0777)
    except OSError:
        logger.debug(u'创建目录 %s 失败,请修改%s目录权限' % (today_connect_log_dir, tty_log_dir))
        raise ServerError(u'创建目录 %s 失败,请修改%s目录权限' % (today_connect_log_dir, tty_log_dir))
    try:
        # Append mode: the same path may be reopened within a session.
        log_file_f = open(log_file_path + '.log', 'a')
        log_time_f = open(log_file_path + '.time', 'a')
    except IOError:
        logger.debug(u'创建tty日志文件失败, 请修改目录%s权限' % today_connect_log_dir)
        raise ServerError(u'创建tty日志文件失败, 请修改目录%s权限' % today_connect_log_dir)
    if self.login_type == 'ssh':
        # ssh login: record connect.py's pid; a web terminal records the
        # log id instead.
        pid = os.getpid()
        # NOTE(review): bare name `remote_ip` -- presumably a module-level
        # global populated for ssh sessions; NameError otherwise. Confirm.
        self.remote_ip = remote_ip  # remote client IP
    else:
        pid = 0
    log = Log(user_id=self.user_id, node_id=self.node.id, remote_ip=self.remote_ip, login_type=self.login_type, log_path=log_file_path, start_time=date_today, pid=pid)
    log_id = None
    se = get_dbsession()
    if self.login_type == 'web':
        # Use the websocket's log id as pid so "kill" can target the
        # websocket. NOTE(review): log.id is not populated until the row
        # is flushed, so this likely assigns None here -- confirm intent.
        log.pid = log.id
    try:
        se.begin()
        se.add(log)
        se.commit()
        log_id = log.id
    except:
        se.rollback()
    finally:
        se.flush()
        se.close()
    log_file_f.write('Start at %s\r\n' % datetime.datetime.now())
    return log_file_f, log_time_f, log_id
def get(self, *args, **kwargs):
    """Return execution results for a single scheduler job.

    With the ``job_id`` URL kwarg: a paginated list (``limit``/``offset``
    query args) of Apscheduler_Task rows for that job, newest first, plus
    the job's next_run_time read from apscheduler's own jobs table.
    Without a job_id: not implemented yet (responds with nothing, as
    before).
    """
    try:
        job_id = kwargs.get('job_id')
        if job_id:
            result = {}
            # Pagination parameters.
            limit = int(self.get_argument('limit', 10))
            offset = int(self.get_argument('offset', 0))
            page = int(self.get_argument('page', 1))
            # offset = (page - 1) * limit
            se = get_dbsession()
            tasks = se.query(Apscheduler_Task).filter(
                Apscheduler_Task.job_id == job_id).order_by(
                desc(Apscheduler_Task.id))
            # Total row count for the client's pager.
            total_count = tasks.count()
            result['total_count'] = total_count
            logger.info('job [{0}] total_count [{1}]'.format(
                job_id, total_count))
            tasks = tasks.limit(limit)
            if offset > 0:
                tasks = tasks.offset(offset)
            # The job's trigger is live only while a row exists in
            # apscheduler's own table.
            # SECURITY FIX: job_id comes straight from the URL; use a
            # bound parameter instead of string interpolation to avoid
            # SQL injection.
            job = se.execute(
                "select * from apscheduler_jobs where id = :job_id",
                {'job_id': job_id}).first()
            result['job'] = {'next_run_time': job[1]} if job else ()
            result['tasks'] = [task.to_dict() for task in tasks]
            self.finish({"message": 'get job success', "result": result})
        else:
            pass
    except HTTPError as http_error:
        logger.error(traceback.format_exc())
        self.set_status(http_error.status_code, http_error.log_message)
        self.finish({'messege': http_error.log_message})