Example #1
def log_error():
    req = request.values
    page = int(req.get("p", 1))
    date_from = req.get("date_from", DateHelper.getCurrentTime(fmt="%Y-%m-%d"))
    date_to = req.get("date_to", DateHelper.getCurrentTime(fmt="%Y-%m-%d"))
    query = AppErrLog.query.filter(
        AppErrLog.created_time.between(date_from, date_to + " 23:59:59"))

    page_params = {
        "total": query.count(),
        "page_size": CommonConstant.PAGE_SIZE,
        "page": page,
        "display": CommonConstant.PAGE_DISPLAY,
    }

    pages = UtilHelper.iPagination(page_params)
    offset = (page - 1) * CommonConstant.PAGE_SIZE
    limit = CommonConstant.PAGE_SIZE * page
    list = query.order_by(AppErrLog.id.desc())[offset:limit]

    sc = {'date_from': date_from, 'date_to': date_to}
    return UtilHelper.renderView("home/log/error.html", {
        "list": list,
        "pages": pages,
        "sc": sc
    })
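
The offset/limit pair here is just a Python slice over the ordered query. Below is a minimal, self-contained sketch of the same arithmetic, with a plain list standing in for the query and an assumed PAGE_SIZE of 20 (the real value lives in CommonConstant):

# Sketch of the pagination arithmetic used above; PAGE_SIZE = 20 is an assumption.
PAGE_SIZE = 20

def page_slice(items, page):
    # page is 1-based; slicing past the end simply returns fewer rows
    offset = (page - 1) * PAGE_SIZE
    limit = PAGE_SIZE * page
    return items[offset:limit]

rows = list(range(1, 46))          # pretend query result of 45 rows
assert page_slice(rows, 1) == rows[0:20]
assert page_slice(rows, 3) == rows[40:45]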
Example #2
def tools_alert():
    req = request.values
    page = int(req.get("p", 1))
    date_from = req.get("date_from", DateHelper.getCurrentTime(fmt="%Y-%m-%d"))
    date_to = req.get("date_to", DateHelper.getCurrentTime(fmt="%Y-%m-%d"))
    status = int(req.get("status", CommonConstant.default_status_neg_99))

    query = JobAlertList.query.filter(
        JobAlertList.created_time.between(date_from, date_to + " 23:59:59"))
    if status > CommonConstant.default_status_neg_99:
        query = query.filter_by(status=status)

    page_params = {
        "total": query.count(),
        "page_size": CommonConstant.PAGE_SIZE,
        "page": page,
        "display": CommonConstant.PAGE_DISPLAY,
    }

    pages = UtilHelper.iPagination(page_params)
    offset = (page - 1) * CommonConstant.PAGE_SIZE
    limit = CommonConstant.PAGE_SIZE * page
    list = query.order_by(JobAlertList.id.desc())[offset:limit]
    data = []
    if list:
        job_ids = ModelHelper.getFieldList(list, "job_id")
        # list.sort() sorts in place and returns None, so pass the id list itself
        job_map = ModelHelper.getDictFilterField(JobList,
                                                 select_field=JobList.id,
                                                 id_list=job_ids)
        for item in list:
            tmp_data = ModelHelper.model2Dict(item)
            tmp_job_info = ModelHelper.model2Dict(
                job_map.get(tmp_data['job_id']))
            tmp_data['status_desc'] = CommonConstant.common_status_map4[
                tmp_data['status']]
            tmp_data['job_name'] = tmp_job_info['name']

            data.append(tmp_data)

    sc = {'date_from': date_from, 'date_to': date_to, 'status': status}

    return UtilHelper.renderView(
        "home/job/tools/alert.html", {
            "list": data,
            "pages": pages,
            "sc": sc,
            "status_map": CommonConstant.common_status_map4,
            "current": "alert"
        })
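
The status filter relies on a sentinel: CommonConstant.default_status_neg_99 appears to mean "no status filter". A hedged sketch of that convention with plain dicts standing in for the query rows; the -99 value is an assumption taken from the constant's name:

# Sketch of the sentinel-status convention assumed above.
SENTINEL_ALL = -99   # assumed value of CommonConstant.default_status_neg_99

def apply_status_filter(rows, status):
    # rows stands in for the SQLAlchemy query; the sentinel means "all statuses"
    if status > SENTINEL_ALL:
        return [r for r in rows if r["status"] == status]
    return rows

rows = [{"status": 1}, {"status": 2}, {"status": 1}]
assert len(apply_status_filter(rows, SENTINEL_ALL)) == 3
assert len(apply_status_filter(rows, 1)) == 2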
Example #3
def home_index():
    date = DateHelper.getCurrentTime(fmt="%Y-%m-%d")
    job_count = JobList.query.filter_by(is_del=CommonConstant.default_status_false).count()
    server_count = JobServer.query.filter_by(status=CommonConstant.default_status_true).count()
    alert_count = JobAlertList.query.filter(JobAlertList.created_time.between(date, date + " 23:59:59")).count()

    cate_map = ModelHelper.getDictFilterField(JobCategory)
    cat_job_map = {}
    cat_job_list = JobList.query.with_entities(JobList.cate_id, func.count(JobList.id)) \
        .filter_by(is_del=CommonConstant.default_status_false) \
        .group_by(JobList.cate_id).all()
    if cat_job_list:
        for _item in cat_job_list:
            cat_job_map[_item[0]] = _item[1]

    type_job_map = {}
    job_type_map = CommonConstant.job_type_map
    type_job_list = JobList.query.with_entities(JobList.job_type, func.count(JobList.id)) \
        .filter_by(is_del=CommonConstant.default_status_false) \
        .group_by(JobList.job_type).all()
    if type_job_list:
        for _item in type_job_list:
            type_job_map[_item[0]] = _item[1]

    return UtilHelper.renderView("home/index/index.html", {
        "job_count": job_count,
        "server_count": server_count,
        "alert_count": alert_count,
        'cate_map': cate_map,
        'cat_job_map': cat_job_map,
        'job_type_map': job_type_map,
        'type_job_map': type_job_map,
    })
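
The two counting loops fold (key, count) rows into dicts. Since with_entities() yields tuple-like rows, dict() over the result should produce the same mapping; a tiny sketch with made-up sample data:

# (cate_id, count) pairs as the group-by query would return them; values are made up.
cat_job_list = [(1, 7), (2, 3), (5, 12)]
cat_job_map = dict(cat_job_list)
assert cat_job_map == {1: 7, 2: 3, 5: 12}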
Example #4
    def getReleaseVersion():
        ver = "%s" % (DateHelper.getCurrentTime("%Y%m%d%H%M%S%f"))
        release_path = app.config.get('RELEASE_PATH')
        if release_path and os.path.exists(release_path):
            with open(release_path, 'r') as f:
                ver = f.readline().strip()  # drop the trailing newline readline keeps

        return ver
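
A self-contained sketch of the same fallback pattern: a timestamp version unless a release file exists. The function name, paths and demo file below are made up for illustration:

import datetime
import os
import tempfile

def release_version(release_path=None):
    # default to a timestamp version; a release file, if present, overrides it
    ver = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
    if release_path and os.path.exists(release_path):
        with open(release_path, "r") as f:
            ver = f.readline().strip() or ver
    return ver

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("v1.4.2\n")
assert release_version(tmp.name) == "v1.4.2"
os.remove(tmp.name)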
Example #5
def job_info():
    req = request.values
    id = int(req['id']) if ('id' in req and req['id']) else 0
    info = JobList.query.filter_by(id=id).first()
    if not info:
        return redirect(GlobalUrlService.buildHomeUrl("/job/index/index"))

    info = ModelHelper.model2Dict(info)

    server_info = JobServer.query.filter_by(id=info['server_id']).first()
    cate_info = JobCategory.query.filter_by(id=info['cate_id']).first()
    server_env_map = CommonConstant.server_env_map
    run_status_map = CommonConstant.run_status_map
    job_status_map = CommonConstant.job_status_map  # assumed: used below but never defined in the original snippet

    info['next_run_time'] = DateHelper.getDateOnTimestamps(
        info['next_run_time'], '%Y-%m-%d %H:%M')
    info['env_name'] = server_env_map.get(info['env_id'])
    info['run_status_desc'] = run_status_map.get(info['run_status'])
    info['job_status_desc'] = job_status_map.get(info['status'])
    info['server_name'] = server_info.name
    info['cate_name'] = cate_info.name if cate_info else ''
    info['run_interval_desc'] = DateHelper.formatBeautyTime(
        info['run_interval'] * 60)

    user_map = ModelHelper.getDictFilterField(
        User,
        select_field=User.id,
        id_list=[info['owner_uid'], info['relate_uid']])

    ## Fetch the most recent run records (last 5)
    log_list = JobRunLog.query.filter_by(job_id=id).order_by(
        JobRunLog.id.desc())[0:5]
    log_data = []
    if log_list:
        for item in log_list:
            tmp_data = ModelHelper.model2Dict(item)
            tmp_data['status_desc'] = CommonConstant.job_log_status_map[
                tmp_data['status']]
            tmp_data['duration'] = ""
            if DateHelper.getCurrentTime(date=tmp_data['end_time']
                                         ) == CommonConstant.DEFAULT_DATETIME:
                tmp_data['end_time'] = "unknown"
                tmp_data['duration'] = time.time() - time.mktime(
                    tmp_data['start_time'].timetuple())
            else:
                tmp_data['duration'] = tmp_data['end_time'].timestamp(
                ) - tmp_data['start_time'].timestamp()
            tmp_data['duration'] = DateHelper.formatBeautyTime(
                tmp_data['duration'])
            log_data.append(tmp_data)

    return UtilHelper.renderView(
        "home/job/index/info.html", {
            "info": info,
            "log_list": log_data,
            "user_map": user_map,
            "job_level_map": CommonConstant.job_level_map,
        })
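
The duration logic above distinguishes runs that have not finished (end_time still at the default datetime) from completed ones. A minimal sketch of that branch, with DEFAULT_DATETIME assumed to be an epoch-style placeholder like the one in CommonConstant:

import datetime
import time

DEFAULT_DATETIME = datetime.datetime(1970, 1, 1)   # assumed "not finished yet" sentinel

def run_duration(start_time, end_time):
    # a still-running job measures against "now"; a finished one uses end - start
    if end_time == DEFAULT_DATETIME:
        return time.time() - time.mktime(start_time.timetuple())
    return end_time.timestamp() - start_time.timestamp()

start = datetime.datetime(2024, 1, 1, 12, 0, 0)
end = datetime.datetime(2024, 1, 1, 12, 5, 30)
assert run_duration(start, end) == 330.0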
Example #6
def updateRunLog(log_id=0, max_mem=0, status=0):
    params = {
        "end_time": DateHelper.getCurrentTime(),
        "max_mem": max_mem,
        "status": status
    }
    JobRunLog.query.filter_by(id=log_id).update(params)
    db.session.commit()
    return True
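
A possible call site, assuming updateRunLog is exposed through a JobService helper as Example #9's JobService.updateRunLog(tmp_log_id, tmp_max_job_used_mem, (tmp_status == 0)) suggests; the wrapper below only illustrates the success-flag convention:

# Hedged sketch: close out a run-log row, storing 1 for a clean exit and 0 otherwise.
def close_run_log(job_service, log_id, peak_mem, exit_code):
    return job_service.updateRunLog(log_id=log_id,
                                    max_mem=peak_mem,
                                    status=int(exit_code == 0))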
Example #7
def index():
    return "APP_NAME:{0} VERSION:{1} APP:API {2}"\
        .format(app.config.get("APP_NAME"), app.config.get("APP_VERSION"), DateHelper.getCurrentTime())
Example #8
def tools_log():
    status_map = CommonConstant.job_log_status_map

    req = request.values
    page = int(req.get("p", 1))
    date_from = req.get("date_from", DateHelper.getCurrentTime(fmt="%Y-%m-%d"))
    date_to = req.get("date_to", DateHelper.getCurrentTime(fmt="%Y-%m-%d"))
    status = int(req.get("status", CommonConstant.default_status_neg_99))
    job_id = int(req.get("job_id", CommonConstant.default_status_false))

    query = JobRunLog.query.filter(
        JobRunLog.created_time.between(date_from, date_to + " 23:59:59"))

    if job_id:
        query = query.filter_by(job_id=job_id)

    if status > CommonConstant.default_status_neg_99:
        query = query.filter_by(status=status)

    page_params = {
        "total": query.count(),
        "page_size": CommonConstant.PAGE_SIZE,
        "page": page,
        "display": CommonConstant.PAGE_DISPLAY,
    }

    pages = UtilHelper.iPagination(page_params)
    offset = (page - 1) * CommonConstant.PAGE_SIZE
    limit = CommonConstant.PAGE_SIZE * page
    list = query.order_by(JobRunLog.id.desc())[offset:limit]
    data = []
    if list:
        job_ids = ModelHelper.getFieldList(list, "job_id")
        # list.sort() sorts in place and returns None, so pass the id list itself
        job_map = ModelHelper.getDictFilterField(JobList,
                                                 select_field=JobList.id,
                                                 id_list=job_ids)
        for item in list:
            tmp_data = ModelHelper.model2Dict(item)
            tmp_job_info = ModelHelper.model2Dict(
                job_map.get(tmp_data['job_id']))
            tmp_data['status_desc'] = status_map[tmp_data['status']]
            tmp_data['job_name'] = tmp_job_info['name']
            tmp_data['duration'] = ""
            if DateHelper.getCurrentTime(date=tmp_data['end_time']
                                         ) == CommonConstant.DEFAULT_DATETIME:
                tmp_data['end_time'] = "unknown"
                tmp_data['duration'] = time.time() - time.mktime(
                    tmp_data['start_time'].timetuple())
            else:
                tmp_data['duration'] = (tmp_data['end_time'].timestamp() -
                                        tmp_data['start_time'].timestamp())
            tmp_data['duration'] = DateHelper.formatBeautyTime(
                tmp_data['duration'])

            data.append(tmp_data)

    sc = {
        'date_from': date_from,
        'date_to': date_to,
        'status': status,
        'job_id': job_id
    }

    return UtilHelper.renderView(
        "home/job/tools/log.html", {
            "list": data,
            "pages": pages,
            "sc": sc,
            "status_map": status_map,
            "current": "log"
        })
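
The enrichment step assumes ModelHelper.getDictFilterField returns a dict keyed by the select_field (JobList.id here). A sketch of that lookup with plain dicts standing in for the model rows and model2Dict; all values are made up:

# Assumed shape: {id: row}; real code gets models back, dicts are used here for brevity.
job_map = {7: {"name": "sync-orders"}, 9: {"name": "rebuild-index"}}
log_rows = [{"job_id": 7, "status": 1}, {"job_id": 9, "status": 0}]

data = []
for row in log_rows:
    job_info = job_map.get(row["job_id"], {})
    row["job_name"] = job_info.get("name", "")
    data.append(row)

assert [r["job_name"] for r in data] == ["sync-orders", "rebuild-index"]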
Example #9
    def run(self, params):
        pid_path = self.getPidPath('dispatch.pid')
        if self.checkPidExist(pid_path):
            app.logger.info("[core] dispatch is running")
            return False

        pid = str(os.getpid())
        if not self.setPidFile(pid_path, pid):
            err_msg = self.getErrMsg()
            app.logger.info("Cann't get a lock file,err msg : " + err_msg)
            return False


        params = self.getEnvFile()
        server_id = params['id']
        host = params['name']
        list = JobList.query.filter_by(server_id=server_id, status=CommonConstant.default_status_true,
                                       run_status=CommonConstant.default_status_true, is_del=CommonConstant.default_status_false).all()

        if not list:
            app.logger.info("没有数据需要调度~~")
            return True

        for t in list:

            ## Has the scheduled time arrived yet? This check really belongs in the SQL query
            if t.next_run_time >= time.time():
                app.logger.info("job_id:%s, scheduled run time not reached yet" % t.id)
                continue

            # Run the job in a child process
            app.logger.info("get a task: job_id:%s, run time:%s" % (t.id, DateHelper.getCurrentTime()))
            '''
            The child process gets a copy of the parent's memory, and the two processes then run
            independently; their relative execution order is undefined and unpredictable.
            Under os.fork() the child inherits the parent's database connections, which causes
            problems, so destroy the existing engine first to make sure the parent holds no open
            DB connection. Related error: MySQL server has gone away
            '''
            self.closePoolDB()

            pid = os.fork()
            if pid == 0:  # Child process: an independent process (all variables are copied at the moment of the fork), so write this block as if it were a standalone function
                self.closePoolDB()
                job_id = t.id
                job_pid_file = self.getPidPath( 'job_%s.pid' % job_id )
                if self.checkPidExist(job_pid_file):
                    app.logger.info("job_id:%s is running on  %s" % (job_id, host))
                    return 0

                ## Create the per-job pid file to prevent duplicate runs; the pid recorded here is actually wrong and gets overwritten below
                tmp_pid = str(os.getpid())
                if not self.setPidFile(job_pid_file, tmp_pid):
                    app.logger.info("job_id:%s 不能建立pid,path:%s,msg:%s" % (job_id, job_pid_file, self.getErrMsg()))
                    return True

                app.logger.info("job_id:%s 建立pid,子进程pid:%s" % (job_id, tmp_pid))


                ## Mark the job as running
                try:
                    tmp_affect_rows = JobList.query.filter_by( id = job_id,run_status = CommonConstant.default_status_true )\
                        .update( dict( run_status = CommonConstant.default_status_pos_2 ) )
                    db.session.commit()
                    if tmp_affect_rows < 1:
                        app.logger.info("job_id:%s不能得到lock,任务已经运行中" % job_id)
                        return False
                except:
                    app.logger.info( "job_id:%s不能得到锁状态,err:%s" % (job_id,str( sys.exc_info() ) ) )

                ## Write a dispatch log entry
                tmp_log_id = 0
                try:
                    tmp_log_params = {
                        "job_id":job_id,
                        "server_id":server_id,
                        "server_name":host,
                        "status": CommonConstant.default_status_neg_1,
                        "start_time":DateHelper.getCurrentTime()
                    }
                    tmp_log_id = JobService.addRunLog( tmp_log_params )
                except :
                    pass

                self.closePoolDB()

                tmp_job_run_start_time = time.time()  # time the job started running
                # os.system(t.command) cannot capture the job's internal output; we need to read it line by line or buffer by buffer
                # status = os.system(t.command)>>8#status, output = commands.getstatusoutput(t.command)
                # bufsize=-1 means use the system's default buffering
                # Prefix the command with a marker so the real worker process can be found later by keyword search
                # Child process not exiting after creation: https://bbs.csdn.net/topics/390596479?_=1515055076
                tmp_command = t.command
                tmp_command = "tmp_job_%s='%s' && %s" % (job_id, DateHelper.getCurrentTime(), tmp_command)
                # To get the effect of 2>&1, set stdout=subprocess.PIPE, stderr=subprocess.STDOUT
                sp = subprocess.Popen(tmp_command, bufsize=-1, shell=True, close_fds=True,
                                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

                tmp_run_job_pid = sp.pid

                # The job pid stored above is wrong; overwrite it here
                self.coverPidFile(job_pid_file, tmp_run_job_pid)
                '''
                    If the command is wrapped in a run-shell layer, we need to find that process's
                    child pid before measuring memory.
                    tmp_pid is the pid of the current forked child process.
                    tmp_run_job_pid is the pid of the process started by subprocess.
                    The pid that is actually doing the work has to be found via keyword search,
                    which is why a marker was prefixed to the command.
                '''
                app.logger.info("job_id:%s started the job script, pid:%s" % (job_id, tmp_run_job_pid))

                ## Track memory usage
                tmp_max_job_used_mem = tmp_job_used_mem = UtilHelper.getUsedMemory(tmp_run_job_pid)
                app.logger.info("job_id:%s log start-------" % job_id)
                '''
                sp.poll() return values:
                0     exited normally
                1     sleep
                2     child process does not exist
                -15   killed
                None  still running
                Used to judge the process state.
                '''
                while sp.poll() is None:
                    ## Track memory usage
                    tmp_job_used_mem = UtilHelper.getUsedMemory(tmp_run_job_pid)
                    if tmp_job_used_mem > tmp_max_job_used_mem:
                        tmp_max_job_used_mem = tmp_job_used_mem

                    tmp_line_output = sp.stdout.readline()
                    tmp_line_output = tmp_line_output.strip()
                    # readline() returns bytes
                    tmp_line_output = str(tmp_line_output, encoding="utf8")

                    tmp_lines = tmp_line_output.split("\n")
                    for tmp_line in tmp_lines:
                        app.logger.info("job_id:%s %s" % (job_id, tmp_line) )

                app.logger.info("job_id:%s 日志end-------" % job_id)
                ##统计内存占用量
                tmp_job_used_mem = UtilHelper.getUsedMemory( tmp_run_job_pid )
                if tmp_job_used_mem > tmp_max_job_used_mem:
                    tmp_max_job_used_mem = tmp_job_used_mem

                app.logger.info("job_id:%s PID:%s, 使用内存(end) %s" % (job_id, tmp_run_job_pid, tmp_job_used_mem))
                app.logger.info("job_id:%s PID:%s, 最大使用内存 %s" % (job_id, tmp_run_job_pid, tmp_max_job_used_mem))
                app.logger.info("job_id:%s 更新消耗内存完毕" % (job_id))

                # Close the captured stdout
                sp.stdout.close()
                tmp_status = sp.wait()
                app.logger.info("job_id:%s status_code:%s,%s" % (t.id, str(tmp_status),tmp_command ))


                # Kept separate from the block below so that alerting problems do not affect normal handling
                try:
                    # Killing a resident job can also raise "MySQL server has gone away"; any job that runs long enough hits this
                    self.closePoolDB()
                    # Alerting checks
                    self.alertStatusJudge(t, tmp_status)
                    self.alertRunTimeJudge(t, tmp_job_run_start_time)
                except:
                    app.logger.info( self.getErrMsg() )


                # Update the status and the next run time
                try:
                    self.closePoolDB()
                    ## Release the pid file early: on a very busy server this process runs slowly, and the status could end up updated while the pid file has not been deleted yet
                    self.atexit_removepid(job_pid_file)
                    if int(t.job_type) == CommonConstant.default_status_pos_3:  # one-off job
                        JobList.query.filter_by(id=job_id).update(dict(run_status=CommonConstant.default_status_false, status=CommonConstant.default_status_false))
                        db.session.commit()
                    else:
                        if int(t.job_type) == CommonConstant.default_status_pos_2:  # resident job: once it stops, run it again the next minute
                            tmp_next_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
                            tmp_next_time = tmp_next_time.replace(second=0)
                            tmp_next_time = int( time.mktime(tmp_next_time.timetuple() ) )
                        else:
                            tmp_next_time = t.next_run_time + int( math.ceil((time.time() - t.next_run_time) / (t.run_interval * 60)) * t.run_interval * 60)
                        JobList.query.filter_by(id=job_id).update( dict( run_status = CommonConstant.default_status_true ,next_run_time =  tmp_next_time ) )
                        db.session.commit()

                    ## Update the corresponding run log; kept inside the try so an error here does not affect the rest
                    JobService.updateRunLog(tmp_log_id, tmp_max_job_used_mem, (tmp_status == 0))
                except:
                    app.logger.info( self.getErrMsg() )
                # Done
                self.closePoolDB()

                app.logger.info('job_id:%s finished at %s, child process exiting~~' % (job_id, DateHelper.getCurrentTime()))
                return 0

            elif pid > 0:  # Parent process
                '''
                status is an output parameter.
                Choosing the pid argument for waitpid:
                < -1  reap any child process in the given process group
                = -1  reap any child process (wait for all children to terminate)
                =  0  reap any child process in the caller's process group
                >  0  reap the child process with the given pid
                '''
                app.logger.info("parent process job_id:%s pid:%s" % (t.id, pid))
                #os.waitpid( pid , os.WNOHANG)
                os.waitpid(-1, os.WNOHANG)
                app.logger.info("job_id:%s 父进程结束~~" % t.id)
            else:
                app.logger.info("job_id:%s,不能建立调度器" % (t.id))

        app.logger.info("it's over~~")
        return True
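
The next_run_time update for interval jobs rounds the old schedule forward by whole run_interval periods until it lands after "now". A worked sketch of that formula with made-up timestamps:

import math

def next_run_time(scheduled_ts, now_ts, run_interval_minutes):
    # advance the old schedule by whole periods until it lands after "now"
    period = run_interval_minutes * 60
    missed = math.ceil((now_ts - scheduled_ts) / period)
    return scheduled_ts + int(missed * period)

# scheduled at t=0 with a 10-minute interval; "now" is 25 minutes later,
# so the next slot is the 30-minute mark
assert next_run_time(0, 25 * 60, 10) == 30 * 60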