Example #1
async def save_to_db(data):
    """
    Save task records to the DB.
    :param data: list of dicts, each with
        data[][task_unique_id]
        data[][service_path]
        data[][method]
        data[][params_json]
        data[][key1]
        data[][key2]
        data[][key3]
    :return:
    """
    if isinstance(data, dict):
        data = [data]

    model = AsyncModelBase()
    sql_list = []
    for item in data:
        key = 'task_unique_id, service_path, method, key1, key2, key3, params, create_time'
        val = '%s, %s, %s, %s, %s, %s, %s, %s'
        duplicate = ['params = %s', 'create_time = %s']
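        # The first eight entries of `value` below bind to the insert placeholders in
        # `val`; the last two bind to the DUPLICATE_KEY_UPDATE placeholders above.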
        value = (item['task_unique_id'], item['service_path'], item['method'],
                 item.get('key1', ''), item.get('key2', ''), item.get('key3', ''),
                 item['params_json'], DateUtils.time_now(),
                 item['params_json'], DateUtils.time_now())
        sql_list.append({
            model.sql_constants.SQL_TYPE: model.sql_constants.INSERT,
            model.sql_constants.TABLE_NAME: 'tbl_cfg_task',
            model.sql_constants.DICT_DATA: {
                model.sql_constants.KEY: key,
                model.sql_constants.VAL: val,
                model.sql_constants.DUPLICATE_KEY_UPDATE: duplicate
            },
            model.sql_constants.VALUE_TUPLE: value
        })
    result = await model.do_sqls(sql_list)
    if result is None:
        for item in data:
            logger.info(
                'Task Add [%s] to DB failed, path [%s], method [%s], params: %s',
                item['task_unique_id'], item['service_path'], item['method'],
                item['params_json'])
    else:
        for item in data:
            logger.info(
                'Task Add [%s] to DB success, path [%s], method [%s], params: %s',
                item['task_unique_id'], item['service_path'], item['method'],
                item['params_json'])

    return result
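
A minimal call-site sketch for the batch form above, assuming an asyncio entry point and the project's AsyncModelBase/DateUtils/logger wiring; the field values are hypothetical:

import asyncio

# Hypothetical payload: task_unique_id, service_path, method and params_json are
# required by save_to_db(); key1/key2/key3 default to '' when omitted.
payload = [{
    'task_unique_id': 'task-20240101-0001',
    'service_path': 'order/service',
    'method': 'create_order',
    'params_json': '{"order_id": 1}',
}]
asyncio.run(save_to_db(payload))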
Example #2
def run():
    """
    开始执行
    :return:
    """
    if warning_to_email is None:
        print('warning mail error, check setting.conf [monitor]')
        return

    last_len = 0
    loop_num = 0
    conn_failed_num = 0
    while True:
        try:
            conn = redis.get_conn()
            list_len = conn.llen(list_cache_key)
        except Exception as e:
            conn_failed_num += 1
            if conn_failed_num >= 6:
                response = send_warning_info(
                    'monitor',
                    json.dumps({
                        'exception message': str(e),
                        'time': DateUtils.time_now()
                    }))
                if response is True:
                    conn_failed_num = 0

            list_len = 0
            logger.error('monitor redis get_conn failed, e: %s', str(e))

        if list_len > 0 and list_len >= last_len:
            loop_num += 1
            # Queue length stayed the same or grew
            if loop_num >= 6:
                response = send_warning_info(
                    'task',
                    json.dumps({
                        'list_len': list_len,
                        'time': DateUtils.time_now()
                    }))
                if response is True:
                    loop_num = 0
            else:
                logger.info('task len add')
        else:
            last_len = list_len
            loop_num = 0
            # print 'task status is normal'

        time.sleep(10)
Example #3
async def update_to_db(task_unique_id, status):
    """
    更新任务状态
    :param task_unique_id:
    :param status:
    :return:
    """
    model = AsyncModelBase()
    fields = [
        'status = %s',
        'execute_time = %s'
    ]
    condition = 'task_unique_id = %s'
    value = (status, DateUtils.time_now(), task_unique_id)
    result = await model.update('tbl_cfg_task', {
        model.sql_constants.FIELDS: fields,
        model.sql_constants.CONDITION: condition
    }, value)
    if result is None:
        logger.info('Task Update [%s] failed, status: %s',
                    task_unique_id, status)
    else:
        logger.info('Task Update [%s] to DB success, status: %s',
                    task_unique_id, status)

    return result
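
A minimal call-site sketch; the literal status value is a placeholder, since the real status codes are defined elsewhere in the project:

import asyncio

# Hypothetical usage: mark a task as executed. The literal status value is a
# stand-in for whatever status constant the project actually defines.
result = asyncio.run(update_to_db('task-20240101-0001', 1))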
Example #4
async def save_to_db(task_unique_id, service_path, method, params_json):
    """
    保存任务到DB
    :param task_unique_id:
    :param service_path:
    :param method:
    :param params_json:
    :return:
    """
    model = AsyncModelBase()
    key = 'task_unique_id, service_path, method, params, create_time'
    val = '%s, %s, %s, %s, %s'
    duplicate = [
        'create_time = %s'
    ]
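    # The first five entries of `value` bind to the insert placeholders in `val`;
    # the trailing DateUtils.time_now() binds to the DUPLICATE_KEY_UPDATE placeholder.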
    value = (task_unique_id, service_path, method, params_json,
             DateUtils.time_now(), DateUtils.time_now())
    result = await model.insert('tbl_cfg_task', {
        model.sql_constants.KEY: key,
        model.sql_constants.VAL: val,
        model.sql_constants.DUPLICATE_KEY_UPDATE: duplicate
    }, value)
    if not result:
        logger.info('Task Add [%s] to DB failed, path [%s], method [%s], params: %s',
                    task_unique_id, service_path, method, params_json)
    else:
        logger.info('Task Add [%s] to DB success, path [%s], method [%s], params: %s',
                    task_unique_id, service_path, method, params_json)

    return result
Example #5
async def add_job(job_list):
    """
    添加任务至任务队列
    :param job_list: 
    :return: 
    """
    logger.info('Adding jobs to the queue')
    current_time = DateUtils.time_now()
    if job_list:
        for job in job_list:
            if isinstance(job['params'], str):
                try:
                    job['params'] = json.loads(job['params'])
                except Exception as e:
                    logger.exception('JSON ERROR: %s', e)
            del job['create_time']
            try:
                cache_key = ServiceBase.schedule.JOB_KEY + job['start_time'].replace(
                    ' ', '').replace('-', '').replace(':', '')
                if current_time >= job['start_time']:
                    # The job's start time has already passed: run it immediately,
                    # then delete the corresponding key.
                    await do_job(job, is_normal=False)
                    await redis.delete(cache_key)
                else:
                    # Add the job to its queue group; a set is used, so duplicate members are ignored.
                    await redis.sadd(cache_key,
                                     json.dumps(job, cls=CJsonEncoder))
                    length = await redis.scard(cache_key)
                    await redis.hset(ServiceBase.schedule.SCHEDULE_KEY,
                                     cache_key, length)
            except Exception as e:
                logger.exception('ADD JOB ERROR: %s', e)
                await task.save_task_error(job, e)
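
For illustration, a hypothetical job record of the shape add_job() expects, and the Redis set key it derives from start_time (ServiceBase.schedule.JOB_KEY is assumed to be a plain string prefix):

# Hypothetical job row; the field names follow the code above.
job = {
    'params': '{"user_id": 7}',            # JSON string, decoded in place
    'start_time': '2024-01-01 12:00:00',   # compared against DateUtils.time_now()
    'create_time': '2024-01-01 11:00:00',  # deleted before the job is queued
    'cron': '0 12 * * *',                  # consumed later by cal_next_start_time()
}
# Spaces, dashes and colons are stripped from start_time, so the derived key is:
# cache_key == ServiceBase.schedule.JOB_KEY + '20240101120000'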
Example #6
async def cal_next_start_time(job, is_normal=True):
    """
    计算下一次定时任务发生的时间
    :param job: 
    :param is_normal
    :return: 
    """
    # 无限循环执行, 必定带正则表达式,否则直接报错
    # 解析正则表达式,  计算出下一次需要执行的时间点
    if not is_normal:
        current_time = int(DateUtils.timestamps_now())
    else:
        current_time = DateUtils.str_to_time(job['start_time'])
    left_time = cron_utils.analyze(current_time + 1, job['cron'])
    start_time = DateUtils.format_time(current_time + left_time)
    # Round start_time forward to the nearest run_time-second boundary.
    current_date = start_time[:16] + ':00'
    current_count = 1
    while current_date < start_time:
        # Guard against an infinite loop
        if current_count >= 1000:
            break
        current_count += 1
        current_date = DateUtils.add_second(current_date, seconds=run_time)
    start_time = current_date
    job['start_time'] = start_time
    cache_key = ServiceBase.schedule.JOB_KEY + job['start_time'].replace(
        ' ', '').replace('-', '').replace(':', '')
    now_date = DateUtils.time_now()
    # If the next execution time is already in the past, jump forward to the
    # next execution slot.
    if job['start_time'] < now_date:
        logger.info('Next execution time for the job is earlier than the current time')
        current_date = now_date[:16] + ':00'
        while current_date < now_date:
            current_date = DateUtils.add_second(current_date,
                                                seconds=2 * run_time)
        job['start_time'] = current_date
        cache_key = ServiceBase.schedule.JOB_KEY + job['start_time'].replace(
            ' ', '').replace('-', '').replace(':', '')

    model = importlib.import_module('task.schedule.model')
    model = model.Model()
    await model.update_job(job)
    await redis.sadd(cache_key, json.dumps(job, cls=CJsonEncoder))
    length = await redis.scard(cache_key)
    await redis.hset(ServiceBase.schedule.SCHEDULE_KEY, cache_key, length)
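
For illustration, a standalone re-implementation of the alignment step above using only the standard library; it assumes run_time = 30 and that DateUtils.add_second simply adds seconds to a 'YYYY-MM-DD HH:MM:SS' string:

from datetime import datetime, timedelta

run_time = 30
start_time = '2024-01-01 12:00:47'
current_date = start_time[:16] + ':00'   # floor to the minute
while current_date < start_time:
    # Step forward in run_time-second increments until we reach or pass start_time.
    stepped = datetime.strptime(current_date, '%Y-%m-%d %H:%M:%S') + timedelta(seconds=run_time)
    current_date = stepped.strftime('%Y-%m-%d %H:%M:%S')
print(current_date)                      # -> '2024-01-01 12:01:00'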
Example #7
    async def check_queue(cls):
        """
        检查任务队列
            每10秒检查一次,如果任务队列的长度比前一次检查的长度长,且前一次检查长度不为0,那么发送报警邮件
        :return:
        """
        try:
            task_queue_len = await redis.llen(cls.task_queue)
        except Exception:
            raise

        if task_queue_len > 0:
            if cls.task_queue_length > 0 and task_queue_len >= cls.task_queue_length:
                if cls.task_queue_report_num >= 3:
                    report_html = """
                        <tr>
                            <td style="font-size:30px;">The task queue is not being consumed promptly. Current task count: {}, task count at previous check: {}. Please take note.</td>
                        </tr>
                        <tr>
                            <td>Possible causes:</td>
                        </tr>
                        <tr>
                            <td>1. Insufficient consumer capacity. If so, the program will automatically add consumers after 3 checks; if the alarms still do not stop after that, see 2.</td>
                        </tr>
                        <tr>
                            <td>2. The task process has died. Find out why it died, and restart the service.</td>
                        </tr>
                        <tr>
                            <td>Task Monitor report at: {}</td>
                        </tr>
                    """.format(task_queue_len, cls.task_queue_length,
                               DateUtils.time_now())
                    await Report.report("", cls.mail_template(report_html))

                    if cls.task_queue_report_num >= 6:
                        await task.add("", "", None, False, {
                            'loop_num': 1,
                        })
                else:
                    cls.task_queue_report_num += 1
            else:
                cls.task_queue_report_num = 0

            cls.task_queue_length = task_queue_len