Example #1
    def verify(access_token):
        """
        Verify a JWT access token.
        :param access_token:
        :return: the decoded payload dict on success, False otherwise
        """
        if access_token:
            try:
                header, p, signature = access_token.split('.')
                # Restore the base64 padding stripped during encoding.
                p = p + '=' * (-len(p) % 4)
                p = base64.decodebytes(p.encode('utf-8')).decode('utf-8')
                p_dict = json.loads(p)
            except Exception as e:
                logger.exception(e)
                return False

            # Here 'exp' holds a lifetime in seconds, not an absolute
            # expiry timestamp, so the deadline is iat + exp.
            create_time = p_dict['iat']
            expires_in = p_dict['exp']
            time_now = int(dateUtils.timestamps_now())
            if create_time + expires_in < time_now:
                return False

            # Re-encode the payload and compare against the original
            # token to detect tampering.
            encoded = JWT.encode(p_dict)
            if encoded != access_token:
                return False

            return p_dict

        return False
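
The `'=' * (-len(p) % 4)` expression restores the padding that JWT encoders strip from each base64 segment. A self-contained sketch of that round trip, using only the standard library (the sample claims are made up; note that real JWT segments use the URL-safe alphabet, hence urlsafe_b64encode below):

import base64
import json

payload = {'iat': 1529483400, 'exp': 7200, 'uid': 42}   # sample claims
segment = base64.urlsafe_b64encode(
    json.dumps(payload).encode('utf-8')).rstrip(b'=')   # padding stripped, JWT-style

p = segment.decode('utf-8')
p = p + '=' * (-len(p) % 4)                             # restore padding
decoded = json.loads(base64.urlsafe_b64decode(p))
assert decoded == payload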
Example #2
    async def loop(self, task_id):
        """
        1. Fetch one item from the queue
        2. Parse the data
        3. Invoke the target method
        :return:
        """
        logger.info('task[%s] started.', task_id)
        await task.register_coroutine(self.server_name, task_id, int(DateUtils.timestamps_now()))
        await self.process(task_id)
Example #3
async def async_add_job(service_path='',
                        method='',
                        params=None,
                        start_time='',
                        cron='',
                        job_id='',
                        group_name='default',
                        repeat_count=0):
    """
    Add a scheduled job.
    :param service_path:   path of the service to execute
    :param method:         method to invoke
    :param params:         parameters to pass in
    :param start_time: (2018-06-20 16:30:00)
    :param cron: a five-field expression; the fields are, left to right,
                 second, minute, hour, day, month.
                 Each field takes a literal value or a range:
                 20      means [20]
                 1-3     means [1, 2, 3]
                 1,4,6,7 means [1, 4, 6, 7]
                 *       means every value
                 1/5     means start at 1, then run every 5 steps
    :param job_id:       job ID; every job's ID must be unique
    :param group_name:
    :param repeat_count: pass -1 for unlimited executions; running the same
                         job a finite number of times is not supported yet.
                         If the business needs that, use several jobs instead.
    :return:
    """
    params = params or {}  # avoid the shared-mutable-default pitfall
    if cron:
        current_time = int(DateUtils.timestamps_now())
        left_time = cron_utils.analyze(current_time + 1, cron)
        start_time = DateUtils.format_time(current_time + left_time)
    # Align start_time to the nearest RUN_TIME-second boundary.
    current_date = start_time[:16] + ':00'
    current_count = 1
    while current_date < start_time:
        # Guard against an infinite loop.
        if current_count >= 1000:
            break
        current_count += 1
        current_date = DateUtils.add_second(current_date, seconds=RUN_TIME)
    start_time = current_date

    job_params = {
        'job_id': job_id,
        'group_name': group_name,
        'start_time': start_time,
        'limit_time': 0,
        'repeat_count': repeat_count,
        'cron': cron,
        'path': service_path,
        'method': method,
        'params': params
    }
    result = await save_job(job_params)
    return result
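
The five-field cron syntax described in the docstring expands naturally into sets of allowed values. `cron_utils.analyze` is not shown in these examples, so the following is only a hypothetical sketch of how a single field could be expanded under the semantics the docstring gives; `parse_cron_field` is an invented name:

def parse_cron_field(field, low, high):
    """Expand one cron field ('20', '1-3', '1,4,6,7', '*', '1/5')
    into a sorted list of allowed values within [low, high]."""
    if field == '*':
        return list(range(low, high + 1))
    if '/' in field:                       # '1/5': start at 1, step 5
        start, step = (int(x) for x in field.split('/'))
        return list(range(start, high + 1, step))
    values = set()
    for part in field.split(','):          # handles '1,4,6,7' and '1-3'
        if '-' in part:
            a, b = (int(x) for x in part.split('-'))
            values.update(range(a, b + 1))
        else:
            values.add(int(part))
    return sorted(values)

# e.g. for the seconds field:
assert parse_cron_field('1-3', 0, 59) == [1, 2, 3]
assert parse_cron_field('1/5', 0, 59)[:3] == [1, 6, 11]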
Example #4
async def cal_next_start_time(job, is_normal=True):
    """
    Compute the next time a scheduled job should fire.
    :param job:
    :param is_normal:
    :return:
    """
    # A job that repeats forever must carry a cron expression; otherwise
    # this raises. Parse the cron expression to get the next run time.
    if not is_normal:
        current_time = int(DateUtils.timestamps_now())
    else:
        current_time = DateUtils.str_to_time(job['start_time'])
    left_time = cron_utils.analyze(current_time + 1, job['cron'])
    start_time = DateUtils.format_time(current_time + left_time)
    # Align start_time to the nearest run_time-second boundary.
    current_date = start_time[:16] + ':00'
    current_count = 1
    while current_date < start_time:
        # Guard against an infinite loop.
        if current_count >= 1000:
            break
        current_count += 1
        current_date = DateUtils.add_second(current_date, seconds=run_time)
    start_time = current_date
    job['start_time'] = start_time
    cache_key = ServiceBase.schedule.JOB_KEY + job['start_time'].replace(
        ' ', '').replace('-', '').replace(':', '')
    now_date = DateUtils.time_now()
    # If the next execution time is already in the past, skip ahead to the
    # next execution slot after now.
    if job['start_time'] < now_date:
        logger.info('next execution time is earlier than the current time')
        current_date = now_date[:16] + ':00'
        while current_date < now_date:
            current_date = DateUtils.add_second(current_date,
                                                seconds=2 * run_time)
        job['start_time'] = current_date
        cache_key = ServiceBase.schedule.JOB_KEY + job['start_time'].replace(
            ' ', '').replace('-', '').replace(':', '')

    model = importlib.import_module('task.schedule.model')
    model = model.Model()
    await model.update_job(job)
    await redis.sadd(cache_key, json.dumps(job, cls=CJsonEncoder))
    length = await redis.scard(cache_key)
    await redis.hset(ServiceBase.schedule.SCHEDULE_KEY, cache_key, length)
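
The cache key is just the job's start time with separators stripped, prefixed by JOB_KEY. A minimal sketch of the derivation (the 'JOB:' prefix is a placeholder; the real value lives in ServiceBase.schedule.JOB_KEY):

JOB_KEY = 'JOB:'  # placeholder prefix, assumed for illustration

start_time = '2018-06-20 16:30:00'
cache_key = JOB_KEY + start_time.replace(' ', '').replace('-', '').replace(':', '')
assert cache_key == 'JOB:20180620163000'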
Example #5
async def add(path='', method='', arguments=None, is_priority=False, sub_task=None, task_unique_id=None):
    """
    添加任务
    :param path: 调用包文件路径
    :param method:  调用方法
    :param arguments: 请求参数
    :param is_priority: 是否优先处理(True or False)
    :param sub_task: 是否有子任务
            sub_task['queue_key'] 目标队列key
            sub_task['task_num'] 任务数
    :param task_unique_id
    :return:
    """
    #
    arguments_json = json.dumps(arguments, cls=CJsonEncoder)
    if not task_unique_id:
        task_unique_id = str(int(DateUtils.timestamps_now())) + str(random.randrange(10000, 100000))

    logger.info('Task Add [%s], path [%s], method [%s], params: %s',
                task_unique_id, path, method, arguments_json)
    #
    await save_to_db(task_unique_id, path, method, arguments_json)
    if (path and method and arguments) or sub_task:
        params = {
            'task_unique_id': task_unique_id,
            'path': path,
            'method': method,
            'arguments': arguments,
            'sub_task': sub_task
        }

        try:
            params = json.dumps(params, cls=CJsonEncoder)
            if is_priority:
                result = await redis.rpush(task_queue, params)
            else:
                result = await redis.lpush(task_queue, params)
            #
            if result:
                logger.info('Task Add [%s] to QUEUE success, path [%s], method [%s], params: %s',
                            task_unique_id, path, method, arguments_json)
            else:
                logger.info('Task Add [%s] to QUEUE failed, path [%s], method [%s], params: %s',
                            task_unique_id, path, method, arguments_json)
        except Exception as e:
            await Report.report('task add exception', e)
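
The priority switch relies on which end of the Redis list the worker pops from: assuming the consumer pops from the tail (the consumer, `task.get_one`, is not shown here), `rpush` places a priority item at the tail where it is taken next, while `lpush` queues normal items at the head for FIFO order. A usage sketch; the path and method names are illustrative only:

# Inside a coroutine; 'service.user' and 'refresh' are hypothetical names.
await add(path='service.user',
          method='refresh',
          arguments={'uid': 42},
          is_priority=True)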
Example #6
    def run(self):
        # Parse command-line arguments.
        loop_num = 0
        config_file = ''
        arguments = sys.argv
        for k, v in enumerate(arguments):
            if v == '-t':
                # Cast to int: range() below requires an integer.
                loop_num = int(arguments[k + 1])

            if v == '-c':
                config_file = arguments[k + 1]

            if v == '-s':
                self.server_name = arguments[k + 1]

        if config_file:
            self.parse_config(config_file)

        if loop_num:
            task.loop_num = loop_num

        # Fall back to a random server name.
        if not self.server_name:
            self.server_name = 'S' + str(random.randrange(10000, 100000))

        event_loop = asyncio.get_event_loop()
        # register server
        asyncio.ensure_future(
            task.register_server(server_name=self.server_name,
                                 create_time=int(DateUtils.timestamps_now())))

        # main loop
        for i in range(task.loop_num):
            asyncio.ensure_future(self.loop(str(i)))

        # monitor loop
        # asyncio.ensure_future(Monitor.start())

        event_loop.run_forever()
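
The hand-rolled sys.argv scan above is equivalent to a small argparse definition. A sketch of the same three flags, reading -t as coroutine count, -c as config path, and -s as server name, per the code above:

import argparse

parser = argparse.ArgumentParser(description='Task worker')
parser.add_argument('-t', dest='loop_num', type=int, default=0,
                    help='number of worker coroutines to spawn')
parser.add_argument('-c', dest='config_file', default='',
                    help='path to the config file')
parser.add_argument('-s', dest='server_name', default='',
                    help='server name (random when omitted)')
args = parser.parse_args()   # e.g. python worker.py -t 4 -s S12345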
Example #7
    async def process(self, task_id, task_queue_key=None, is_break=False):
        """
        处理任务
        :param task_id: 任务ID
        :param task_queue_key: 指定队列的缓存KEY
        :param is_break: 是否需要停止循环
        :return:
        """
        while True:
            #
            status = await task.get_server_status(self.server_name)
            if not status or status == '0':
                logger.info('Server [%s] shutdown, task [%s] shutdown', self.server_name, task_id)
                self.is_running = False
                await asyncio.sleep(10)
                continue

            if not self.is_running:
                self.is_running = True
                logger.info('Server [%s] start, task [%s] start', self.server_name, task_id)
            #
            is_process_success = 'success'
            task_unique_id = ''
            item = await task.get_one(task_queue_key)
            if item is False:
                logger.info('Server [%s], task [%s], get seed failed.', self.server_name, task_id)
                await Report.report('get task exception', '')

            if item:
                try:
                    target = json.loads(item)
                    task_unique_id = target.get('task_unique_id', '')
                    path = target['path']
                    method = target['method']
                    params = target['arguments']
                    sub_task = target['sub_task']
                    #
                    logger.info('Task [%s] Pop [%s] from queue, path [%s], method [%s], params: %s',
                                task_id, task_unique_id, path, method, json.dumps(params))
                    #
                    if sub_task:
                        self.create_sub_task(sub_task)
                    else:
                        result = await ServiceBase().cs(path, method, params)
                        logger.info('task[%s], path: %s, method: %s result: %s', task_id, path, method, result)
                        if not result or 'code' not in result or (result['code'] != 0 and result['code'] != 1004):
                            # failed
                            raise ValueError(result)
                except Exception as e:
                    logger.exception(e)
                    is_process_success = 'failed'
                    await task.save_task_error(item, e)
                #
                if task_unique_id:
                    await task.update_to_db(task_unique_id, is_process_success)
            else:
                # logger.info('task[%s] wait seed.', task_id)
                if is_break:
                    logger.info('task[%s] break.', task_id)
                    break

            #
            await task.refresh_coroutine(self.server_name, task_id, int(DateUtils.timestamps_now()))
            await asyncio.sleep(1)
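
`task.get_one` is not shown in these examples. If, as assumed after Example #5, priority items are rpush-ed to the tail, a matching consumer would pop from the right. A purely hypothetical sketch:

# Hypothetical consumer matching add(): pop from the tail so that
# rpush-ed (priority) items are served first.
async def get_one(task_queue_key=None):
    key = task_queue_key or task_queue
    try:
        return await redis.rpop(key)   # None when the queue is empty
    except Exception as e:
        logger.exception(e)
        return False                   # process() treats False as a fetch failure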
Example #8
async def add(path='',
              method='',
              arguments=None,
              is_priority=False,
              sub_task=None,
              task_unique_id=None,
              batch_data=None):
    """
    添加任务
    :param path: 调用包文件路径
    :param method:  调用方法
    :param arguments: 请求参数
    :param is_priority: 是否优先处理(True or False)
    :param sub_task: 是否有子任务
            sub_task['queue_key'] 目标队列key
            sub_task['task_num'] 任务数
    :param task_unique_id
    :param batch_data: list
        batch_data[][service_path]
        batch_data[][method]
        batch_data[][arguments]
        batch_data[][key1]
        batch_data[][key2]
        batch_data[][key3]
    :return:
    """
    #
    arguments_json = json.dumps(arguments, cls=CJsonEncoder)
    if not task_unique_id:
        task_unique_id = str(int(DateUtils.timestamps_now())) + str(
            random.randrange(10000, 100000))

    logger.info('Task Add [%s], path [%s], method [%s], params: %s',
                task_unique_id, path, method, arguments_json)
    #
    if batch_data:
        for item in batch_data:
            if not item.get('task_unique_id'):
                item['task_unique_id'] = str(int(
                    DateUtils.timestamps_now())) + str(
                        random.randrange(10000, 100000))
            #
            item['params_json'] = json.dumps(item['arguments'],
                                             cls=CJsonEncoder)
            # del item['arguments']
    else:
        batch_data = [{
            'task_unique_id': task_unique_id,
            'service_path': path,
            'method': method,
            'arguments': arguments,
            'params_json': arguments_json
        }]
    #
    await save_to_db(batch_data)
    for item in batch_data:
        params = {
            'task_unique_id': item['task_unique_id'],
            'path': item['service_path'],
            'method': item['method'],
            'key1': item.get('key1', ''),
            'key2': item.get('key2', ''),
            'key3': item.get('key3', ''),
            'arguments': item['arguments'],
            'sub_task': sub_task
        }

        try:
            params = json.dumps(params, cls=CJsonEncoder)
            if is_priority:
                result = await redis.rpush(task_queue, params)
            else:
                result = await redis.lpush(task_queue, params)
            #
            if result:
                logger.info(
                    'Task Add [%s] to QUEUE success, path [%s], method [%s], '
                    'key1 [%s], key2 [%s], key3 [%s], params: %s',
                    item['task_unique_id'], item['service_path'],
                    item['method'], item.get('key1', ''), item.get('key2', ''),
                    item.get('key3', ''), item['params_json'])
            else:
                logger.info(
                    'Task Add [%s] to QUEUE failed, path [%s], method [%s], '
                    'key1 [%s], key2 [%s], key3 [%s], params: %s',
                    item['task_unique_id'], item['service_path'],
                    item['method'], item.get('key1', ''), item.get('key2', ''),
                    item.get('key3', ''), item['params_json'])
        except Exception as e:
            await Report.report('task add exception', e)
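
A usage sketch of the batch form, following the batch_data layout the docstring describes; the service paths, methods, arguments, and key values are illustrative only:

# Inside a coroutine; each entry maps to one queued task.
batch = [
    {'service_path': 'service.mail', 'method': 'send',
     'arguments': {'to': 'a@example.com'}, 'key1': 'mail'},
    {'service_path': 'service.sms', 'method': 'send',
     'arguments': {'to': '13800000000'}, 'key1': 'sms'},
]
await add(batch_data=batch)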