def _run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times, function_result_status: FunctionResultStatus):
        if current_retry_times < self._max_retry_times:
            function_result_status.run_times += 1
            # noinspection PyBroadException
            t_start = time.time()
            try:
                function_run = self.consuming_function if self._function_timeout == 0 else self._concurrent_mode_dispatcher.timeout_deco(self._function_timeout)(self.consuming_function)
                if self._is_consuming_function_use_multi_params:  # The consuming function takes conventional keyword arguments.
                    function_result_status.result = function_run(**delete_keys_and_return_new_dict(kw['body'], ['publish_time', 'publish_time_format', 'extra']))
                else:
                    function_result_status.result = function_run(delete_keys_and_return_new_dict(kw['body'], ['publish_time', 'publish_time_format', 'extra']))  # The consuming function takes a single dict whose key/value pairs carry the parameters.
                function_result_status.success = True
                self._confirm_consume(kw)
                if self._do_task_filtering:
                    self._redis_filter.add_a_value(kw['body'])  # After a successful run, add the sorted key/value string of the params to the filter set.
                self.logger.debug(f'Function {self.consuming_function.__name__} succeeded on run {current_retry_times + 1}; '
                                  f'it took {round(time.time() - t_start, 4)} seconds, input was [ {kw["body"]} ].  {ConsumersManager.get_concurrent_info()}')

            except Exception as e:
                if isinstance(e, (PyMongoError, ExceptionForRequeue)):  # Mongo is often down or rejecting inserts during maintenance/backups, or the user deliberately raised ExceptionForRequeue; in both cases the message is requeued, exempt from the retry limit.
                    self.logger.critical(f'Error {type(e)}  {e} occurred in function [{self.consuming_function.__name__}]')
                    return self._requeue(kw)
                self.logger.error(f'Function {self.consuming_function.__name__} failed on run {current_retry_times + 1}; '
                                  f'it took {round(time.time() - t_start, 4)} seconds,\n  input was [ {kw["body"]} ]   \n cause: {type(e)} {e} ', exc_info=self._is_print_detail_exception)
                function_result_status.exception = f'{e.__class__.__name__}    {str(e)}'
                return self._run_consuming_function_with_confirm_and_retry(kw, current_retry_times + 1, function_result_status)  # Return here so only the innermost call persists the result and pushes the RPC reply.
        else:
            self.logger.critical(f'Function {self.consuming_function.__name__} still failed after the maximum of {self._max_retry_times} retries; input was [ {kw["body"]} ]')
            self._confirm_consume(kw)  # Failed more times than allowed, so acknowledge the message anyway.
        self._result_persistence_helper.save_function_result_to_mongo(function_result_status)
        if kw['body'].get('extra', {}).get('is_using_rpc_mode', False):
            RedisMixin().redis_db_frame.lpush(kw['body']['extra']['task_id'], json.dumps(function_result_status.get_status_dict(without_datetime_obj=True)))
            RedisMixin().redis_db_frame.expire(kw['body']['extra']['task_id'], 600)
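
The helper delete_keys_and_return_new_dict is not shown in this excerpt; judging from its call sites, it returns a copy of the message body minus the framework's bookkeeping keys (later revisions call a module-private variant _delete_keys_and_return_new_dict with the key list baked in). A minimal sketch of what it presumably does, not the framework's actual implementation:

def delete_keys_and_return_new_dict(dictx: dict, keys: list = None):
    """Hypothetical sketch: shallow-copy dictx without the bookkeeping keys."""
    keys = ['publish_time', 'publish_time_format', 'extra'] if keys is None else keys
    return {k: v for k, v in dictx.items() if k not in keys}
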
def _run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times,
                                                   function_result_status: FunctionResultStatus, ):
        function_only_params = _delete_keys_and_return_new_dict(kw['body'])
        if current_retry_times < self.__get_priority_conf(kw, 'max_retry_times'):
            function_result_status.run_times += 1
            # noinspection PyBroadException
            t_start = time.time()
            try:
                function_run = self.consuming_function if self._function_timeout == 0 else self._concurrent_mode_dispatcher.timeout_deco(
                    self.__get_priority_conf(kw, 'function_timeout'))(self.consuming_function)
                if self._is_consuming_function_use_multi_params:  # The consuming function takes conventional keyword arguments.
                    function_result_status.result = function_run(**function_only_params)
                else:
                    function_result_status.result = function_run(function_only_params)  # The consuming function takes a single dict whose key/value pairs carry the parameters.
                if asyncio.iscoroutine(function_result_status.result):
                    self.logger.critical(f'A coroutine consuming function must run under the async concurrency mode; '
                                         f'set concurrent_mode of the consuming function {self.consuming_function.__name__} to 4')
                    # noinspection PyProtectedMember,PyUnresolvedReferences
                    os._exit(4)
                function_result_status.success = True
                self._confirm_consume(kw)
                if self.__get_priority_conf(kw, 'do_task_filtering'):
                    self._redis_filter.add_a_value(function_only_params)  # After a successful run, add the sorted key/value string of the params to the filter set.
                if self._log_level <= logging.DEBUG:
                    result_str_to_be_print = str(function_result_status.result) if len(str(function_result_status.result)) < 100 else str(function_result_status.result)[:100] + ' ... '
                    self.logger.debug(f'Function {self.consuming_function.__name__} succeeded on run {current_retry_times + 1}; '
                                      f'it took {round(time.time() - t_start, 4)} seconds, input was [ {function_only_params} ], '
                                      f'result was {result_str_to_be_print},  {self._get_concurrent_info()}')
            except Exception as e:
                if isinstance(e, (PyMongoError, ExceptionForRequeue)):  # Mongo is often down or rejecting inserts during maintenance/backups, or the user deliberately raised ExceptionForRequeue; in both cases the message is requeued, exempt from the retry limit.
                    self.logger.critical(f'Error {type(e)}  {e} occurred in function [{self.consuming_function.__name__}]; requeueing the message')
                    time.sleep(1)  # Throttle, so a message that fails instantly cannot spin through requeue/dequeue and hammer the CPU and the broker.
                    return self._requeue(kw)
                self.logger.error(f'Function {self.consuming_function.__name__} failed on run {current_retry_times + 1}; '
                                  f'it took {round(time.time() - t_start, 4)} seconds,\n  input was [ {function_only_params} ]   \n cause: {type(e)} {e} ',
                                  exc_info=self.__get_priority_conf(kw, 'is_print_detail_exception'))
                function_result_status.exception = f'{e.__class__.__name__}    {str(e)}'
                return self._run_consuming_function_with_confirm_and_retry(kw, current_retry_times + 1, function_result_status, )
        else:
            self.logger.critical(
                f'Function {self.consuming_function.__name__} still failed after the maximum of {self.__get_priority_conf(kw, "max_retry_times")} retries; input was [ {function_only_params} ]')
            self._confirm_consume(kw)  # Failed more times than allowed, so acknowledge the message anyway.
            if self.__get_priority_conf(kw, 'do_task_filtering'):
                self._redis_filter.add_a_value(function_only_params)  # Record the params in the filter set even on final failure, so the task is not re-submitted and re-run forever.

        if self.__get_priority_conf(kw, 'is_using_rpc_mode'):
            with RedisMixin().redis_db_frame.pipeline() as p:
                p.lpush(kw['body']['extra']['task_id'],
                        json.dumps(function_result_status.get_status_dict(without_datetime_obj=True)))
                p.expire(kw['body']['extra']['task_id'], 600)
                p.execute()
        self._result_persistence_helper.save_function_result_to_mongo(function_result_status)
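
For context, the RPC branch above only lpushes the serialized status dict onto a Redis list keyed by task_id and gives it a 600-second TTL. A hypothetical caller-side sketch that blocks until the worker publishes the result (the real framework wraps this in its own helper; wait_for_rpc_result and its timeout handling are assumptions):

def wait_for_rpc_result(task_id: str, timeout: int = 60):
    # BLPOP blocks until the worker lpushes the serialized status dict,
    # or returns None once `timeout` seconds elapse.
    item = RedisMixin().redis_db_frame.blpop(task_id, timeout=timeout)
    if item is None:
        raise TimeoutError(f'no result for task {task_id} within {timeout}s')
    _key, payload = item
    return json.loads(payload)
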
Example No. 3
def push_result():
    with RedisMixin().redis_db_frame.pipeline() as p:
        p.lpush(kw['body']['extra']['task_id'],
                json.dumps(function_result_status.get_status_dict(without_datetime_obj=True)))
        p.expire(kw['body']['extra']['task_id'], 600)
        p.execute()
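
Wrapping the lpush and expire in one pipeline cuts the pair down to a single network round trip, and since a redis-py pipeline is transactional by default (MULTI/EXEC), the result list can never be observed without its TTL already set.
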
    def test_redis_bulk_write(self):

        with decorators.TimerContextManager():
            # r = redis.Redis(password='******')
            r = RedisMixin().redis_db0
            redis_helper = RedisBulkWriteHelper(r, 200)
            # redis_helper = RedisBulkWriteHelper(r, 100)  # Instantiating it out here works too.
            for i in range(1003):
                # time.sleep(0.2)
                # It could also be re-instantiated inside the loop any number of times.
                redis_helper.add_task(RedisOperation('sadd', 'key1', str(i)))
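
RedisBulkWriteHelper buffers operations and flushes them through a pipeline once the buffer reaches the threshold (200 above), so the 1003 sadd calls cost a handful of round trips instead of 1003. A minimal sketch of the idea, not the framework's actual implementation (the buffering and flush details are assumptions):

from collections import namedtuple

RedisOperation = namedtuple('RedisOperation', ['cmd', 'key', 'value'])

class RedisBulkWriteHelper:
    """Hypothetical sketch of a threshold-based bulk writer."""

    def __init__(self, redis_client, threshold=100):
        self._r = redis_client
        self._threshold = threshold
        self._buffer = []

    def add_task(self, op: RedisOperation):
        self._buffer.append(op)
        if len(self._buffer) >= self._threshold:
            self.flush()

    def flush(self):
        if not self._buffer:
            return
        with self._r.pipeline() as p:
            for op in self._buffer:
                getattr(p, op.cmd)(op.key, op.value)  # e.g. p.sadd('key1', '7')
            p.execute()
        self._buffer.clear()

Note that 1003 tasks with a threshold of 200 leave 3 operations buffered after the loop, so a real helper also needs a final flush (explicitly, on a timer, or at destruction).
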
Example No. 5
def main():
    try:
        # logger.info('testing pysnooper')
        for i in range(5):
            print(i)
        j = 333
        resp = requests.get('https://www.baidu.com')  # Exercise automatic code-path tracing through deep, nested calls.
        logger.debug(resp.text)
        print(RedisMixin().redis_db_frame.set('key_test', '1'))
        bar()

    except:
        pass
# -*- coding: utf-8 -*-
# @Author  : ydf
# @Time    : 2019/8/8 0008 13:53
from function_scheduling_distributed_framework.utils import RedisMixin

from test_frame.my_patch_frame_config import do_patch_frame_config

do_patch_frame_config()

print(RedisMixin().redis_db_frame.keys())
import time

from function_scheduling_distributed_framework.utils import RedisMixin, LogManager
from test_frame.test_frame_using_thread.test_celery.test_celery_app import add

LogManager().get_logger_and_add_handlers()
RedisMixin().redis_db_frame.delete('queue_add')

t1 = time.time()
for i in range(100):
    # print('producer adding a task')
    print(i)
    result = add.delay(i, i * 2)
    print(type(result))
    # print(result.get())

print(time.time() - t1)
print('all tasks added')
"""
import celery
celery.result.AsyncResult
"""
Example No. 8
# -*- coding: utf-8 -*-
# @Author  : ydf
# @Time    : 2019/8/8 0008 13:53
from function_scheduling_distributed_framework.utils import RedisMixin

from test_frame.my_patch_frame_config import do_patch_frame_config

do_patch_frame_config()

print(RedisMixin().redis_db7.keys())
Example No. 9
async def _async_run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times,
                                                               function_result_status: FunctionResultStatus, ):
        function_only_params = _delete_keys_and_return_new_dict(kw['body'])
        if current_retry_times < self.__get_priority_conf(kw, 'max_retry_times'):
            function_result_status.run_times += 1
            # noinspection PyBroadException
            t_start = time.time()
            try:
                coroutine_obj = self.consuming_function(**function_only_params)
                if not asyncio.iscoroutine(coroutine_obj):
                    self.logger.critical(
                        f'The concurrency mode is set to async, but the consuming function is not a coroutine function; '
                        f'do not set concurrent_mode of the consuming function {self.consuming_function.__name__} to 4')
                    # noinspection PyProtectedMember
                    os._exit(444)
                if self._function_timeout == 0:
                    rs = await coroutine_obj
                    # rs = await asyncio.wait_for(coroutine_obj, timeout=4)
                else:
                    rs = await asyncio.wait_for(coroutine_obj, timeout=self._function_timeout)
                function_result_status.result = rs
                function_result_status.success = True
                self._confirm_consume(kw)
                if self.__get_priority_conf(kw, 'do_task_filtering'):
                    self._redis_filter.add_a_value(function_only_params)  # After a successful run, add the sorted key/value string of the params to the filter set.
                self.logger.debug(
                    f'Function {self.consuming_function.__name__} succeeded on run {current_retry_times + 1}; '
                    f'it took {round(time.time() - t_start, 4)} seconds, input was [ {function_only_params} ].  {coroutine_obj}')
            except Exception as e:
                if isinstance(e, (PyMongoError, ExceptionForRequeue)):  # Mongo is often down or rejecting inserts during maintenance/backups, or the user deliberately raised ExceptionForRequeue; in both cases the message is requeued, exempt from the retry limit.
                    self.logger.critical(
                        f'Error {type(e)}  {e} occurred in function [{self.consuming_function.__name__}]; requeueing the message')
                    await asyncio.sleep(1)  # Throttle without blocking the event loop; a plain time.sleep here would stall every other coroutine.
                    return self._requeue(kw)
                self.logger.error(
                    f'Function {self.consuming_function.__name__} failed on run {current_retry_times + 1}; '
                    f'it took {round(time.time() - t_start, 4)} seconds,\n  input was [ {function_only_params} ]   \n cause: {type(e)} {e} ',
                    exc_info=self.__get_priority_conf(kw, 'is_print_detail_exception'))
                function_result_status.exception = f'{e.__class__.__name__}    {str(e)}'
                return await self._async_run_consuming_function_with_confirm_and_retry(
                    kw, current_retry_times + 1, function_result_status)
        else:
            self.logger.critical(
                f'Function {self.consuming_function.__name__} still failed after the maximum of {self.__get_priority_conf(kw, "max_retry_times")} retries; input was [ {function_only_params} ]')
            self._confirm_consume(kw)  # Failed more times than allowed, so acknowledge the message anyway.
        if self.__get_priority_conf(kw, 'is_using_rpc_mode'):
            with RedisMixin().redis_db_frame.pipeline() as p:
                p.lpush(kw['body']['extra']['task_id'],
                        json.dumps(function_result_status.get_status_dict(without_datetime_obj=True)))
                p.expire(kw['body']['extra']['task_id'], 600)
                p.execute()
        self._result_persistence_helper.save_function_result_to_mongo(function_result_status)
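
The timeout path above relies on asyncio.wait_for, which cancels the coroutine and raises asyncio.TimeoutError once the deadline passes; that exception then falls into the generic retry branch. A standalone illustration:

import asyncio

async def slow():
    await asyncio.sleep(5)
    return 'done'

async def demo():
    try:
        print(await asyncio.wait_for(slow(), timeout=1))
    except asyncio.TimeoutError:
        print('slow() was cancelled after the 1s timeout')

asyncio.run(demo())
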
Example No. 10
from function_scheduling_distributed_framework.utils import RedisMixin
from function_scheduling_distributed_framework.utils.bulk_operation import RedisOperation, RedisBulkWriteHelper
from test_frame.test_with_multi_process.test_consume import fff
import json
fff.clear()

helper = RedisBulkWriteHelper(RedisMixin().redis_db_frame, 10000)
for i in range(0, 100000):
    helper.add_task(
        RedisOperation('rpush', fff.consumer.queue_name, json.dumps({"x": i})))

if __name__ == '__main__':
    pass