예제 #1
0
 def custom_init(self):
     """Build a per-queue logger whose name embeds the kombu broker scheme."""
     broker_scheme = frame_config.KOMBU_URL.split(":")[0]
     self._kombu_broker_url_prefix = broker_scheme
     logger_name = '--'.join((f'{self._logger_prefix}{type(self).__name__}',
                              broker_scheme, self._queue_name))
     log_file = f'{logger_name}.log' if self._is_add_file_handler else None
     self.logger = LogManager(logger_name).get_logger_and_add_handlers(
         self._log_level_int,
         log_filename=log_file,
         formatter_template=frame_config.NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER,
     )
예제 #2
0
def log(text):
    """Write *text* as an info record into a minute-stamped file under LOG_PATH."""
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M')
    file_name = LOG_PATH + timestamp + '.log'
    writer = LogManager('INFO').get_logger_and_add_handlers(
        is_add_stream_handler=True,
        do_not_use_color_handler=True,
        log_filename=file_name,
    )
    writer.info(text)
 def __init__(self, queue_name, log_level_int=10, logger_prefix='', is_add_file_handler=True,
              clear_queue_within_init=False, is_add_publish_time=True, consuming_function: callable = None):
     """
     Publisher initialiser: builds a named logger, an optional publish-params
     checker and the per-minute publish counters, then calls the subclass hook.

     :param queue_name: name of the message queue this publisher targets.
     :param log_level_int: logging level handed to LogManager (10 = DEBUG).
     :param logger_prefix: optional prefix prepended to the logger name.
     :param is_add_file_handler: if True, also log to '<logger_name>.log'.
     :param clear_queue_within_init: if True, purge the queue right after init.
     :param is_add_publish_time: whether to stamp messages with the publish
            time (to be deprecated -- it will always be added).
     :param consuming_function: the consumer function; used only to validate
            publish arguments against its signature. If omitted, no validation
            is done. E.g. if ``add`` accepts x and y, publishing {"x":1,"z":3}
            is invalid because the function takes no z parameter.
     """
     self._queue_name = queue_name
     if logger_prefix != '':
         logger_prefix += '--'
     logger_name = f'{logger_prefix}{self.__class__.__name__}--{queue_name}'
     self.logger = LogManager(logger_name).get_logger_and_add_handlers(log_level_int,
                                                                       log_filename=f'{logger_name}.log' if is_add_file_handler else None)  #
     # Optional signature validation of published kwargs against the consumer.
     self.publish_params_checker = PublishParamsChecker(consuming_function) if consuming_function else None
     # self.rabbit_client = RabbitMqFactory(is_use_rabbitpy=is_use_rabbitpy).get_rabbit_cleint()
     # self.channel = self.rabbit_client.creat_a_channel()
     # self.queue = self.channel.queue_declare(queue=queue_name, durable=True)
     self._lock_for_count = Lock()  # guards the per-minute publish counter
     self._current_time = None
     self.count_per_minute = None
     self._init_count()
     self.custom_init()  # subclass hook for broker-specific setup
     self.logger.info(f'{self.__class__} 被实例化了')
     self.publish_msg_num_total = 0
     self._is_add_publish_time = is_add_publish_time
     self.__init_time = time.time()
     atexit.register(self.__at_exit)  # registered to run at interpreter exit
     if clear_queue_within_init:
         self.clear()
예제 #4
0
class ExceptionContextManager:
    """
    Context manager that captures exceptions raised inside its block.

    Finer-grained than a decorator: only the wrapped fragment is protected,
    and the error can optionally be swallowed after being logged.
    """

    def __init__(
        self,
        logger_name='ExceptionContextManager',
        verbose=100,
        donot_raise__exception=True,
    ):
        """
        :param verbose: traceback depth to log (the ``limit`` used when
               formatting the traceback); a positive integer.
        :param donot_raise__exception: True to suppress the exception after
               logging it, False to let it propagate.
        """
        self.logger = LogManager(logger_name).get_logger_and_add_handlers()
        self._verbose = verbose
        self._donot_raise__exception = donot_raise__exception

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        summary = f'{exc_type}  :  {exc_val}'
        colored_summary = f'\033[0;30;45m{summary}\033[0m'
        if self._donot_raise__exception and exc_tb is not None:
            frames = traceback.format_tb(exc_tb)[:self._verbose]
            self.logger.error('\n'.join(frames) + colored_summary)
        # Returning True from __exit__ is what suppresses the exception.
        return self._donot_raise__exception
예제 #5
0
    def init_app(self, app: Flask):
        """Attach the request and error loggers to *app* and register the
        first-request hook.

        File names come from app.config (FLASK_RECORD_LOG_FILE_NAME /
        FLASK_ERROR_LOG_FILE_NAME) and fall back to names derived from the
        project directory.
        """
        # NOTE(review): deriving the project name from sys.path[1] mirrors the
        # original code; it depends on how the interpreter was started -- confirm.
        project_name = Path(sys.path[1]).as_posix().split('/')[-1]

        # ---- normal request log ------------------------------------------
        record_file = app.config.get('FLASK_RECORD_LOG_FILE_NAME', None)
        record_file_is_default = record_file is None
        if record_file_is_default:
            record_file = project_name + '_flask_record.log'
        app.flask_record_logger = LogManager(
            record_file.split('.')[0]).get_logger_and_add_handlers(
                log_filename=record_file)
        self.logger.info(
            f'flask的正常请求记录将记录在  /pythonlogs/{record_file} 文件中 '
        )
        if record_file_is_default:
            self.logger.warning(
                f'也可以手动配置flask的正常请求记录日志文件名字,请指定 FLASK_RECORD_LOG_FILE_NAME')

        # ---- error log ----------------------------------------------------
        error_file = app.config.get('FLASK_ERROR_LOG_FILE_NAME', None)
        error_file_is_default = error_file is None
        if error_file_is_default:
            error_file = project_name + '_flask_error.log'
        ding_talk_token = app.config.get('FLASK_ERROR_DING_TALK_TOKEN', None)
        # Bug fix: the original read app.config['DING_TALK_KEYWORD'] directly,
        # which raised KeyError when a ding-talk token was configured without
        # a keyword; use .get with an empty default instead.
        keyword = app.config.get('DING_TALK_KEYWORD', '') if ding_talk_token else ''
        logger_error_name = error_file.split('.')[0] + keyword
        # logger_dingtalk_debug.debug(logger_error_name)
        app.flask_error_logger = LogManager(
            logger_error_name).get_logger_and_add_handlers(
                log_filename=error_file,
                ding_talk_token=ding_talk_token)
        self.logger.info(
            f'''flask错误日志将记录在  /pythonlogs/{error_file} 文件中'''
        )
        if error_file_is_default:
            self.logger.warning(
                f'''也可以手动配置flask的错误记录日志文件名字,请指定 FLASK_ERROR_LOG_FILE_NAME''')

        app.before_first_request_funcs.append(self.__before_first_request)
 def custom_init(self):
     """Create the broker-scheme-aware logger, then patch kombu's redis transport."""
     scheme = frame_config.KOMBU_URL.split(":")[0]
     self._middware_name = scheme
     logger_name = f'{self._logger_prefix}{self.__class__.__name__}--{scheme}--{self._queue_name}'
     file_name = f'{logger_name}.log' if self._create_logger_file else None
     self.logger = LogManager(logger_name).get_logger_and_add_handlers(
         self._log_level,
         log_filename=file_name,
         formatter_template=frame_config.NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER,
     )
     patch_kombu_redis()
예제 #7
0
class KombuPublisher(AbstractPublisher, ):
    """
    Publisher that sends messages through the kombu library; the actual
    broker (amqp, redis, ...) is selected by the scheme of
    frame_config.KOMBU_URL, which kombu adapts to.
    """

    def custom_init(self):
        # Broker scheme (e.g. 'amqp', 'redis') from KOMBU_URL; also consulted
        # by get_message_count to pick a counting strategy.
        self._kombu_broker_url_prefix = frame_config.KOMBU_URL.split(":")[0]
        logger_name = f'{self._logger_prefix}{self.__class__.__name__}--{self._kombu_broker_url_prefix}--{self._queue_name}'
        self.logger = LogManager(logger_name).get_logger_and_add_handlers(self._log_level_int,
                                                                          log_filename=f'{logger_name}.log' if self._is_add_file_handler else None,
                                                                          formatter_template=frame_config.NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER,
                                                                          )  #

    def init_broker(self):
        # Declare a durable direct exchange, bind a queue whose routing key is
        # the queue name, then open a JSON producer over one connection.
        self.exchange = Exchange('distributed_framework_exchange', 'direct', durable=True)
        self.queue = Queue(self._queue_name, exchange=self.exchange, routing_key=self._queue_name, auto_delete=False)
        self.conn = Connection(frame_config.KOMBU_URL)
        self.queue(self.conn).declare()
        self.producer = self.conn.Producer(serializer='json')
        self.channel = self.producer.channel  # type: Channel
        # presumably disables kombu's body (base64) encoding since the
        # producer already serialises to JSON -- confirm against kombu docs.
        self.channel.body_encoding = 'no_encode'
        # self.channel = self.conn.channel()  # type: Channel
        # # self.channel.exchange_declare(exchange='distributed_framework_exchange', durable=True, type='direct')
        # self.queue = self.channel.queue_declare(queue=self._queue_name, durable=True)
        self.logger.warning(f'使用 kombu 库 连接中间件')

    @deco_mq_conn_error
    def concrete_realization_of_publish(self, msg):
        # msg is a JSON string: parse it back to a python object before handing
        # it to the json-serialising producer (avoids double encoding).
        self.producer.publish(json.loads(msg), exchange=self.exchange, routing_key=self._queue_name, declare=[self.queue])

    @deco_mq_conn_error
    def clear(self):
        """Purge all pending messages from the queue."""
        self.channel.queue_purge(self._queue_name)

    @deco_mq_conn_error
    def get_message_count(self):
        """Return the number of messages currently waiting in the queue."""
        # queue = self.channel.queue_declare(queue=self._queue_name, durable=True)
        # return queue.method.message_count
        # self.logger.warning(self.channel._size(self._queue_name))
        if self._kombu_broker_url_prefix == 'amqp':
            '''amqp tries to use librabbitmq but falls back to pyamqp.'''
            # Re-declaring an existing durable queue is idempotent and returns
            # a named tuple that carries message_count.
            queue_declare_ok_t_named_tuple = self.channel.queue_declare(queue=self._queue_name, durable=True, auto_delete=False)
            # queue_declare_ok_t(queue='test_rabbit_queue2', message_count=100000, consumer_count=0)
            # print(type(queue_declare_ok_t_named_tuple),queue_declare_ok_t_named_tuple)
            return queue_declare_ok_t_named_tuple.message_count
        # noinspection PyProtectedMember
        return self.channel._size(self._queue_name)

    def close(self):
        """Close the channel and the underlying connection."""
        self.channel.close()
        self.conn.close()
        self.logger.warning('关闭 kombu 包 链接')
예제 #8
0
 def __init__(
     self,
     logger_name='ExceptionContextManager',
     verbose=100,
     donot_raise__exception=True,
 ):
     """
     :param verbose: traceback depth to log (the ``limit`` used when
            formatting the traceback); a positive integer.
     :param donot_raise__exception: True to suppress the error after logging,
            False to re-raise it.
     """
     self._verbose = verbose
     self._donot_raise__exception = donot_raise__exception
     self.logger = LogManager(logger_name).get_logger_and_add_handlers()
class case_lable(unittest.TestCase):
    """Label-query API cases."""

    def setUp(self) -> None:
        self.session = requests.session()
        self.HOSTS = config.HOSTS
        self.APPID = config.appid
        self.SECRET = config.secret
        self.logger = LogManager('case_log').get_logger_and_add_handlers(10)

    def tearDown(self) -> None:
        self.session.close()

    def testcase_lable_04(self):
        """case07: verify that querying tags succeeds."""
        self._testMethodName = 'case07'
        self._testMethodDoc = '验证查询标签是否成功'
        self.logger.info('查询标签开始')
        response_select = get_api.select_label(self.session, self.HOSTS, self.APPID, self.SECRET)
        body_select = response_select.content.decode('utf-8')
        self.logger.info('查询标签结果是:%s' % body_select)
        # str -> dict
        json_data = json.loads(body_select)
        # jsonpath returns a list; [0] extracts the first tag name
        select_name = jsonpath.jsonpath(json_data, '$.tags[0].name')[0]
        try:
            self.assertEqual(select_name, "星标组", 'case07 验证查询标签成功')
            self.logger.info('查询标签结果:成功')
        except AssertionError:
            # Bug fix: the original caught Exception and only logged at debug
            # level, swallowing the assertion failure so this test could
            # never actually fail. Log, then re-raise.
            self.logger.debug('查询标签结果:失败,内容不匹配')
            raise
예제 #10
0
class TimerContextManager(object):
    """
    Context manager that times the code fragment it wraps.
    """
    log = LogManager('TimerContext').get_logger_and_add_handlers()

    def __init__(self, is_print_log=True):
        self._is_print_log = is_print_log
        self.time_start = None
        self.t_spend = None
        self._file_name = None
        self._line = None

    def __enter__(self):
        caller = sys._getframe(1)
        self._line = caller.f_lineno  # line of the `with` statement
        self._file_name = caller.f_code.co_filename  # file that entered the block
        self.time_start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.t_spend = time.time() - self.time_start
        if self._is_print_log:
            self.log.debug(
                f'对下面代码片段进行计时:  \n执行"{self._file_name}:{self._line}" 用时 {round(self.t_spend, 2)} 秒'
            )
class TestGetaccesstokenApiCase(unittest.TestCase):
    """get_access_token API cases."""

    def setUp(self) -> None:
        self.session = requests.session()
        self.HOSTS = config.HOSTS
        self.logger = LogManager('ApiCase').get_logger_and_add_handlers()

    def tearDown(self) -> None:
        self.session.close()

    def test_get_accesstoken_success(self):
        """case01: the interface can be called successfully."""
        self._testMethodName = 'case01'
        self._testMethodDoc = '验证get_access_token接口能否成功调用'
        logger.info('case01 验证get_access_token接口能否成功调用 --开始执行--')
        self.logger.info('case01 验证get_access_token接口能否成功调用 --开始执行--')
        resp = api_info.get_access_token_api(
            self.session, self.HOSTS, 'wx55614004f367f8ca',
            '65515b46dd758dfdb09420bb7db2c67f')
        status = resp.status_code
        logger.info('case01 验证get_access_token接口能否成功调用 --执行结束--')
        self.logger.info('case01 验证get_access_token接口能否成功调用 --执行结束--')
        self.assertEqual(status, 200,
                         'case01 验证get_access_token接口能否成功调用')

    def test_get_accesstoken_error_appid(self):
        """case02: a wrong appid is rejected with errcode 40013."""
        self._testMethodName = 'case02'
        self._testMethodDoc = '验证appid错误时,get_access_token接口能否正常处理'
        resp = api_info.get_access_token_api(
            self.session, self.HOSTS, 'wx55614004f367f8',
            '65515b46dd758dfdb09420bb7db2c67f')
        self.assertEqual(resp.json()['errcode'], 40013,
                         'case02 验证appid错误时,get_access_token接口能否正常处理')

    def test_get_accesstoken_error_appsecret(self):
        """case03: a wrong secret is rejected with errcode 40001."""
        self._testMethodName = 'case03'
        self._testMethodDoc = '验证appsecret错误时,get_access_token接口能否正常处理'
        query = {
            "grant_type": "client_credential",
            "appid": "wx55614004f367f8ca",
            "secret": "65515b46dd758dfdb09420bb7db2c6"
        }
        resp = self.session.get(url='https://%s/cgi-bin/token' % self.HOSTS,
                                params=query)
        self.assertEqual(resp.json()['errcode'], 40001,
                         'case03 验证appsecret错误时,get_access_token接口能否正常处理')
 def __init__(self, function_of_get_new_https_proxies_list_from_website, func_args=tuple(), func_kwargs: dict = None,
              platform_name='xx平台', redis_key=PROXY_KEY_IN_REDIS_DEFAULT,
              time_sleep_for_get_new_proxies=60,
              ):
     """
     :param function_of_get_new_https_proxies_list_from_website: strategy
            function that fetches a list of proxy ips from one website.
     :param redis_key: redis key (a zset) under which the proxies are stored.
     :param time_sleep_for_get_new_proxies: polling interval per website, in seconds.
     """
     self.function_of_get_new_https_proxies_list_from_website = function_of_get_new_https_proxies_list_from_website
     self.platform_name = platform_name
     self._func_args = func_args
     self._func_kwargs = func_kwargs if func_kwargs else {}
     self._redis_key = redis_key
     self._time_sleep_for_get_new_proxies = time_sleep_for_get_new_proxies
     self.logger = LogManager(f'ProxyCollector-{platform_name}').get_logger_and_add_handlers(
         log_filename=f'ProxyCollector-{platform_name}.log', formatter_template=7)
예제 #13
0
    def get_logger(cls):
        """Return the shared logger, creating and caching it on first use.

        Bug fix: the original ``return cls.logger`` sat inside the ``if``
        branch, so every call after the first (when the logger was already
        cached) returned None.
        """
        if cls.logger is None:
            cls.logger = LogManager('simple').get_logger_and_add_handlers(
                log_path='../logger',
                log_filename='test.log',
                formatter_template=5,
                log_file_size=10)
        return cls.logger
예제 #14
0
class case_token(unittest.TestCase):
    """Token-acquisition API cases."""

    def setUp(self) -> None:
        self.session = requests.session()
        self.HOSTS = config.HOSTS
        self.APPID = config.appid
        self.SECRET = config.secret
        self.logger = LogManager('case_log').get_logger_and_add_handlers(10)

    def tearDown(self) -> None:
        self.session.close()

    def testcase_token_01(self):
        """case01: verify a token can be fetched (expires_in == 7200)."""
        self._testMethodName = 'case01'
        self._testMethodDoc = '验证获取token是否成功'
        self.logger.info('获取tokencase01开始')
        response = get_api.get_response(self.session, self.HOSTS, self.APPID,
                                        self.SECRET)
        self.expirecode = response.json()['expires_in']
        print('获取token的返回码是:%s' % self.expirecode)
        self.assertEqual(self.expirecode, 7200, 'case01 验证获取token成功')
        self.logger.info('获取tokencase01结束:成功')
예제 #15
0
class FunctionResultCacher:
    """Decorator factory that caches function results for a limited time."""
    logger = LogManager('FunctionResultChche').get_logger_and_add_handlers()
    # Maps (func, args_key) -> (result, timestamp), e.g.
    # {(f1, (1, 2, 3, 4)): (10, 1532066199.739)}
    func_result_dict = {}

    @classmethod
    def cached_function_result_for_a_time(cls, cache_time: float):
        """
        Cache a function's result for ``cache_time`` seconds.

        Do not use on functions returning huge strings or other memory-heavy
        values.

        :param cache_time: how long (seconds) a cached result stays valid.
        :type cache_time: float
        """
        def _cached_function_result_for_a_time(fun):
            @wraps(fun)
            def __cached_function_result_for_a_time(*args, **kwargs):
                # Crude memory cap: drop everything when the dict grows large.
                # (sys.getsizeof is shallow -- it bounds the dict's own table,
                # not the cached values it references.)
                if sys.getsizeof(cls.func_result_dict) > 100 * 1000 * 1000:
                    cls.func_result_dict.clear()

                key = cls._make_arguments_to_key(args, kwargs)
                cached = cls.func_result_dict.get((fun, key))
                if cached is not None and time.time() - cached[1] < cache_time:
                    return cached[0]
                # Bug fix: the original's else branch repeated the identical
                # freshness check verbatim (dead duplicated logic) -- removed.
                cls.logger.debug('函数 [{}] 此次不能使用缓存'.format(
                    fun.__name__))
                result = fun(*args, **kwargs)
                cls.func_result_dict[(fun, key)] = (result, time.time())
                return result

            return __cached_function_result_for_a_time

        return _cached_function_result_for_a_time

    @staticmethod
    def _make_arguments_to_key(args, kwds):
        """Flatten positional args plus sorted keyword items into one tuple key."""
        key = args
        if kwds:
            for item in sorted(kwds.items()):
                key += item  # tuples concatenate
        return key
예제 #16
0
def timer(func):
    """Decorator that logs how long each call of ``func`` takes."""
    if not hasattr(timer, 'log'):
        timer.log = LogManager(
            f'timer_{func.__name__}').get_logger_and_add_handlers(
                log_filename=f'timer_{func.__name__}.log')

    @wraps(func)
    def _timer(*args, **kwargs):
        started = time.time()
        outcome = func(*args, **kwargs)
        elapsed = round(time.time() - started, 2)
        timer.log.debug('执行[ {} ]方法用时 {} 秒'.format(func.__name__, elapsed))
        return outcome

    return _timer
예제 #17
0
def where_is_it_called(func):
    """Decorator that logs, on every call, which file and line invoked the
    decorated function, the arguments passed, the elapsed time and the
    (truncated) result."""
    if not hasattr(where_is_it_called, 'log'):
        where_is_it_called.log = LogManager(
            'where_is_it_called').get_logger_and_add_handlers()

    # noinspection PyProtectedMember
    @wraps(func)
    def _where_is_it_called(*args, **kwargs):
        func_name = func.__name__
        caller_frame = sys._getframe(1)
        # name of the function that invoked us
        which_fun_call_this = caller_frame.f_code.co_name  # NOQA
        # source line of the call site
        line = caller_frame.f_lineno
        # module file of the caller
        file_name = caller_frame.f_code.co_filename

        # noinspection PyPep8
        where_is_it_called.log.debug(
            f'文件[{func.__code__.co_filename}]的第[{func.__code__.co_firstlineno}]行即模块 [{func.__module__}] 中的方法 [{func_name}] 正在被文件 [{file_name}] 中的'
            f'方法 [{which_fun_call_this}] 中的第 [{line}] 行处调用,传入的参数为[{args},{kwargs}]'
        )
        try:
            started = time.time()
            raw_result = func(*args, **kwargs)
            elapsed = round(time.time() - started, 2)
            shown = json.dumps(raw_result) if isinstance(raw_result, dict) else raw_result
            shown = str(shown)
            if len(shown) > 200:
                shown = shown[0:200] + '  。。。。。。  '
            where_is_it_called.log.debug(
                '执行函数[{}]消耗的时间是{}秒,返回的结果是 --> '.format(func_name, elapsed) +
                shown)
            return raw_result
        except Exception as e:
            where_is_it_called.log.debug('执行函数{},发生错误'.format(func_name))
            where_is_it_called.log.exception(e)
            raise e

    return _where_is_it_called
예제 #18
0
def keep_circulating(time_sleep=0.001,
                     exit_if_function_run_sucsess=False,
                     is_display_detail_exception=True,
                     block=True,
                     daemon=False):
    """Decorator that keeps calling the wrapped function in a loop.

    :param time_sleep: pause between iterations, in seconds.
    :param exit_if_function_run_sucsess: stop looping (and return the result)
           once the function returns without raising.
    :param is_display_detail_exception: log the full traceback instead of
           just str(e).
    :param block: if True, run the loop in the caller's thread; if False,
           run it in a new thread and return immediately.
    :param daemon: when threaded, whether the thread is a daemon (lets the
           process exit while the loop is still running).
    """
    # Bug fix: the original guard tested hasattr(..., 'keep_circulating_log')
    # but assigned the attribute 'log', so the guard never matched and the
    # logger was rebuilt on every decoration.
    if not hasattr(keep_circulating, 'log'):
        keep_circulating.log = LogManager(
            'keep_circulating').get_logger_and_add_handlers()

    def _keep_circulating(func):
        @wraps(func)
        def __keep_circulating(*args, **kwargs):

            # noinspection PyBroadException
            def ___keep_circulating():
                while 1:
                    try:
                        result = func(*args, **kwargs)
                        if exit_if_function_run_sucsess:
                            return result
                    except Exception as e:
                        # Detailed traceback only when requested; otherwise
                        # just str(e) (the original conditional's precedence,
                        # made explicit with parentheses).
                        msg = (func.__name__ + '   运行出错\n ' +
                               traceback.format_exc(limit=10)
                               ) if is_display_detail_exception else str(e)
                        keep_circulating.log.error(msg)
                    finally:
                        time.sleep(time_sleep)

            if block:
                return ___keep_circulating()
            else:
                threading.Thread(target=___keep_circulating,
                                 daemon=daemon).start()

        return __keep_circulating

    return _keep_circulating
예제 #19
0
# -*- coding: utf-8 -*-
# @Author  : ydf
# @Time    : 2019/8/8 0008 13:27
import json
from threading import Lock

from pikav0.exceptions import AMQPError

from function_scheduling_distributed_framework.consumers.base_consumer import AbstractConsumer
from nb_log import LogManager
from function_scheduling_distributed_framework.utils.rabbitmq_factory import RabbitMqFactory

# Configure the 'pikav0' logger once at import time at level 20 (INFO);
# handlers are attached as a side effect of this call.
LogManager('pikav0').get_logger_and_add_handlers(20)


class RabbitmqConsumer(AbstractConsumer):
    """
    Consumer implemented with the pika package.
    """
    BROKER_KIND = 0  # broker-kind id for the pika/rabbitmq implementation

    # noinspection PyAttributeOutsideInit
    def custom_init(self):
        # presumably serialises access to the pika channel, which is not
        # thread-safe -- TODO confirm against the callers of this lock.
        self._lock_for_pika = Lock()

    def _shedual_task(self):
        # NOTE(review): this method looks truncated in this chunk -- the
        # consume loop that should follow basic_qos is not visible here.
        channel = RabbitMqFactory(
            is_use_rabbitpy=0).get_rabbit_cleint().creat_a_channel()
        channel.queue_declare(queue=self._queue_name, durable=True)
        channel.basic_qos(prefetch_count=self._concurrent_num)
예제 #20
0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sqlalchemy as db
from nb_log import LogManager

log = LogManager('fedb-sdk-test').get_logger_and_add_handlers()


class FedbClient:
    """Small helper that opens SQLAlchemy connections to an OpenMLDB/FEDB
    cluster located through ZooKeeper."""

    def __init__(self, zkCluster, zkRootPath, dbName='test_fedb'):
        self.zkCluster = zkCluster
        self.zkRootPath = zkRootPath
        self.dbName = dbName

    def getConnect(self):
        """Build an engine from the zk coordinates and return a live connection."""
        url = 'openmldb://@/{}?zk={}&zkPath={}'.format(
            self.dbName, self.zkCluster, self.zkRootPath)
        engine = db.create_engine(url)
        return engine.connect()
 def setUp(self) -> None:
     """Create one HTTP session and the case logger before each test."""
     self.HOSTS = config.HOSTS
     self.session = requests.session()
     self.logger = LogManager('ApiCase').get_logger_and_add_handlers()
class Test_GetaccesstokenApiCase(unittest.TestCase):
    """get_access_token cases driven through the keyword helper.

    NOTE(review): the helper name ``get_access_token_apiget_access_token_api``
    looks like a paste error of ``get_access_token_api`` -- confirm against
    its definition before renaming.
    """

    def setUp(self) -> None:
        self.session = requests.session()
        self.HOSTS = config.HOSTS
        self.logger = LogManager('ApiCase').get_logger_and_add_handlers()

    def tearDown(self) -> None:
        self.session.close()

    def test_get_accesstoken_success(self):
        """case01: correct appid/secret yields a 7200-second token."""
        requests.packages.urllib3.disable_warnings()
        self._testMethodName = 'case01'
        self._testMethodDoc = '验证get_access_token接口能否成功调用'
        logger.info('case01 验证get_access_token接口能否成功调用 --开始执行--11111')
        self.logger.info('case01 验证get_access_token接口能否成功调用 --开始执行--')
        response = get_access_token_apiget_access_token_api(
            self.session, self.HOSTS, '65515b46dd758dfdb09420bb7db2c67f',
            'wx55614004f367f8ca', 'client_credential')
        print(response)
        actual_result = response['expires_in']
        self.assertEqual(actual_result, 7200,
                         'case01 验证get_access_token接口能否成功调用')

    def test_get_accesstoken_error_appid(self):
        """case02: a wrong appid returns errcode 40013."""
        requests.packages.urllib3.disable_warnings()
        self._testMethodName = 'case02'
        self._testMethodDoc = '验证appid错误时,接口能否正常处理'
        response = get_access_token_apiget_access_token_api(
            self.session, self.HOSTS, '65515b46dd758dfdb09420bb7db2c67f',
            'wx55614004f367f8ca1', 'client_credential')
        actual_result = response['errcode']
        self.assertEqual(actual_result, 40013, 'case02 验证appid错误接口能否成功调用')

    def test_get_accesstoken_error_secret(self):
        """case03: a wrong secret returns errcode 40001."""
        requests.packages.urllib3.disable_warnings()
        self._testMethodName = 'secret'
        self._testMethodDoc = '验证appidsecret错误时,接口能否正常处理'
        response = get_access_token_apiget_access_token_api(
            self.session, self.HOSTS, '65515b46dd758dfdb09420bb7db2c67f1',
            'wx55614004f367f8ca', 'client_credential')
        # Bug fix: the helper returns a parsed dict (the sibling cases index
        # it directly); the original called response.json() here, which would
        # raise AttributeError on a dict.
        actual_result = response['errcode']
        self.assertEqual(actual_result, 40001, 'case03 验证secret错误接口能否成功调用')
import unittest
import os
from utils import HTMLTestReportCN
from utils.config_utils import local_config
from nb_log import LogManager

logger = LogManager('PopOn-School_API_TEST').get_logger_and_add_handlers(
    is_add_stream_handler=True, log_filename=local_config.LOG_NAME)
case_path = os.path.join(os.path.dirname(__file__), '..', 'testcases')
result_path = os.path.join(os.path.dirname(__file__), '..',
                           local_config.REPORT_PATH)


def load_testcase():
    """Discover the api test cases under case_path and bundle them into one suite."""
    logger.info('加载接口测试用例')
    suite = unittest.TestSuite()
    discovered = unittest.defaultTestLoader.discover(start_dir=case_path,
                                                     pattern='test_api_case.py',
                                                     top_level_dir=case_path)
    suite.addTest(discovered)
    return suite


result_dir = HTMLTestReportCN.ReportDirectory(result_path)

result_dir.create_dir('PopOn-School接口自动化测试报告_')
report_html_path = HTMLTestReportCN.GlobalMsg.get_value('report_path')
report_html_obj = open(report_html_path, 'wb')
runner = HTMLTestReportCN.HTMLTestRunner(stream=report_html_obj,
                                         title='PopOn-School接口自动化测试报告',
                                         description='数据驱动+关键字驱动测试',
            # nb_print(work_item)
            if work_item is not None:
                self._executorx._change_threads_free_count(-1)
                work_item.run()
                del work_item
                self._executorx._change_threads_free_count(1)
                continue
            if _shutdown or self._executorx._shutdown:
                self._executorx.work_queue.put(None)
                break


process_name_set = set()  # presumably tracks processes already reported -- usage not visible in this chunk
# Module-level logger for the periodic thread-count report; it also writes
# to show_current_threads_num.log with formatter template 5.
logger_show_current_threads_num = LogManager(
    'show_current_threads_num').get_logger_and_add_handlers(
        formatter_template=5,
        log_filename='show_current_threads_num.log',
        do_not_use_color_handler=False)


def show_current_threads_num(sleep_time=600,
                             process_name='',
                             block=False,
                             daemon=True):
    process_name = sys.argv[0] if process_name == '' else process_name

    def _show_current_threads_num():
        while True:
            # logger_show_current_threads_num.info(f'{process_name} 进程 的 并发数量是 -->  {threading.active_count()}')
            # nb_print(f'  {process_name} {os.getpid()} 进程 的 线程数量是 -->  {threading.active_count()}')
            logger_show_current_threads_num.info(
예제 #25
0
class AbstractConsumer(
        LoggerLevelSetterMixin,
        metaclass=abc.ABCMeta,
):
    """Abstract base class for all queue consumers.

    A concrete subclass binds to one broker type by setting ``BROKER_KIND``
    and implementing ``_shedual_task`` (pull messages and submit them),
    ``_confirm_consume`` (ack) and ``_requeue`` (nack / re-deliver).
    """

    # Seconds to sleep between checks while inside the configured "do not run" time window.
    time_interval_for_check_do_not_run_time = 60
    # Broker type identifier; must be overridden by each concrete subclass.
    BROKER_KIND = None

    @property
    @decorators.synchronized
    def publisher_of_same_queue(self):
        """Lazily build, once and thread-safely, a publisher bound to this consumer's queue."""
        if not self._publisher_of_same_queue:
            self._publisher_of_same_queue = get_publisher(
                self._queue_name,
                consuming_function=self.consuming_function,
                broker_kind=self.BROKER_KIND)
            if self._msg_expire_senconds:
                self._publisher_of_same_queue.set_is_add_publish_time()
        return self._publisher_of_same_queue

    def bulid_a_new_publisher_of_same_queue(self):
        """Build a fresh publisher for this queue (not the cached property one).

        NOTE(review): "bulid" is a typo for "build", but renaming would break callers.
        """
        return get_publisher(self._queue_name, broker_kind=self.BROKER_KIND)

    @classmethod
    def join_shedual_task_thread(cls):
        """Block the calling thread on every consumer's scheduling thread.

        Needed e.g. when consumers are started inside multiprocessing
        processes on Linux; see the usage example in the string below.
        :return:
        """
        """
        def ff():
            RabbitmqConsumer('queue_test', consuming_function=f3, threads_num=20, msg_schedule_time_intercal=2, log_level=10, logger_prefix='yy平台消费', is_consuming_function_use_multi_params=True).start_consuming_message()
            RabbitmqConsumer('queue_test2', consuming_function=f4, threads_num=20, msg_schedule_time_intercal=4, log_level=10, logger_prefix='zz平台消费', is_consuming_function_use_multi_params=True).start_consuming_message()
            AbstractConsumer.join_shedual_task_thread()            # 如果开多进程启动消费者,在linux上需要这样写下这一行。


        if __name__ == '__main__':
            [Process(target=ff).start() for _ in range(4)]

        """
        ConsumersManager.join_all_consumer_shedual_task_thread()

    # noinspection PyProtectedMember
    def __init__(
            self,
            queue_name,
            *,
            consuming_function: Callable = None,
            function_timeout=0,
            threads_num=50,
            concurrent_num=50,
            specify_threadpool=None,
            concurrent_mode=1,
            max_retry_times=3,
            log_level=10,
            is_print_detail_exception=True,
            msg_schedule_time_intercal=0.0,
            qps: float = 0,
            msg_expire_senconds=0,
            is_using_distributed_frequency_control=False,
            is_send_consumer_hearbeat_to_redis=False,
            logger_prefix='',
            create_logger_file=True,
            do_task_filtering=False,
            task_filtering_expire_seconds=0,
            is_consuming_function_use_multi_params=True,
            is_do_not_run_by_specify_time_effect=False,
            do_not_run_by_specify_time=('10:00:00', '22:00:00'),
            schedule_tasks_on_main_thread=False,
            function_result_status_persistance_conf=FunctionResultStatusPersistanceConfig(
                False, False, 7 * 24 * 3600),
            is_using_rpc_mode=False):
        """
        :param queue_name: name of the queue to consume from.
        :param consuming_function: the function that processes each message.
        :param function_timeout: timeout in seconds; the function is killed if it runs longer. 0 means unlimited.
        :param threads_num: thread / coroutine concurrency level (deprecated, see concurrent_num).
        :param concurrent_num: concurrency level; overrides threads_num. threads_num will be deprecated
               because its name is inaccurate — the concurrency mode is not necessarily threads.
        :param specify_threadpool: use a given thread/coroutine pool so several consumers can share one pool;
               when not None, threads_num is ignored.
        :param concurrent_mode: concurrency mode: 1 threads, 2 gevent, 3 eventlet.
        :param max_retry_times: maximum number of retries before the message is acknowledged anyway.
        :param log_level: logging level for this consumer's logger.
        :param is_print_detail_exception: log the full traceback on consuming-function errors.
        :param msg_schedule_time_intercal: interval between message dispatches, used for rate control.
        :param qps: number of function executions per second; qps overrides msg_schedule_time_intercal,
               which will be deprecated.
        :param is_using_distributed_frequency_control: use distributed rate limiting (redis counter).
               By default the limit applies per consumer instance: two qps=10 consumers on the same
               queue together run 20/s; with distributed control their combined total stays at 10.
        :param is_send_consumer_hearbeat_to_redis: send the consumer heartbeat to redis; some features
               need a count of active consumers because not every broker is a real MQ.
        :param logger_prefix: log prefix so different consumers produce distinguishable logs.
        :param create_logger_file: whether to also log to a file.
        :param do_task_filtering: whether to filter tasks based on the function's arguments.
        :param task_filtering_expire_seconds: expiry of the task filter; 0 means filter forever.
               E.g. with 1800s, a "1 + 2" task published more than 30 minutes ago runs again, but one
               published within the last 30 minutes is skipped. Typically used for result caching.
        :param is_consuming_function_use_multi_params: whether the function takes conventional multiple
               parameters rather than one single body dict.
        :param is_do_not_run_by_specify_time_effect: whether the "do not run" time window is enabled.
        :param do_not_run_by_specify_time: the (start, end) time-of-day window in which not to run.
        :param schedule_tasks_on_main_thread: schedule tasks directly on the main thread, which means
               two consumers cannot both be started on the same main thread.
        :param function_result_status_persistance_conf: whether to persist the function's arguments,
               result and status to mongodb for later tracing / statistics / web display (needs mongo).
        :param is_using_rpc_mode: whether to use RPC mode so the publisher can fetch the consumer's
               result, at some performance cost; async_result.result blocks the current thread.
        """
        self.init_params = copy.copy(locals())
        self.init_params.pop('self')
        self.init_params['broker_kind'] = self.__class__.BROKER_KIND

        ConsumersManager.consumers_queue__info_map[
            queue_name] = current_queue__info_dict = copy.copy(
                self.init_params)
        current_queue__info_dict['consuming_function'] = str(
            consuming_function)  # consuming_function.__name__
        current_queue__info_dict[
            'function_result_status_persistance_conf'] = function_result_status_persistance_conf.to_dict(
            )
        current_queue__info_dict['class_name'] = self.__class__.__name__
        concurrent_name = ConsumersManager.get_concurrent_name_by_concurrent_mode(
            concurrent_mode)
        current_queue__info_dict['concurrent_mode_name'] = concurrent_name

        # Record the "file:line" where this consumer was instantiated so console output is clickable.
        # Line number of the caller at instantiation time.
        # When instantiated through the factory (consumer_factory.py) walk one more frame up,
        # so the location points at the user's code rather than at the factory.
        line = sys._getframe(0).f_back.f_lineno
        # Module file name of the caller.
        file_name = sys._getframe(1).f_code.co_filename
        if 'consumer_factory.py' in file_name:
            line = sys._getframe(1).f_back.f_lineno
            file_name = sys._getframe(2).f_code.co_filename
        current_queue__info_dict[
            'where_to_instantiate'] = f'{file_name}:{line}'

        self._queue_name = queue_name
        self.queue_name = queue_name  # public alias to avoid protected-access warnings for external users.
        self.consuming_function = consuming_function
        self._function_timeout = function_timeout
        self._threads_num = concurrent_num if threads_num == 50 else threads_num  # concurrent_num wins unless threads_num was explicitly changed; threads_num is deprecated.
        self._specify_threadpool = specify_threadpool
        self._threadpool = None  # built lazily; a separate thread checks message count and heartbeat.
        self._concurrent_mode = concurrent_mode
        self._max_retry_times = max_retry_times
        self._is_print_detail_exception = is_print_detail_exception
        self._qps = qps
        if qps != 0:
            msg_schedule_time_intercal = 1.0 / qps  # qps overrides the schedule interval; msg_schedule_time_intercal will be deprecated.
        self._msg_schedule_time_intercal = msg_schedule_time_intercal if msg_schedule_time_intercal > 0.001 else 0.001
        self._is_using_distributed_frequency_control = is_using_distributed_frequency_control
        self._is_send_consumer_hearbeat_to_redis = is_send_consumer_hearbeat_to_redis or is_using_distributed_frequency_control
        self._msg_expire_senconds = msg_expire_senconds

        if self._concurrent_mode not in (1, 2, 3):
            raise ValueError('设置的并发模式不正确')
        self._concurrent_mode_dispatcher = ConcurrentModeDispatcher(self)

        self._logger_prefix = logger_prefix
        self._log_level = log_level
        if logger_prefix != '':
            logger_prefix += '--'

        # logger_name = f'{logger_prefix}{self.__class__.__name__}--{concurrent_name}--{queue_name}--{self.consuming_function.__name__}'
        logger_name = f'{logger_prefix}{self.__class__.__name__}--{queue_name}'
        # nb_print(logger_name)
        self.logger = LogManager(logger_name).get_logger_and_add_handlers(
            log_level,
            log_filename=f'{logger_name}.log' if create_logger_file else None,
            formatter_template=frame_config.
            NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER,
        )
        # self.logger.info(f'{self.__class__} 在 {current_queue__info_dict["where_to_instantiate"]}  被实例化')
        sys.stdout.write(
            f'{time.strftime("%H:%M:%S")} "{current_queue__info_dict["where_to_instantiate"]}"  \033[0;30;44m此行 '
            f'实例化队列名 {current_queue__info_dict["queue_name"]} 的消费者, 类型为 {self.__class__}\033[0m\n'
        )

        self._do_task_filtering = do_task_filtering
        self._redis_filter_key_name = f'filter_zset:{queue_name}' if task_filtering_expire_seconds else f'filter_set:{queue_name}'
        filter_class = RedisFilter if task_filtering_expire_seconds == 0 else RedisImpermanencyFilter
        self._redis_filter = filter_class(self._redis_filter_key_name,
                                          task_filtering_expire_seconds)

        self._is_consuming_function_use_multi_params = is_consuming_function_use_multi_params

        self._execute_task_times_every_minute = 0  # number of tasks executed within the current one-minute window.
        self._lock_for_count_execute_task_times_every_minute = Lock()
        self._current_time_for_execute_task_times_every_minute = time.time()

        self._msg_num_in_broker = 0
        self._last_timestamp_when_has_task_in_queue = 0
        self._last_timestamp_print_msg_num = 0

        self._is_do_not_run_by_specify_time_effect = is_do_not_run_by_specify_time_effect
        self._do_not_run_by_specify_time = do_not_run_by_specify_time  # optional time-of-day window during which consumption is paused.
        self._schedule_tasks_on_main_thread = schedule_tasks_on_main_thread

        self._result_persistence_helper = ResultPersistenceHelper(
            function_result_status_persistance_conf, queue_name)

        self._is_using_rpc_mode = is_using_rpc_mode

        self.stop_flag = False

        # State used by the qps / frequency-control logic.
        self._last_submit_task_timestamp = 0
        self._last_start_count_qps_timestamp = time.time()
        self._has_execute_times_in_recent_second = 0

        self._publisher_of_same_queue = None

        self.consumer_identification = f'{socket.gethostname()}_{time_util.DatetimeConverter().datetime_str.replace(":","-")}_{os.getpid()}_{id(self)}'

        self.custom_init()

    @property
    @decorators.synchronized
    def concurrent_pool(self):
        """Thread-safe access to the worker pool built by the concurrency-mode dispatcher."""
        return self._concurrent_mode_dispatcher.build_pool()

    def custom_init(self):
        """Hook for subclasses to run extra initialization at the end of __init__."""
        pass

    def keep_circulating(self,
                         time_sleep=0.001,
                         exit_if_function_run_sucsess=False,
                         is_display_detail_exception=True,
                         block=True):
        """Decorator that keeps running a function forever at a fixed interval.
        :param time_sleep: sleep interval between iterations.
        :param is_display_detail_exception: log the full traceback on error instead of just the message.
        :param exit_if_function_run_sucsess: stop looping once the call succeeds.
        :param block: run in (and block) the current thread instead of spawning a new one.
        """
        def _keep_circulating(func):
            @wraps(func)
            def __keep_circulating(*args, **kwargs):

                # noinspection PyBroadException
                def ___keep_circulating():
                    while 1:
                        try:
                            result = func(*args, **kwargs)
                            if exit_if_function_run_sucsess:
                                return result
                        except Exception as e:
                            msg = func.__name__ + '   运行出错\n ' + traceback.format_exc(
                                limit=10
                            ) if is_display_detail_exception else str(e)
                            self.logger.error(msg)
                        finally:
                            time.sleep(time_sleep)

                if block:
                    return ___keep_circulating()
                else:
                    threading.Thread(target=___keep_circulating, ).start()

            return __keep_circulating

        return _keep_circulating

    # noinspection PyAttributeOutsideInit
    def start_consuming_message(self):
        """Start consuming: heartbeat, broker stats, filter cleanup, then schedule tasks."""
        self.logger.warning(f'开始消费 {self._queue_name} 中的消息')
        if self._is_send_consumer_hearbeat_to_redis:
            self._distributed_consumer_statistics = DistributedConsumerStatistics(
                self._queue_name, self.consumer_identification)
            self._distributed_consumer_statistics.run()
        self.keep_circulating(20, block=False)(
            self.check_heartbeat_and_message_count)()
        self._redis_filter.delete_expire_filter_task_cycle()
        if self._schedule_tasks_on_main_thread:
            self.keep_circulating(1)(self._shedual_task)()
        else:
            self._concurrent_mode_dispatcher.schedulal_task_with_no_block()

    @abc.abstractmethod
    def _shedual_task(self):
        """
        Each subclass must implement this: pull messages from the broker and
        submit the function with its arguments to the work pool.
        :return:
        """
        raise NotImplementedError

    def _run(
        self,
        kw: dict,
    ):
        # Run one message: apply the task filter, update per-minute stats, then execute with retry/ack.
        function_only_params = _delete_keys_and_return_new_dict(kw['body'], )
        if self.__get_priority_conf(
                kw,
                'do_task_filtering') and self._redis_filter.check_value_exists(
                    function_only_params):  # skip tasks whose parameters already ran successfully.
            self.logger.info(
                f'redis的 [{self._redis_filter_key_name}] 键 中 过滤任务 {kw["body"]}'
            )
            self._confirm_consume(kw)
            return
        with self._lock_for_count_execute_task_times_every_minute:
            self._execute_task_times_every_minute += 1
            if time.time(
            ) - self._current_time_for_execute_task_times_every_minute > 60:
                self.logger.info(
                    f'一分钟内执行了 {self._execute_task_times_every_minute} 次函数 [ {self.consuming_function.__name__} ] ,预计'
                    f'还需要 {time_util.seconds_to_hour_minute_second(self._msg_num_in_broker / self._execute_task_times_every_minute * 60)} 时间'
                    f'才能执行完成 {self._msg_num_in_broker}个剩余的任务 ')
                self._current_time_for_execute_task_times_every_minute = time.time(
                )
                self._execute_task_times_every_minute = 0
        self._run_consuming_function_with_confirm_and_retry(
            kw,
            current_retry_times=0,
            function_result_status=FunctionResultStatus(
                self.queue_name, self.consuming_function.__name__, kw['body']),
        )

    def __get_priority_conf(self, kw: dict, broker_task_config_key: str):
        """Per-message config from body['extra'] takes priority over the instance default."""
        broker_task_config = kw['body'].get('extra',
                                            {}).get(broker_task_config_key,
                                                    None)
        if broker_task_config is None:
            return getattr(self, f'_{broker_task_config_key}')
        else:
            return broker_task_config

    def _run_consuming_function_with_confirm_and_retry(
        self,
        kw: dict,
        current_retry_times,
        function_result_status: FunctionResultStatus,
    ):
        # Execute the consuming function, retrying (recursively) up to max_retry_times,
        # then acknowledge the message and optionally persist / publish the result.
        function_only_params = _delete_keys_and_return_new_dict(kw['body'])
        if current_retry_times < self.__get_priority_conf(
                kw, 'max_retry_times'):
            function_result_status.run_times += 1
            # noinspection PyBroadException
            t_start = time.time()
            try:
                function_run = self.consuming_function if self._function_timeout == 0 else self._concurrent_mode_dispatcher.timeout_deco(
                    self.__get_priority_conf(kw, 'function_timeout'))(
                        self.consuming_function)
                if self._is_consuming_function_use_multi_params:  # function takes conventional keyword parameters.
                    function_result_status.result = function_run(
                        **function_only_params)
                else:
                    function_result_status.result = function_run(
                        function_only_params
                    )  # function takes a single dict whose key/value pairs are the parameters.
                function_result_status.success = True
                self._confirm_consume(kw)
                if self.__get_priority_conf(kw, 'do_task_filtering'):
                    self._redis_filter.add_a_value(
                        function_only_params
                    )  # on success, add the sorted key/value string of the params to the filter set.
                self.logger.debug(
                    f' 函数 {self.consuming_function.__name__}  '
                    f'第{current_retry_times + 1}次 运行, 正确了,函数运行时间是 {round(time.time() - t_start, 4)} 秒,入参是 【 {function_only_params} 】。  {ConsumersManager.get_concurrent_info()}'
                )
            except Exception as e:
                if isinstance(
                        e, (PyMongoError, ExceptionForRequeue)
                ):  # mongo being down/in backup, or a deliberately raised ExceptionForRequeue, requeues the message and is not bound by the retry limit.
                    self.logger.critical(
                        f'函数 [{self.consuming_function.__name__}] 中发生错误 {type(e)}  {e},消息重新入队'
                    )
                    time.sleep(1)  # avoid a tight error/requeue loop that would hammer the CPU and the broker.
                    return self._requeue(kw)
                self.logger.error(
                    f'函数 {self.consuming_function.__name__}  第{current_retry_times + 1}次发生错误,'
                    f'函数运行时间是 {round(time.time() - t_start, 4)} 秒,\n  入参是 【 {function_only_params} 】   \n 原因是 {type(e)} {e} ',
                    exc_info=self.__get_priority_conf(
                        kw, 'is_print_detail_exception'))
                function_result_status.exception = f'{e.__class__.__name__}    {str(e)}'
                return self._run_consuming_function_with_confirm_and_retry(
                    kw,
                    current_retry_times + 1,
                    function_result_status,
                )
        else:
            self.logger.critical(
                f'函数 {self.consuming_function.__name__} 达到最大重试次数 {self.__get_priority_conf(kw, "max_retry_times")} 后,仍然失败, 入参是 【 {function_only_params} 】'
            )
            self._confirm_consume(kw)  # failed more than max_retry_times: acknowledge anyway so the message is not redelivered forever.
        if self.__get_priority_conf(kw, 'is_using_rpc_mode'):
            # print(function_result_status.get_status_dict(without_datetime_obj=True))
            with RedisMixin().redis_db_frame.pipeline() as p:
                # RedisMixin().redis_db_frame.lpush(kw['body']['extra']['task_id'], json.dumps(function_result_status.get_status_dict(without_datetime_obj=True)))
                # RedisMixin().redis_db_frame.expire(kw['body']['extra']['task_id'], 600)
                p.lpush(
                    kw['body']['extra']['task_id'],
                    json.dumps(
                        function_result_status.get_status_dict(
                            without_datetime_obj=True)))
                p.expire(kw['body']['extra']['task_id'], 600)
                p.execute()
        self._result_persistence_helper.save_function_result_to_mongo(
            function_result_status)

    @abc.abstractmethod
    def _confirm_consume(self, kw):
        """Acknowledge (confirm) consumption of the message to the broker."""
        raise NotImplementedError

    def check_heartbeat_and_message_count(self):
        """Refresh the broker message count; log it at most once a minute."""
        self._msg_num_in_broker = self.publisher_of_same_queue.get_message_count(
        )
        if time.time() - self._last_timestamp_print_msg_num > 60:
            self.logger.info(
                f'[{self._queue_name}] 队列中还有 [{self._msg_num_in_broker}] 个任务')
            self._last_timestamp_print_msg_num = time.time()
        if self._msg_num_in_broker != 0:
            self._last_timestamp_when_has_task_in_queue = time.time()
        return self._msg_num_in_broker

    @abc.abstractmethod
    def _requeue(self, kw):
        """Put the message back onto the queue for redelivery."""
        raise NotImplementedError

    def _submit_task(self, kw):
        # Gatekeeper before execution: honor the no-run window, drop expired
        # messages, throttle to the configured qps, then hand off to the pool.
        if self._judge_is_daylight():
            self._requeue(kw)
            time.sleep(self.time_interval_for_check_do_not_run_time)
            return
        publish_time = _get_publish_time(kw['body'])
        msg_expire_senconds_priority = self.__get_priority_conf(
            kw, 'msg_expire_senconds')
        if msg_expire_senconds_priority != 0 and time.time(
        ) - msg_expire_senconds_priority > publish_time:
            self.logger.warning(
                f'消息发布时戳是 {publish_time} {kw["body"].get("publish_time_format", "")},距离现在 {round(time.time() - publish_time, 4)} 秒 ,'
                f'超过了指定的 {msg_expire_senconds_priority} 秒,丢弃任务')
            self._confirm_consume(kw)
            return 0
        if self._is_using_distributed_frequency_control:  # share the qps budget across all active consumers.
            active_num = self._distributed_consumer_statistics.active_consumer_num
            self.__frequency_control(
                self._qps / active_num,
                self._msg_schedule_time_intercal * active_num)
        else:
            self.__frequency_control(self._qps,
                                     self._msg_schedule_time_intercal)
        self.concurrent_pool.submit(self._run, kw)

    def __frequency_control(self, qpsx, msg_schedule_time_intercalx):
        # qps throttling for the consuming function; strategy depends on the target rate.
        if qpsx <= 2:
            """ 原来的简单版 """
            time.sleep(msg_schedule_time_intercalx)
        elif 2 < qpsx <= 20:
            """ 改进的控频版,防止网络波动"""
            time_sleep_for_qps_control = max(
                (msg_schedule_time_intercalx -
                 (time.time() - self._last_submit_task_timestamp)) * 0.99,
                10**-3)
            # print(time.time() - self._last_submit_task_timestamp)
            # print(time_sleep_for_qps_control)
            time.sleep(time_sleep_for_qps_control)
            self._last_submit_task_timestamp = time.time()
        else:
            """基于计数的控频"""
            if time.time() - self._last_start_count_qps_timestamp > 1:
                self._has_execute_times_in_recent_second = 1
                self._last_start_count_qps_timestamp = time.time()
            else:
                self._has_execute_times_in_recent_second += 1
            # print(self._has_execute_times_in_recent_second)
            if self._has_execute_times_in_recent_second >= qpsx:
                time.sleep(
                    (1 -
                     (time.time() - self._last_start_count_qps_timestamp)) * 1)

    @decorators.FunctionResultCacher.cached_function_result_for_a_time(120)
    def _judge_is_daylight(self):
        """Return True while the current time is inside the no-run window (result cached for 120s)."""
        if self._is_do_not_run_by_specify_time_effect and (
                self._do_not_run_by_specify_time[0] <
                time_util.DatetimeConverter().time_str <
                self._do_not_run_by_specify_time[1]):
            self.logger.warning(
                f'现在时间是 {time_util.DatetimeConverter()} ,现在时间是在 {self._do_not_run_by_specify_time} 之间,不运行'
            )
            return True

    def __str__(self):
        return f'队列为 {self.queue_name} 函数为 {self.consuming_function} 的消费者'
예제 #26
0
import time
import os
import requests
import importlib
import sys
import time
import os.path
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal, LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
from nb_log import LogManager
from nb_log_config import LOG_PATH

# Module-level logger for the API helpers; streams to console and to api.log under LOG_PATH.
logger = LogManager("api").get_logger_and_add_handlers(
    is_add_stream_handler=True, log_filename="api.log", log_path=LOG_PATH)

# NOTE(review): reload(sys) is a Python 2 relic (used for setdefaultencoding);
# it has no useful effect on Python 3 — candidate for removal, confirm first.
importlib.reload(sys)
# Module import timestamp; presumably used later for elapsed-time reporting — TODO confirm.
time1 = time.time()
from case.VC_project.VCtest_login import VC_Login


def Ycontract_Listquire(s,
                        contractNo=None,
                        supplierCode=None,
                        businessStatus=None,
                        createTimeStart=None,
                        createTimeEnd=None,
                        executeStart=None,
                        executeEnd=None,
                        contractTemplateNo=None,
예제 #27
0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import allure
import pytest

from nb_log import LogManager

from common.standalone_test import StandaloneTest
from executor import fedb_executor
from util.test_util import getCases

log = LogManager('python-sdk-test').get_logger_and_add_handlers()

class TestStandaloneWindow(StandaloneTest):

    # All cases in this suite pass.
    @pytest.mark.parametrize("testCase", getCases(["/function/window/"]))
    @allure.feature("window")
    @allure.story("batch")
    def test_window1(self, testCase):
        """Run each window-function case from /function/window/ against the standalone connection."""
        print(testCase)
        fedb_executor.build(self.connect, testCase).run()

    # 13没pass属于正常情况 剩下都pass
    @pytest.mark.parametrize("testCase", getCases(["/function/cluster/"]))
    @allure.feature("window")
    @allure.story("batch")
예제 #28
0
    def __init__(
            self,
            queue_name,
            *,
            consuming_function: Callable = None,
            function_timeout=0,
            threads_num=50,
            concurrent_num=50,
            specify_threadpool=None,
            concurrent_mode=1,
            max_retry_times=3,
            log_level=10,
            is_print_detail_exception=True,
            msg_schedule_time_intercal=0.0,
            qps: float = 0,
            msg_expire_senconds=0,
            is_using_distributed_frequency_control=False,
            is_send_consumer_hearbeat_to_redis=False,
            logger_prefix='',
            create_logger_file=True,
            do_task_filtering=False,
            task_filtering_expire_seconds=0,
            is_consuming_function_use_multi_params=True,
            is_do_not_run_by_specify_time_effect=False,
            do_not_run_by_specify_time=('10:00:00', '22:00:00'),
            schedule_tasks_on_main_thread=False,
            function_result_status_persistance_conf=FunctionResultStatusPersistanceConfig(
                False, False, 7 * 24 * 3600),
            is_using_rpc_mode=False):
        """
        :param queue_name: name of the queue to consume from.
        :param consuming_function: the function that processes each message.
        :param function_timeout: timeout in seconds; the function is killed if it runs longer. 0 means unlimited.
        :param threads_num: thread / coroutine concurrency level (deprecated, see concurrent_num).
        :param concurrent_num: concurrency level; overrides threads_num. threads_num will be deprecated
               because its name is inaccurate — the concurrency mode is not necessarily threads.
        :param specify_threadpool: use a given thread/coroutine pool so several consumers can share one pool;
               when not None, threads_num is ignored.
        :param concurrent_mode: concurrency mode: 1 threads, 2 gevent, 3 eventlet.
        :param max_retry_times: maximum number of retries before the message is acknowledged anyway.
        :param log_level: logging level for this consumer's logger.
        :param is_print_detail_exception: log the full traceback on consuming-function errors.
        :param msg_schedule_time_intercal: interval between message dispatches, used for rate control.
        :param qps: number of function executions per second; qps overrides msg_schedule_time_intercal,
               which will be deprecated.
        :param is_using_distributed_frequency_control: use distributed rate limiting (redis counter).
               By default the limit applies per consumer instance: two qps=10 consumers on the same
               queue together run 20/s; with distributed control their combined total stays at 10.
        :param is_send_consumer_hearbeat_to_redis: send the consumer heartbeat to redis; some features
               need a count of active consumers because not every broker is a real MQ.
        :param logger_prefix: log prefix so different consumers produce distinguishable logs.
        :param create_logger_file: whether to also log to a file.
        :param do_task_filtering: whether to filter tasks based on the function's arguments.
        :param task_filtering_expire_seconds: expiry of the task filter; 0 means filter forever.
               E.g. with 1800s, a "1 + 2" task published more than 30 minutes ago runs again, but one
               published within the last 30 minutes is skipped. Typically used for result caching.
        :param is_consuming_function_use_multi_params: whether the function takes conventional multiple
               parameters rather than one single body dict.
        :param is_do_not_run_by_specify_time_effect: whether the "do not run" time window is enabled.
        :param do_not_run_by_specify_time: the (start, end) time-of-day window in which not to run.
        :param schedule_tasks_on_main_thread: schedule tasks directly on the main thread, which means
               two consumers cannot both be started on the same main thread.
        :param function_result_status_persistance_conf: whether to persist the function's arguments,
               result and status to mongodb for later tracing / statistics / web display (needs mongo).
        :param is_using_rpc_mode: whether to use RPC mode so the publisher can fetch the consumer's
               result, at some performance cost; async_result.result blocks the current thread.
        """
        self.init_params = copy.copy(locals())
        self.init_params.pop('self')
        self.init_params['broker_kind'] = self.__class__.BROKER_KIND

        ConsumersManager.consumers_queue__info_map[
            queue_name] = current_queue__info_dict = copy.copy(
                self.init_params)
        current_queue__info_dict['consuming_function'] = str(
            consuming_function)  # consuming_function.__name__
        current_queue__info_dict[
            'function_result_status_persistance_conf'] = function_result_status_persistance_conf.to_dict(
            )
        current_queue__info_dict['class_name'] = self.__class__.__name__
        concurrent_name = ConsumersManager.get_concurrent_name_by_concurrent_mode(
            concurrent_mode)
        current_queue__info_dict['concurrent_mode_name'] = concurrent_name

        # Record the "file:line" where this consumer was instantiated so console output is clickable.
        # Line number of the caller at instantiation time.
        # When instantiated through the factory (consumer_factory.py) walk one more frame up,
        # so the location points at the user's code rather than at the factory.
        line = sys._getframe(0).f_back.f_lineno
        # Module file name of the caller.
        file_name = sys._getframe(1).f_code.co_filename
        if 'consumer_factory.py' in file_name:
            line = sys._getframe(1).f_back.f_lineno
            file_name = sys._getframe(2).f_code.co_filename
        current_queue__info_dict[
            'where_to_instantiate'] = f'{file_name}:{line}'

        self._queue_name = queue_name
        self.queue_name = queue_name  # public alias to avoid protected-access warnings for external users.
        self.consuming_function = consuming_function
        self._function_timeout = function_timeout
        self._threads_num = concurrent_num if threads_num == 50 else threads_num  # concurrent_num wins unless threads_num was explicitly changed; threads_num is deprecated.
        self._specify_threadpool = specify_threadpool
        self._threadpool = None  # built lazily; a separate thread checks message count and heartbeat.
        self._concurrent_mode = concurrent_mode
        self._max_retry_times = max_retry_times
        self._is_print_detail_exception = is_print_detail_exception
        self._qps = qps
        if qps != 0:
            msg_schedule_time_intercal = 1.0 / qps  # qps overrides the schedule interval; msg_schedule_time_intercal will be deprecated.
        self._msg_schedule_time_intercal = msg_schedule_time_intercal if msg_schedule_time_intercal > 0.001 else 0.001
        self._is_using_distributed_frequency_control = is_using_distributed_frequency_control
        self._is_send_consumer_hearbeat_to_redis = is_send_consumer_hearbeat_to_redis or is_using_distributed_frequency_control
        self._msg_expire_senconds = msg_expire_senconds

        if self._concurrent_mode not in (1, 2, 3):
            raise ValueError('设置的并发模式不正确')
        self._concurrent_mode_dispatcher = ConcurrentModeDispatcher(self)

        self._logger_prefix = logger_prefix
        self._log_level = log_level
        if logger_prefix != '':
            logger_prefix += '--'

        # logger_name = f'{logger_prefix}{self.__class__.__name__}--{concurrent_name}--{queue_name}--{self.consuming_function.__name__}'
        logger_name = f'{logger_prefix}{self.__class__.__name__}--{queue_name}'
        # nb_print(logger_name)
        self.logger = LogManager(logger_name).get_logger_and_add_handlers(
            log_level,
            log_filename=f'{logger_name}.log' if create_logger_file else None,
            formatter_template=frame_config.
            NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER,
        )
        # self.logger.info(f'{self.__class__} 在 {current_queue__info_dict["where_to_instantiate"]}  被实例化')
        sys.stdout.write(
            f'{time.strftime("%H:%M:%S")} "{current_queue__info_dict["where_to_instantiate"]}"  \033[0;30;44m此行 '
            f'实例化队列名 {current_queue__info_dict["queue_name"]} 的消费者, 类型为 {self.__class__}\033[0m\n'
        )

        self._do_task_filtering = do_task_filtering
        self._redis_filter_key_name = f'filter_zset:{queue_name}' if task_filtering_expire_seconds else f'filter_set:{queue_name}'
        filter_class = RedisFilter if task_filtering_expire_seconds == 0 else RedisImpermanencyFilter
        self._redis_filter = filter_class(self._redis_filter_key_name,
                                          task_filtering_expire_seconds)

        self._is_consuming_function_use_multi_params = is_consuming_function_use_multi_params

        self._execute_task_times_every_minute = 0  # number of tasks executed within the current one-minute window.
        self._lock_for_count_execute_task_times_every_minute = Lock()
        self._current_time_for_execute_task_times_every_minute = time.time()

        self._msg_num_in_broker = 0
        self._last_timestamp_when_has_task_in_queue = 0
        self._last_timestamp_print_msg_num = 0

        self._is_do_not_run_by_specify_time_effect = is_do_not_run_by_specify_time_effect
        self._do_not_run_by_specify_time = do_not_run_by_specify_time  # optional time-of-day window during which consumption is paused.
        self._schedule_tasks_on_main_thread = schedule_tasks_on_main_thread

        self._result_persistence_helper = ResultPersistenceHelper(
            function_result_status_persistance_conf, queue_name)

        self._is_using_rpc_mode = is_using_rpc_mode

        self.stop_flag = False

        # State used by the qps / frequency-control logic.
        self._last_submit_task_timestamp = 0
        self._last_start_count_qps_timestamp = time.time()
        self._has_execute_times_in_recent_second = 0

        self._publisher_of_same_queue = None

        self.consumer_identification = f'{socket.gethostname()}_{time_util.DatetimeConverter().datetime_str.replace(":","-")}_{os.getpid()}_{id(self)}'

        self.custom_init()
#!/usr/bin/env python
# encoding: utf-8
# @author: liusir
# @file: requests_utils.py
# @time: 2020/11/18 8:07 下午

import json
import jsonpath
import requests
import re
from utils.config_utils import local_config
from utils.check_utils import CheckUtils
from requests.exceptions import RequestException, ProxyError, ConnectionError
from nb_log import LogManager

# Module-level logger for the request utilities; streams to console and to the
# file named by local_config.LOG_NAME.
logger = LogManager('P3P4_API_TEST').get_logger_and_add_handlers(
    is_add_stream_handler=True, log_filename=local_config.LOG_NAME)


class RequestsUtils:
    def __init__(self):
        """Set up per-run request state: base host, shared HTTP session, variable store."""
        # ${var} substitutions captured from earlier responses live here.
        self.tmp_variables = {}
        # One shared Session so cookies persist across chained API calls.
        self.session = requests.session()
        # Base host from local config; request URLs are built on top of it.
        self.hosts = local_config.HOSTS

    def __get(self, requests_info):
        try:
            url = self.hosts + requests_info['请求地址']
            variable_list = re.findall('\\${\w+}', requests_info['请求参数(get)'])
            for variable in variable_list:
                requests_info['请求参数(get)'] = requests_info[
                    '请求参数(get)'].replace(
예제 #30
0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABCMeta, abstractmethod
from nb_log import LogManager
import re
import datetime
from entity.fesql_result import FesqlResult
import util.fesql_util as fesql_util
import check.fesql_assert as fesql_assert

log = LogManager('fesql-auto-test').get_logger_and_add_handlers()


class Checker(metaclass=ABCMeta):
    """Abstract base for result checkers; concrete subclasses implement check()."""

    @abstractmethod
    def check(self):
        """Validate one aspect of a query result."""


class BaseChecker(Checker):
    """Common base that keeps the test case and its execution result for subclasses' check()."""

    def __init__(self, fesqlCase, fesqlResult: FesqlResult):
        # Stash both so every concrete checker can compare case vs. actual result.
        self.fesqlCase, self.fesqlResult = fesqlCase, fesqlResult


class RowsChecker(BaseChecker):