def logger(name='AutomatedTesting', path=path, flieName=flieName):
    """Build and return a logger configured from the project's ``logConfig``.

    :param name: logger namespace, defaults to ``AutomatedTesting``.
    :param path: directory the log file is written to (module-level default:
        the project's ``log`` folder).
    :param flieName: log file name (module-level default: today's date).
        NOTE: the misspelling is kept for backward compatibility with callers.
    :return: the logger object produced by ``get_logger``.
    """
    # Pull the numeric settings out of the config first so the call below
    # stays readable; levels 1..5 map onto logging.DEBUG(10)..CRITICAL(50).
    level = int(logConfig['log_level'])
    # Formatter template id: 1 = detailed formatter_dict template,
    # 2 = brief template, 5 = recommended template.
    template = int(logConfig['formatter_template'])
    return get_logger(
        name,                                                   # logger namespace
        log_level_int=level,                                    # output level
        log_path=path,                                          # log folder
        log_filename=flieName,                                  # log file name
        formatter_template=template,                            # formatter template id
        is_add_stream_handler=logConfig['stream_handler'],      # echo to console?
        do_not_use_color_handler=logConfig['color_handler'],    # disable colored logs?
    )
def __init__(self, *args, **kwargs): QMainWindow.__init__(self, *args, **kwargs) # 除了控制台以外,在文件中也会记录日志。 self.file_logger = nb_log.get_logger(f'{self.__class__.__name__}_file', is_add_stream_handler=False, log_filename=f'{self.__class__.__name__}_file.log', log_path='./') """ # 这个用组合的形式,来访问控件。 网上有的是用继承方式,让WindowsClient同时也继承Ui_MainWindow,那么这两行 self.ui = Ui_MainWindow() self.ui.setupUi(self) 就成了一行,变成 self.setupUi(self) 然后用self.pushButtonxx 来访问控件。 现在方式self.ui.pushButtonxx来访问控件,这种pycahrm自动补全范围更小,使用更清晰。 """ self.ui = Ui_MainWindow() self.ui.setupUi(self) self._now_is_stop_print = False self._len_textEdit = 0 self.ui.pushButton_3.clicked.connect(self._stop_or_start_print) self.ui.pushButton_4.clicked.connect(self._clear_text_edit) self.config_ini = ConfigObj("qt_box_values.ini", encoding='UTF8') sys.excepthook = my_excepthook # 错误重定向到print,print重定向到qt界面的控制台,使永远不会发生出错导致闪退。 self.__init_std() self.custom_init() self.set_button_click_event() self.set_default_value() self._init_all_input_box_value() decorator_libs.keep_circulating(60,block=False)(self._save_all_input_box_value)()
# @Author : ydf # @Time : 2019/8/8 0008 13:27 import os import functools import json from threading import Lock from nb_log import LogManager, get_logger from function_scheduling_distributed_framework.publishers.base_publisher import deco_mq_conn_error import pikav1.exceptions from pikav1.exceptions import AMQPError import pikav1 from function_scheduling_distributed_framework.consumers.base_consumer import AbstractConsumer from function_scheduling_distributed_framework import frame_config get_logger('pikav1', log_level_int=20) class RabbitmqConsumer(AbstractConsumer): """ 使用pika包实现的。 """ BROKER_KIND = 4 # noinspection PyAttributeOutsideInit def custom_init(self): self._lock_for_pika = Lock() self.logger.critical( 'pika 多线程中操作同一个 channel 有问题,如果使用 rabbitmq 建议设置中间件为 BrokerEnum.RABBITMQ_AMQPSTORM' ) os._exit(444) # noqa
import nb_log
import distributed_frame_config

# Development-time debug logger; unlike print, it can be silenced in one place
# by raising the level configured in distributed_frame_config.
develop_logger = nb_log.get_logger(
    'fsdf_develop',
    log_level_int=distributed_frame_config.FSDF_DEVELOP_LOG_LEVEL)
# noinspection PyPep8 def check_port_is_used(ip, port): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # noinspection PyPep8,PyBroadException try: s.connect((ip, int(port))) s.shutdown(2) # 利用shutdown()函数使socket双向数据传输变为单向数据传输。shutdown()需要一个单独的参数, # 该参数表示了如何关闭socket。具体为:0表示禁止将来读;1表示禁止将来写;2表示禁止将来读和写。 return True except Exception: return False logger_zeromq_broker = get_logger('zeromq_broker') # noinspection PyUnresolvedReferences def start_broker(port_router: int, port_dealer: int): try: context = zmq.Context() # noinspection PyUnresolvedReferences frontend = context.socket(zmq.ROUTER) backend = context.socket(zmq.DEALER) frontend.bind(f"tcp://*:{port_router}") backend.bind(f"tcp://*:{port_dealer}") # Initialize poll set poller = zmq.Poller() poller.register(frontend, zmq.POLLIN)
# import gevent.monkey;gevent.monkey.patch_all() import time from function_scheduling_distributed_framework import task_deco, BrokerEnum, run_consumer_with_multi_process, ConcurrentModeEnum import nb_log logger = nb_log.get_logger('sdsda', is_add_stream_handler=False, log_filename='xxx.log') @task_deco( '20000', broker_kind=BrokerEnum.REDIS, concurrent_num=2, log_level=20, qps=0, concurrent_mode=ConcurrentModeEnum.SINGLE_THREAD, ) def f_test_speed(x): pass # logger.debug(x) # f_test_speed2.push(x * 10) print(x) # time.sleep(20) # @task_deco('speed_test_queue2', broker_kind=BrokerEnum.REDIS, log_level=20, qps=2) # def f_test_speed2(y): # pass # print(y)
def __init__(self):
    """Attach an 'xx'-named nb_log logger to this instance."""
    instance_logger = nb_log.get_logger('xx')
    self.logger = instance_logger
print('导入nb_log之前的print是普通的') # import sys # print(sys.path) from nb_log import get_logger logger = get_logger('lalala', log_filename='jinzhifengzhuang.log', formatter_template=5) logger.debug(f'debug是绿色,说明是调试的,代码ok ') logger.info('info是天蓝色,日志正常 ') logger.warning('黄色yello,有警告了 ') logger.error('粉红色说明代码有错误 ') logger.critical('血红色,说明发生了严重错误 ') print('导入nb_log之后的print是强化版的可点击跳转的') # raise(1)
""" 如果当前文件夹包含了 nb_log_config.py 则会自动优先使用当前文件夹的 nb_log_config.py 作为配置。 """ print('导入nb_log之前的print是普通的') from nb_log import get_logger logger = get_logger('lalala', log_filename='jinzhifengzhuang.log') logger.debug(f'debug是绿色,说明是调试的,代码ok') logger.info('info是天蓝色,日志正常') logger.warning('黄色yello,有警告了') logger.error('粉红色说明代码有错误') logger.critical('血红色,说明发生了严重错误') print('导入nb_log之后的print是强化版的可点击跳转的')
import requests
from nb_log import get_logger

# Attach handlers to the 'urllib3' logger so requests' internal logs appear;
# formatter_template=8 — presumably a JSON-style template given the extra={}
# usage below, TODO confirm against nb_log's formatter_dict.
logger = get_logger(
    'urllib3',
    log_filename='urllib3.log',
    formatter_template=8,
    is_use_watched_file_handler_instead_of_custom_concurrent_rotating_file_handler
    =True)

requests.get('http://www.baidu.com')

# 'extra' dicts supply additional fields for the formatter.
logger.debug(11, extra={'c': 5, 'd': 6})
logger.info(22)
logger.warning(33)
logger.error(44)
logger.critical(55, extra=dict(f=7, g=8, h=9))
logger.debug('哈哈哈哈', extra=dict(a=1, b=2))

try:
    1 / 0
except Exception as e:
    logger.exception('错误了')  # logs the message plus the full traceback
from function_scheduling_distributed_framework.set_frame_config import patch_frame_config, show_frame_config # import frame_config from function_scheduling_distributed_framework.consumers.base_consumer import ExceptionForRequeue, ExceptionForRetry, \ AbstractConsumer, ConsumersManager, FunctionResultStatusPersistanceConfig from function_scheduling_distributed_framework.publishers.base_publisher import PriorityConsumingControlConfig, AbstractPublisher from function_scheduling_distributed_framework.factories.publisher_factotry import get_publisher from function_scheduling_distributed_framework.factories.consumer_factory import get_consumer # noinspection PyUnresolvedReferences from function_scheduling_distributed_framework.utils import nb_print, patch_print, LogManager, get_logger, LoggerMixin from function_scheduling_distributed_framework.timing_job import fsdf_background_scheduler, timing_publish_deco from function_scheduling_distributed_framework.constant import BrokerEnum, ConcurrentModeEnum # 有的包默认没加handlers,原始的日志不漂亮且不可跳转不知道哪里发生的。这里把warnning级别以上的日志默认加上handlers。 nb_log.get_logger(name=None, log_level_int=30, log_filename='pywarning.log') logger = nb_log.get_logger('function_scheduling_distributed_framework') class IdeAutoCompleteHelper(LoggerMixin): """ 为了被装饰的消费函数的敲代码时候的被pycharm自动补全而写的类。 """ def __init__(self, consuming_func_decorated: callable): """ :param consuming_func_decorated: 传入被task_deco装饰的函数 此框架非常非常注重,公有函数、方法、类 的名字和入参在ide开发环境下面的自动提示补全效果,如果不是为了这一点,框架能减少很多重复地方。 此类是防止用户调用打错字母或者不知道怎么敲代码不知道有哪些入参。所以才有这个类。
import multiprocessing
import concurrent.futures
import threading
from concurrent.futures.process import _ExceptionWithTraceback, _ResultItem  # noqa
from functools import wraps
import os
import nb_log

name = 'bounded_pool_executor'

logger = nb_log.get_logger('BoundedProcessPoolExecutor')


def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will written
            to by the worker.
        shutdown: A multiprocessing.Event that will be set as a signal to the
            worker that it should exit when call_queue is empty.
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up queue management thread
import nb_log
import time

# File-only logger: console handler disabled so the benchmark measures only
# file-write throughput.
logger = nb_log.get_logger('dsdsd', log_filename='dsdsd.log', is_add_stream_handler=False)

t1 = time.perf_counter()
for i in range(100 * 10000):  # 1,000,000 log records
    logger.debug('heloo' * 10)
print(time.perf_counter() - t1)

# nb_log's ConcurrentRotatingFileHandlerWithBufferInitiativeWindow: writing
# 1,000,000 rows in a single process takes ~115 s on Windows, ~58 s on Linux.
# Demo: importing nb_log upgrades print into a click-to-jump version.
print('导入nb_log之前的print是普通的')
from nb_log import get_logger

print('导入nb_log之后的print是强化版的可点击跳转的')

logger = get_logger('lalala', log_filename='lalala.log')

# Emit 100 rounds of every level to show the per-level console colors.
for i in range(100):
    logger.debug(f'debug是绿色,说明是调试的,代码ok。 ' * 4)
    logger.info('info是天蓝色,日志正常。 ' * 4)
    logger.warning('黄色yello,有警告了。 ' * 4)
    logger.error('粉红色说明代码有错误。 ' * 4)
    logger.critical('血红色,说明发生了严重错误。 ' * 4)

print('只要导入nb_log一次之后的项目任意文件的print是强化版的可点击跳转的,在输出控制台点击行号能自动打开文件跳转到精确行号。')
from nb_log import get_logger

# Console-only logger (no log_filename given) used for a long color demo loop.
logger = get_logger('lalala')

# Ten million rounds — effectively runs until interrupted; one message per level.
for i in range(10000000):
    logger.debug(f'绿色{i}')
    logger.info('蓝色')
    logger.warning('黄色')
    logger.error('粉红色')
    logger.critical('紫红色')
# noinspection PyUnresolvedReferences import nb_log from function_scheduling_distributed_framework.set_frame_config import patch_frame_config, show_frame_config # import frame_config from function_scheduling_distributed_framework.consumers.base_consumer import ExceptionForRequeue, ExceptionForRetry, \ AbstractConsumer, ConsumersManager, FunctionResultStatusPersistanceConfig from function_scheduling_distributed_framework.publishers.base_publisher import PriorityConsumingControlConfig, AbstractPublisher from function_scheduling_distributed_framework.factories.publisher_factotry import get_publisher from function_scheduling_distributed_framework.factories.consumer_factory import get_consumer # noinspection PyUnresolvedReferences from function_scheduling_distributed_framework.utils import nb_print, patch_print, LogManager, get_logger, LoggerMixin from function_scheduling_distributed_framework.timing_job import fsdf_background_scheduler, timing_publish_deco # 有的包默认没加handlers,原始的日志不漂亮且不可跳转不知道哪里发生的。这里把warnning级别以上的日志默认加上handlers。 nb_log.get_logger(name=None, log_level_int=30, log_filename='pywarning.log') class BrokerEnum: RABBITMQ_AMQPSTORM = 0 # 使用 amqpstorm 包操作rabbitmq 作为 分布式消息队列,支持消费确认.推荐这个。 RABBITMQ_RABBITPY = 1 # 使用 rabbitpy 包操作rabbitmq 作为 分布式消息队列,支持消费确认。 REDIS = 2 # 使用 redis 的 list结构,brpop 作为分布式消息队列。随意重启和关闭会丢失大量消息,不支持消费确认。 LOCAL_PYTHON_QUEUE = 3 # 使用python queue.Queue实现的基于当前python进程的消息队列,不支持跨进程 跨脚本 跨机器共享任务,不支持持久化,适合一次性短期简单任务。 RABBITMQ_PIKA = 4 # 使用pika包操作rabbitmq 作为 分布式消息队列。 MONGOMQ = 5 # 使用mongo的表中的行模拟的 作为分布式消息队列,支持消费确认。
import time
from nb_log import get_logger

# log_file_handler_type=2 — presumably selects a time-based rotating file
# handler, judging by the names in this test; TODO confirm against nb_log docs.
logger = get_logger('test_time_raote', log_filename='test_roat_file_log3.log', log_file_handler_type=2)

# for i in range(10):
#     time.sleep(0.001)
#     logger.warning('hhh')

logger.warning('dddd')
import time
from multiprocessing import Process
from nb_log import get_logger

# File-only logger shared (by re-creation) across the worker processes.
logger = get_logger('abcd', log_filename='abcd.log', is_add_stream_handler=False)


def test():
    """Write log lines forever; one copy of this runs per spawned process."""
    while True:
        # Even a multi-process stress test should sleep at least 0.1 ms;
        # real workloads never log back-to-back at unbounded speed.
        time.sleep(0.0001)
        logger.info("test")


if __name__ == '__main__':
    # Five concurrent writer processes exercising the concurrent file handler.
    p = [Process(target=test) for _ in range(5)]
    for i in p:
        i.start()
    for i in p:
        i.join()