import logging
import queue
from logging.handlers import QueueHandler


class sfLogger:
    """Expose a named logger's records as a stream of messages via a queue."""

    def __init__(self, logger_name):
        self.format = logging.Formatter("%(message)s")
        self.log_queue = queue.Queue()
        self.queue_handler = QueueHandler(self.log_queue)
        self.queue_handler.setFormatter(self.format)
        self.logger = logging.getLogger(logger_name)
        self.logger.addHandler(self.queue_handler)
        self.logger.setLevel(logging.DEBUG)
        # No QueueListener here: handing the QueueHandler back to a
        # QueueListener would re-enqueue every record in an endless loop;
        # loggenerator() below is the actual consumer of the queue.
        self.isStop = False

    def start(self):
        self.isStop = False

    def loggenerator(self):
        # Block on the queue and yield each record's message.
        while not self.isStop:
            yield self.log_queue.get().getMessage()

    def stop(self):
        self.isStop = True
        # Discard any records still waiting in the queue.
        while not self.log_queue.empty():
            self.log_queue.get()
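A minimal usage sketch for sfLogger, assuming the class above is in scope; the logger name, the producer thread, and the "done" sentinel are illustrative only:

import threading
import time

sf = sfLogger("demo")
sf.start()

def produce():
    # Emit a few messages, then a sentinel the consumer can stop on.
    for i in range(3):
        sf.logger.info("message %d", i)
        time.sleep(0.1)
    sf.logger.info("done")

threading.Thread(target=produce, daemon=True).start()

for msg in sf.loggenerator():
    print(msg)
    if msg == "done":
        sf.stop()  # flips isStop so the generator exits on its next check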
def execute(self, recordonly=False, dryrun=False):
    '''
    Execute a migration.

    If recordonly is True, the migration is only recorded.
    If dryrun is True, the migration is neither executed nor recorded.
    '''
    q = queue.Queue(-1)  # no limit on size
    handler = QueueHandler(q)
    handler.setFormatter(MigrationFormatter())
    logger = getattr(self.module, 'log', logging.getLogger(self.module.__name__))
    logger.propagate = False
    # Iterate over a copy: removing handlers while iterating over the
    # live list would skip entries.
    for h in logger.handlers[:]:
        logger.removeHandler(h)
    logger.addHandler(handler)

    if not hasattr(self.module, 'migrate'):
        error = SyntaxError('A migration should at least have a migrate(db) function')
        raise MigrationError('Error while executing migration', exc=error)

    out = [['info', 'Recorded only']] if recordonly else []
    state = {}

    if not recordonly and not dryrun:
        db = get_db()
        db._state = state
        try:
            self.module.migrate(db)
            out = _extract_output(q)
        except Exception as e:
            out = _extract_output(q)
            tb = traceback.format_exc()
            self.add_record('migrate', out, db._state, False, traceback=tb)
            fe = MigrationError('Error while executing migration',
                                output=out, exc=e, traceback=tb)
            if hasattr(self.module, 'rollback'):
                try:
                    self.module.rollback(db)
                    out = _extract_output(q)
                    self.add_record('rollback', out, db._state, True)
                    msg = 'Error while executing migration, rollback has been applied'
                    fe = RollbackError(msg, output=out, migrate_exc=fe)
                except Exception as re:
                    out = _extract_output(q)
                    self.add_record('rollback', out, db._state, False)
                    msg = 'Error while executing migration rollback'
                    fe = RollbackError(msg, output=out, exc=re, migrate_exc=fe)
            raise fe

    if not dryrun:
        self.add_record('migrate', out, state, True)

    return out
def __init__(self, *modules):
    self._queue = Queue()
    self._log = logging.getLogger('friends')
    handler = QueueHandler(self._queue)
    formatter = logging.Formatter(LOG_FORMAT, style='{')
    handler.setFormatter(formatter)
    self._log.addHandler(handler)
    # Capture effectively everything.  This can't be NOTSET because by
    # definition, that propagates log messages to the root logger.
    self._log.setLevel(1)
    self._log.propagate = False
    # Create the mock, and then go through all the named modules, mocking
    # their 'log' attribute.
    self._patchers = []
    for path in modules:
        prefix, dot, module = path.rpartition('.')
        if module == '*':
            # Partition again to get the parent package.
            subprefix, dot, parent = prefix.rpartition('.')
            for filename in resource_listdir(subprefix, parent):
                basename, extension = os.path.splitext(filename)
                if extension != '.py':
                    continue
                patch_path = '{}.{}.__dict__'.format(prefix, basename)
                patcher = mock.patch.dict(patch_path, {'log': self._log})
                self._patchers.append(patcher)
        else:
            patch_path = '{}.__dict__'.format(path)
            patcher = mock.patch.dict(patch_path, {'log': self._log})
            self._patchers.append(patcher)
    # Start all the patchers.
    for patcher in self._patchers:
        patcher.start()
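A hedged sketch of how this mock might be driven from a test, assuming the __init__ above belongs to a class named LogMock with a stop() method that calls patcher.stop() on each patcher (both names are assumptions not shown in the excerpt):

def test_log_is_captured():
    # LogMock and its stop() are assumed; 'friends.utils.http' is illustrative.
    log_mock = LogMock('friends.utils.http')
    try:
        from friends.utils import http
        http.log.error('boom')                 # goes to the mocked logger
        record = log_mock._queue.get_nowait()  # pull the captured record
        assert record.getMessage() == 'boom'
    finally:
        log_mock.stop()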
class ConsoleUi:
    """Poll messages from a logging queue and display them in a scrolled text widget"""

    def __init__(self, frame):
        self.frame = frame
        self.input_start_idx = tk.END
        # Create a ScrolledText widget
        self.scrolled_text = ScrolledText(frame, state='disabled', height=12)
        self.scrolled_text.pack(expand=True, fill=tk.BOTH)
        self.scrolled_text.configure(font='TkFixedFont')
        self.scrolled_text.tag_config('INFO', foreground='black')
        self.scrolled_text.tag_config('DEBUG', foreground='gray')
        self.scrolled_text.tag_config('WARNING', foreground='dark orange')
        self.scrolled_text.tag_config('ERROR', foreground='red')
        self.scrolled_text.tag_config('CRITICAL', foreground='red', underline=1)
        self.scrolled_text.bind('<Key>', self.key_press)
        # Create a logging handler using a queue
        self.log_queue = queue.Queue()
        self.queue_handler = QueueHandler(self.log_queue)
        formatter = logging.Formatter('%(asctime)s:\t%(message)s', datefmt='%H:%M:%S')
        self.queue_handler.setFormatter(formatter)
        logger.addHandler(self.queue_handler)
        # Start polling messages from the queue
        self.frame.after(100, self.poll_log_queue)

    def display(self, record):
        msg = record.getMessage()
        self.scrolled_text.configure(state='normal')
        self.scrolled_text.insert(tk.END, msg + '\n', record.levelname)
        # self.scrolled_text.configure(state='disabled')
        # Autoscroll to the bottom
        self.scrolled_text.yview(tk.END)
        self.scrolled_text.mark_set('input_start', 'end-1c')
        self.scrolled_text.mark_gravity('input_start', tk.LEFT)

    def poll_log_queue(self):
        while True:
            try:
                record = self.log_queue.get(block=False)
            except queue.Empty:
                break
            else:
                self.display(record)
        # Check every 100ms if there is a new message in the queue to display
        self.frame.after(100, self.poll_log_queue)

    def key_press(self, event):
        """Send the current input line to input_queue when the Return key is pressed"""
        if event.char == '\r':
            user_input = self.scrolled_text.get('input_start', 'end-1c').strip()
            input_queue.put(user_input)
            self.scrolled_text.mark_set('input_start', 'end-1c')
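ConsoleUi references a module-level logger and input_queue that the excerpt does not show; a minimal sketch of that surrounding wiring, with all names assumed:

import logging
import queue
import tkinter as tk

logger = logging.getLogger(__name__)   # the global ConsoleUi attaches to
input_queue = queue.Queue()            # the global key_press() writes to

root = tk.Tk()
frame = tk.Frame(root)
frame.pack(expand=True, fill=tk.BOTH)
console = ConsoleUi(frame)
logger.setLevel(logging.DEBUG)
logger.info('Console ready')
root.mainloop()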
def main(system, setpoint, kp, ki, kd, period, sensor_pin, relay_pin,
         logconfig, visible, port, sensehat, goalpost, speed, frequency):
    '''MSVP is the Minimum Sous Vide Project or Product'''
    with open(base_dir / logconfig) as f:
        logging.config.dictConfig(yaml.safe_load(f.read()))
    logger = logging.getLogger('msvp')

    sysargs = {'period': period}
    if system == 'MsvpRtdRelay':
        sysargs.update({'sensor_pin': sensor_pin, 'relay_pin': relay_pin})
    else:
        sysargs.update({'temp': 50.0})
    System = getattr(importlib.import_module('msvp.systems'), system)
    sv = System(**sysargs)

    # set up queue and log handler for passing messages to the web application
    # (there's a way to set up a QueueHandler with dictConfig, but maybe not
    # when we need to pass the queue into a thread without a global -- and
    # since this is properly part of how the program works, it seems fine to
    # have it here)
    q = Queue()
    handler = QueueHandler(q)
    handler.setFormatter(logging.Formatter('%(asctime)s|%(message)s'))
    logger.addHandler(handler)

    # start web application; from https://stackoverflow.com/a/49482036
    thread = threading.Thread(target=web_application,
                              args=(q, setpoint, kp, ki, kd, visible, port))
    thread.daemon = True
    thread.start()

    if sensehat:
        try:
            display_thread = threading.Thread(target=sensehat_display,
                                              args=(sv, setpoint, goalpost,
                                                    speed, frequency))
            display_thread.daemon = True
            display_thread.start()
        except ImportError:
            logger.warning('The sense-hat package is not installed.')

    logger.info(f'startup -- setpoint {setpoint}, tuning is {kp}, {ki}, {kd}')
    pid = PID(kp, ki, kd, setpoint=setpoint, sample_time=period,
              output_limits=(0.0, 100.0))
    while True:
        temp = sv.temperature()
        logger.info(f'temperature|{temp}')
        output = pid(temp)
        logger.info(f'output|{output}')
        sv.control(output)
def init_logger():
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    if platform.python_version().startswith('2'):
        # python 2: log to stdout
        import sys
        ch = logging.StreamHandler(sys.stdout)
    else:
        # python 3: log into a queue
        import queue
        que = queue.Queue(-1)  # no limit on size
        from logging.handlers import QueueHandler
        ch = QueueHandler(que)
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    root.addHandler(ch)
    yield ch
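Because init_logger yields its handler, it reads like a pytest-style fixture. On Python 3 the yielded object is a QueueHandler, which exposes the queue it was built with as its .queue attribute, so captured records can be read back directly; a sketch, with the logger name and message illustrative:

import logging

gen = init_logger()
ch = next(gen)  # advance the generator to obtain the handler

logging.getLogger('demo').debug('hello queue')

record = ch.queue.get_nowait()  # QueueHandler keeps its queue on .queue
print(record.getMessage())      # -> 'hello queue'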
def setup_logger() -> logging.Logger:
    # create logger
    logger = logging.getLogger('cnstream_service')
    logger.propagate = False

    # create formatter
    # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    formatter = logging.Formatter('%(levelname)s - %(message)s')

    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)

    # create queue handler and set level to info
    qh = QueueHandler(log_queue)
    qh.setLevel(logging.INFO)
    qh.setFormatter(formatter)

    # add handlers to logger
    logger.addHandler(ch)
    logger.addHandler(qh)
    return logger
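setup_logger expects a module-level log_queue; a plausible consumer for it is a QueueListener that forwards queued records to another handler. A sketch with an assumed file target:

import logging
import queue
from logging.handlers import QueueListener

log_queue = queue.Queue()  # the module-level queue setup_logger() uses

# Forward queued records to a file; the filename is illustrative.
file_handler = logging.FileHandler('cnstream_service.log')
listener = QueueListener(log_queue, file_handler, respect_handler_level=True)
listener.start()

logger = setup_logger()
logger.info('service started')  # reaches both the console and the file

listener.stop()  # flush the queue and join the listener thread on shutdown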
def run(self):
    print("starting DB Worker")
    qh = QueueHandler(self.errq)
    f = logging.Formatter('SW formatter: %(asctime)s: %(name)s|%(processName)s|%(process)d|%(levelname)s -- %(message)s')
    qh.setFormatter(f)
    qh.setLevel(logging.INFO)
    self.log = logging.getLogger(__name__)
    self.log.setLevel(logging.INFO)
    self.log.propagate = False
    self.log.addHandler(qh)
    self.log.info('DBWorker {} starting'.format(self.pid))
    self.tweet_lookup_queue = {}
    self.tweet_text_lookup_queue = []
    while True:
        try:
            d = self.tweet_queue.get(block=True)
        except Empty:
            time.sleep(SLEEP_TIME)
        else:
            try:
                code, data = d
            except ValueError:
                self.log.exception('Code, data assignment failed with:\n{}\n'.format(d))
            else:
                if code == TWEET_MESSAGE:
                    # Set streamsession_id from data
                    ssid = data.pop('streamsession_id', None)
                    if ssid is not None:
                        self.streamsession_id = ssid
                    retries = 0
                    retry_limit = 5
                    while retries < retry_limit:
                        try:
                            self.process_tweet(data)
                        except (IntegrityError, OperationalError) as e:
                            msg = '\n\n' + '*' * 70
                            msg += '\n\nDB integrity error. Retrying ({}).'.format(retries)
                            msg += 'Exception: {}\n'.format(e)
                            msg += '-' * 70 + '\n'
                            msg += traceback.format_exc()
                            msg += '\n\n{0}\nTweet Data:\n{1}\n{0}'.format('*' * 70, json_dumps(data, indent=4))
                            self.log.warning(msg)
                            retries += 1
                        else:
                            retries = retry_limit
                    # Check/dump lookup queues
                    if len(self.tweet_lookup_queue) >= self.lookup_queue_limit:
                        self.dump_tweet_lookup_queue()
                    if len(self.tweet_text_lookup_queue) >= self.lookup_queue_limit:
                        self.dump_tweet_text_lookup_queue()
                    self.countq.put(1)
                    self.count += 1
                elif code == START_MESSAGE:
                    # stream started, time passed as data
                    self.log.debug('received START_MESSAGE')
                    session = get_session(self.db_path)
                    ss = session.merge(StreamSession(starttime=data))
                    self.commit_transaction(session)
                    self.streamsession_id = ss.id
                    session.close()
                elif code == STOP_STREAM_MESSAGE:
                    # stream stopped, time passed as data
                    self.log.debug('received STOP_STREAM_MESSAGE')
                    session = get_session(self.db_path)
                    ss = session.merge(StreamSession(id=self.streamsession_id))
                    ss.endtime = data
                    self.commit_transaction(session)
                    session.close()
                elif code == STOP_MESSAGE:
                    # process stopped by parent;
                    # put the message back for the other workers
                    self.tweet_queue.put((code, data))
                    print('stopping DB worker')
                    print('  dumping tweet lookup queue...')
                    self.dump_tweet_lookup_queue()
                    print('  DONE.')
                    print('  dumping tweet text lookup queue...')
                    self.dump_tweet_text_lookup_queue()
                    print('  DONE.')
                    print('Recording session stop time')
                    print('  DONE.')
                    break
    print('{}: Process {} (id={}) finished.'.format(str(dt.now()), self.name, self.pid))
class GUIClient(TunnelClient):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.options = 10
        self.logs = []
        self.configure_logging()

    def configure_logging(self) -> None:
        """ Reconfigure the logging to catch the messages for the GUI """
        self.log_queue = queue.Queue()
        self.log_handler = QueueHandler(self.log_queue)
        self.log_handler.setFormatter(logging.Formatter(LOG_FORMAT, style="{"))
        logging.getLogger().handlers = [self.log_handler]

    def get_dimension(self) -> None:
        """ Get the dimensions of the current window """
        self.height, self.width = self.scr.getmaxyx()

    def _draw(self) -> None:
        """ Draw all GUI elements """
        self.scr.clear()
        self._draw_config()
        self._draw_info()
        self._draw_log()

    def _draw_info(self):
        """ Draw a box with main information about the current status """
        win = self.scr.subwin(self.options, self.width // 2, 0, 0)
        win.box()
        win.border(0)
        bytes_in = sum(cl.bytes_in for cl in self.clients.values())
        bytes_out = sum(cl.bytes_out for cl in self.clients.values())
        total = self.tunnel.bytes_in + self.tunnel.bytes_out
        win.addstr(0, 2, "Info")
        if self.last_ping and self.last_pong:
            ping_time = f"{1000 * (self.last_pong - self.last_ping):.0f}"
        else:
            ping_time = "-"
        overhead = total / (bytes_in + bytes_out) - 1 if bytes_in + bytes_out else 0
        self._draw_lines(
            win,
            [
                f"Clients: {len(self.clients)}",
                f"Domain: {self.domain}",
                f"Overhead: {100 * overhead:.2f} %",
                f"Ping: {ping_time}",
                f"Transfer In: {format_transfer(bytes_out)}",
                f"Transfer Out: {format_transfer(bytes_in)}",
                f"Transfer Total: {format_transfer(bytes_in + bytes_out)}",
            ],
        )
        win.refresh()
        return win

    def _draw_config(self):
        """ Draw a box with the current tunnel configuration """
        mx, my = self.width // 2, self.options
        win = self.scr.subwin(my, self.width - mx, 0, mx)
        win.box()
        win.border(0)
        win.addstr(0, 2, "Configuration")
        networks = self.networks if self.networks else ["0.0.0.0/0", "::/0"]
        self._draw_lines(
            win,
            [
                f"Allowed networks: {', '.join(map(str, networks))}",
                f"Ban time: {self.bantime or 'off'}",
                f"Clients: {self.max_clients or '-'}",
                f"Connections per IP: {self.max_connects or '-'}",
                f"Idle timeout: {self.idle_timeout or 'off'}",
                f"Ping: {'on' if self.ping_enabled else 'off'}",
                f"Protocol: {self.protocol.name}",
            ],
        )
        win.refresh()
        return win

    def _draw_log(self):
        """ Draw a box with the latest logs """
        h = self.height - self.options - 4
        w = self.width - 4
        win = self.scr.subwin(h + 4, w + 4, self.options, 0)
        win.box()
        win.border(0)
        win.addstr(0, 2, "Log")
        while not self.log_queue.empty():
            self.logs.append(self.log_queue.get().msg)
        self.logs = self.logs[-self.height:]
        self._draw_lines(win, self.logs)
        win.refresh()
        return win

    def _draw_lines(self, win, lines: List[str]) -> None:
        """ Draw multiple lines in a window with some border """
        h, w = [k - 4 for k in win.getmaxyx()]
        for y, line in enumerate(lines[:h]):
            win.addstr(y + 2, 2, line[:w])

    async def _handle(self) -> bool:
        """ Handle the drawing after each package """
        self.get_dimension()
        self._draw()
        return await super()._handle()

    def _gui(self, scr) -> None:
        """ Configure the main screen """
        self.scr = scr
        curses.noecho()
        curses.curs_set(0)
        super().start()

    def start(self) -> None:
        curses.wrapper(self._gui)
# Set the log format
fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s', '%Y-%m-%d %H:%M:%S')

# Add the console handler
cmd_handler = logging.StreamHandler(sys.stdout)
cmd_handler.setLevel(logging.DEBUG)
cmd_handler.setFormatter(fmt)

# Add the file handler
file_handler = logging.handlers.TimedRotatingFileHandler(
    filename="logs\\log.txt", encoding='utf-8', when="D", interval=1, backupCount=31)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(fmt)

# Add the http handler
queue_handler = QueueHandler(log_queue)
queue_handler.setLevel(logging.INFO)
queue_handler.setFormatter(fmt)

# Attach the handlers to the logger
logger.addHandler(cmd_handler)
logger.addHandler(file_handler)
logger.addHandler(queue_handler)

# logger.debug("The weather is nice today")
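The "http handler" comment suggests log_queue is drained elsewhere and its records shipped over HTTP; a sketch of such a consumer under that assumption (the endpoint URL, payload shape, and thread are hypothetical):

import json
import threading
import urllib.request

def ship_logs():
    # Drain log_queue forever and POST each record; best-effort delivery.
    while True:
        record = log_queue.get()
        body = json.dumps({'level': record.levelname,
                           'message': record.getMessage()}).encode('utf-8')
        req = urllib.request.Request('http://example.com/logs', data=body,
                                     headers={'Content-Type': 'application/json'})
        try:
            urllib.request.urlopen(req, timeout=5)
        except OSError:
            pass  # drop the record rather than block the producer

threading.Thread(target=ship_logs, daemon=True).start()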
def get_logger(context: str, context_len: int = 15,
               log_profile: bool = False,
               log_sse: bool = False,
               log_remote: bool = False,
               fmt_str: str = None,
               event_trigger=None,
               **kwargs) -> Union['logging.Logger', 'NTLogger']:
    """Get a logger with configurations

    :param context: the name prefix of the log
    :param context_len: length of the context, i.e. module, function, line number
    :param log_profile: is this logger for profiling, profile logger takes dict and output to json
    :param log_sse: is this logger used for server-side event
    :param log_remote: is this logger for remote logging
    :param fmt_str: use customized logging format, otherwise respect the ``JINA_LOG_LONG`` environment variable
    :param event_trigger: a ``threading.Event`` or ``multiprocessing.Event`` for event-based logger
    :return: the configured logger

    .. note::
        One can change the verbosity of jina logger via the environment variable ``JINA_LOG_VERBOSITY``
    """
    from .. import __uptime__
    from .queue import __sse_queue__, __profile_queue__, __log_queue__

    if not fmt_str:
        title = os.environ.get('JINA_POD_NAME', context)
        if 'JINA_LOG_LONG' in os.environ:
            fmt_str = f'{title[:context_len]:>{context_len}}@%(process)2d' \
                      f'[%(levelname).1s][%(filename).3s:%(funcName).3s:%(lineno)3d]:%(message)s'
        else:
            fmt_str = f'{title[:context_len]:>{context_len}}@%(process)2d' \
                      f'[%(levelname).1s]:%(message)s'
    timed_fmt_str = f'%(asctime)s:' + fmt_str

    verbose_level = LogVerbosity.from_string(
        os.environ.get('JINA_LOG_VERBOSITY', 'INFO'))

    if os.name == 'nt':  # for Windows
        return NTLogger(context, verbose_level)

    # Remove all handlers associated with the root logger object.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)

    logger = logging.getLogger(context)
    logger.propagate = False
    logger.handlers = []
    logger.setLevel(verbose_level.value)

    if log_profile:
        h = QueueHandler(__profile_queue__)
        # the profile logger always uses debug level
        logger.setLevel(LogVerbosity.DEBUG.value)
        h.setFormatter(ProfileFormatter(timed_fmt_str))
        logger.addHandler(h)
        # the profile logger does not need other handlers
        return logger

    if event_trigger is not None:
        h = EventHandler(event_trigger)
        h.setFormatter(ColorFormatter(fmt_str))
        logger.addHandler(h)

    if log_remote:
        h = QueueHandler(__log_queue__)
        h.setFormatter(ColorFormatter(fmt_str))
        logger.addHandler(h)

    if ('JINA_LOG_SSE' in os.environ) or log_sse:
        h = QueueHandler(__sse_queue__)
        h.setFormatter(JsonFormatter(timed_fmt_str))
        logger.addHandler(h)

    if os.environ.get('JINA_LOG_FILE') == 'TXT':
        h = logging.FileHandler(f'jina-{__uptime__}.log', delay=True)
        h.setFormatter(PlainFormatter(timed_fmt_str))
        logger.addHandler(h)
    elif os.environ.get('JINA_LOG_FILE') == 'JSON':
        h = logging.FileHandler(f'jina-{__uptime__}.json', delay=True)
        h.setFormatter(JsonFormatter(timed_fmt_str))
        logger.addHandler(h)

    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(ColorFormatter(fmt_str))
    logger.addHandler(console_handler)

    success_level = LogVerbosity.SUCCESS.value  # between WARNING and INFO
    logging.addLevelName(success_level, 'SUCCESS')
    setattr(logger, 'success', lambda message: logger.log(success_level, message))

    return logger
import logging
import queue
from logging.handlers import QueueHandler
import os
import re
from typing import Dict
from urllib.parse import unquote

from . import tokenise, parse, interpret, execute
from . import database

log = logging.getLogger(__name__)

log_queue = queue.Queue()
user_log = QueueHandler(log_queue)
user_log.setLevel("INFO")
formatter = logging.Formatter('%(levelname)s (%(module)s): %(message)s')
user_log.setFormatter(formatter)
for child in ["tokenise", "execute", "parse", "interpret"]:
    logging.root.getChild(f"csvql.{child}").addHandler(user_log)

console_log = logging.StreamHandler()
console_log.setLevel("DEBUG")
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
console_log.setFormatter(formatter)
logging.root.getChild("csvql.web").addHandler(console_log)

myregex = "^" + "(?P<name>.*)" + r"\.csv" + "$"

DATABASE: Dict[str, database.Table] = {}
log.info("Loading tables...")
for file in os.listdir("../data/"):