def _run_organizer(self):
    """Calls the MP3 organizer with the proper parameters."""
    self.log_text.clear()
    print('Running organizer on path: "{}".'.format(self.dir_path.text()))
    args = Arguments(path=str(self.dir_path.text()),
                     album=str(self.album.text()),
                     artist=str(self.artist.text()),
                     genre=str(self.genre.text()),
                     image=str(self.image_path.text()),
                     client=str(self.client.currentText()),
                     grabber=str(self.lyrics.currentText()))
    handlers_list = [
        logbook.NullHandler(),
        logbook.StreamHandler(sys.stdout, level='DEBUG', bubble=True),
        logbook.StreamHandler(sys.stderr, level='ERROR', bubble=True),
        logbook.StreamHandler(stream=ConsoleLogStream(self.log_text),
                              bubble=True, level=logbook.INFO),
    ]
    with logbook.NestedSetup(handlers_list).applicationbound():
        organize(args)
def create_app() -> Flask:
    app = Flask(__name__)
    app.config.from_object(get_config())

    if app.config['CONFIG_NAME'] in app.config['DEBUG_LOG_AVAILABLE_IN']:
        # pass everything below WARNING to stdout, including DEBUG
        stdout_handler = logbook.StreamHandler(
            stream=sys.stdout, bubble=True,
            filter=lambda r, h: r.level < logbook.WARNING)
    else:
        # ignore DEBUG when not in debug: only levels above DEBUG and
        # below WARNING (i.e. INFO and NOTICE) go to stdout
        stdout_handler = logbook.StreamHandler(
            stream=sys.stdout, bubble=True,
            filter=lambda r, h: logbook.DEBUG < r.level < logbook.WARNING)
    stdout_handler.format_string = LOG_FORMAT_STRING
    logger.handlers.append(stdout_handler)

    stderr_handler = logbook.StreamHandler(stream=sys.stderr, bubble=True,
                                           level='WARNING')
    stderr_handler.format_string = LOG_FORMAT_STRING
    logger.handlers.append(stderr_handler)

    print('Creating app...')
    from remarker.views import main_blueprint
    app.register_blueprint(main_blueprint)

    global __app
    __app = app
    return app
def _create_log_handler(config, add_hostname=False, direct_hostname=False,
                        write_toterm=True):
    logbook.set_datetime_format("utc")
    handlers = [logbook.NullHandler()]
    format_str = "".join([
        "[{record.time:%Y-%m-%dT%H:%MZ}] " if config.get("include_time", True) else "",
        "{record.extra[source]}: " if add_hostname else "",
        "%s: " % socket.gethostname() if direct_hostname else "",
        "{record.message}"])
    log_dir = get_log_dir(config)
    if log_dir:
        if not os.path.exists(log_dir):
            utils.safe_makedir(log_dir)
            # Wait for the new directory to propagate; otherwise we see
            # logging errors on distributed filesystems.
            time.sleep(5)
        handlers.append(
            logbook.FileHandler(os.path.join(log_dir, "%s.log" % LOG_NAME),
                                format_string=format_str, level="INFO",
                                filter=_not_cl))
        handlers.append(
            logbook.FileHandler(os.path.join(log_dir, "%s-debug.log" % LOG_NAME),
                                format_string=format_str, level="DEBUG",
                                bubble=True, filter=_not_cl))
        handlers.append(
            logbook.FileHandler(os.path.join(log_dir, "%s-commands.log" % LOG_NAME),
                                format_string=format_str, level="DEBUG",
                                filter=_is_cl))
    if write_toterm:
        handlers.append(
            logbook.StreamHandler(sys.stdout, format_string="{record.message}",
                                  level="DEBUG", filter=_is_stdout))
        handlers.append(
            logbook.StreamHandler(sys.stderr, format_string=format_str,
                                  bubble=True, filter=_not_cl))
    email = config.get("email",
                       config.get("resources", {}).get("log", {}).get("email"))
    if email:
        email_str = u'''Subject: [bcbio-nextgen] {record.extra[run]} \n\n {record.message}'''
        handlers.append(
            logbook.MailHandler(email, [email], format_string=email_str,
                                level='INFO', bubble=True))
    return CloseableNestedSetup(handlers)
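# A hedged usage sketch for the helper above. CloseableNestedSetup is
# project-specific, but it is assumed here to behave like logbook.NestedSetup
# and support applicationbound(); the minimal config dict is an assumption.
config = {"include_time": True}  # assumed to satisfy the project's get_log_dir()
with _create_log_handler(config).applicationbound():
    logbook.Logger(LOG_NAME).info("pipeline step started")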
def snooze_handler(args):
    if not (args.host or args.issue_name or args.all):
        sys.exit('If you really want to snooze all issues for all hosts,\n'
                 'you need to specify --all.')
    if not (args.days or args.hours):
        args.days = 1
    now = datetime.datetime.utcnow()  # assumed; `now` was undefined in the snippet
    if args.days:
        then = now + datetime.timedelta(days=args.days)
    else:
        then = now + datetime.timedelta(hours=args.hours)
    hostname = (None if not args.host
                else args.host[0] if len(args.host) == 1
                else {'$in': args.host})
    issue_name = (None if not args.issue_name
                  else args.issue_name[0] if len(args.issue_name) == 1
                  else {'$in': args.issue_name})
    ids = snooze_issue(hostname, issue_name, then)
    if not ids:
        print('No matching issues.')
        return
    with logbook.StreamHandler(sys.stdout, bubble=True):
        for doc in get_db().issues.find({'_id': {'$in': ids}}):
            log.info('Snoozed {} {} until {}',
                     doc['hostname'], doc['name'], then)
def get_logbook(name, level=logging.INFO, file=None):
    import logbook

    def log_type(record, handler):
        log = "[{date}] [{level}] [{filename}] [{func_name}] [{lineno}] {msg}".format(
            date=record.time,                             # log timestamp
            level=record.level_name,                      # log level
            filename=os.path.split(record.filename)[-1],  # file name
            func_name=record.func_name,                   # function name
            lineno=record.lineno,                         # line number
            msg=record.message                            # log message
        )
        return log

    if isinstance(level, str):
        # logbook defines its own level constants, distinct from logging's
        level = getattr(logbook, level.upper())
    handler = logbook.StreamHandler(sys.stdout)
    handler.formatter = log_type
    handler.push_application()
    logger = logbook.Logger(name)
    logger.handlers = []
    logger.handlers.append(handler)
    # setting the level on the logger is enough
    logger.level = level
    return logger
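# A minimal usage sketch for the factory above; the channel name 'worker'
# is illustrative, not from the original source.
log = get_logbook('worker', level='info')
log.info('worker started')  # prints e.g. "[...] [INFO] [app.py] [main] [3] worker started"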
def main(verbose, quiet):
    from _benchmark_queries import queries
    with logbook.NullHandler(), logbook.StreamHandler(
            sys.stderr, level=logbook.CRITICAL - verbose + quiet, bubble=False):
        loopback = FlaskLoopback(create_app())
        loopback.activate_address((_root_address, 80))
        num_attempts = 5
        for obj, query in queries:
            times = []
            has_error = False
            print(obj, '|', click.style(query, fg='cyan'), '--')
            for i in range(num_attempts):
                start_time = time.time()
                resp = requests.get(
                    _root_url.add_path('/rest').add_path(obj)
                    .add_query_param('search', query)
                    .add_query_param('page_size', 25))
                end_time = time.time()
                if resp.status_code == requests.codes.internal_server_error:
                    print('\t', click.style('Timeout', fg='red'))
                    has_error = True
                    break
                else:
                    resp.raise_for_status()
                    times.append(end_time - start_time)
            if not has_error:
                print('\t', len(resp.json()[obj]), 'results --',
                      '(Out of {}) Best: {:.03}s Avg: {:.03}s Worst: {:.03}s'.format(
                          num_attempts, min(times),
                          sum(times) / len(times), max(times)))
def init(self):
    self.filename, file_extension = os.path.splitext(os.path.basename(__file__))
    config_file = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                               '../config', 'config.ini')
    log_file = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            '../logs', '%s.log' % self.filename)

    # load config
    self.config = configparser.ConfigParser(strict=False, allow_no_value=True)
    self.config.read(config_file)

    # init logger
    logbook.set_datetime_format("local")
    self.logger = logbook.Logger(name=self.filename)
    format_string = '%s %s' % (
        '[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] {record.level_name}',
        '{record.channel}:{record.lineno}: {record.message}')
    level = self.config.get('Logger', 'level')
    if self.config.has_option('handler_stream_handler', 'verbose'):
        self.logger.handlers.append(
            logbook.StreamHandler(sys.stdout, level=level, bubble=True,
                                  format_string=format_string))
    # the rotating file handler is attached in either case
    self.logger.handlers.append(
        logbook.TimedRotatingFileHandler(log_file, level=level,
                                         date_format='%Y%m%d', backup_count=5,
                                         bubble=True, format_string=format_string))

    self.db = NewsparserDatabaseHandler.instantiate_from_configparser(
        self.config, self.logger)
def _get_file_logging_context(self, filename_template, symlink):
    with ExitStack() as stack:
        compression = config.root.log.compression
        if compression.enabled:
            handler, path = self._get_file_log_handler(
                filename_template, symlink, use_compression=True)
        else:
            handler, path = self._get_file_log_handler(
                filename_template, symlink)
        stack.enter_context(closing(handler))
        stack.enter_context(handler.applicationbound())

        if compression.enabled and compression.use_rotating_raw_file:
            cyclic_handler, _ = self._get_file_log_handler(
                filename_template, symlink, bubble=True, cyclic=True)
            stack.enter_context(cyclic_handler.applicationbound())

        stack.enter_context(self.console_handler.applicationbound())
        stack.enter_context(self.warnings_handler.applicationbound())
        stack.enter_context(self._get_error_logging_context())
        stack.enter_context(self._get_silenced_logs_context())

        if config.root.log.unittest_mode:
            stack.enter_context(
                logbook.StreamHandler(sys.stderr, bubble=True,
                                      level=logbook.TRACE))

        for extra_handler in _extra_handlers:
            stack.enter_context(extra_handler.applicationbound())

        if config.root.log.unified_session_log and self.session_log_handler is not None:
            stack.enter_context(_make_bubbling_handler(self.session_log_handler))

        yield handler, path
def my_logger(original_function, filename="logfileFF.log"):
    # ### THIS IS WHERE I'M STRUGGLING
    import logbook
    import sys
    import functools

    level = logbook.TRACE
    if filename:
        logbook.TimedRotatingFileHandler(filename, level=level).push_application()
    else:
        logbook.StreamHandler(sys.stdout, level=level).push_application()
    msg = (f"Logging Initialized. level: {level}, "
           f"mode: {'stdout mode' if not filename else 'file mode: ' + filename}")
    logger = logbook.Logger("Startup level")
    logger.notice(msg)

    @functools.wraps(original_function)
    def wrapper(*args, **kwargs):
        # `city_id` was undefined in the original snippet; here we assume the
        # wrapped function receives it as its first positional argument or as
        # a keyword argument.
        city_id = kwargs.get('city_id', args[0] if args else None)
        wrap_logger = logbook.Logger("Wrapper Level")
        wrap_logger.trace(f"Starting request for city id: {city_id}")
        result = original_function(*args, **kwargs)
        wrap_logger.trace(f"Search finished for city id: {city_id}")
        return result
    return wrapper
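# A hedged usage sketch for the decorator above; the wrapped function and
# the city id value are hypothetical.
@my_logger
def search_city(city_id):
    return {'city_id': city_id}

search_city(2643743)  # traces "Starting request..." and "Search finished..." around the call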
def getLogger(loggerName='mylogger.log', streamVerb='DEBUG', logFile='log'):
    # Get a logger instance.
    logger = logbook.Logger(name=loggerName)
    # set up stream handler
    mySH = logbook.StreamHandler(stream=sys.stdout, level=streamVerb.upper(),
                                 format_string='{record.message}', bubble=True)
    logger.handlers.append(mySH)
    if logFile:
        myFH = logbook.FileHandler(filename=logFile, level='DEBUG', bubble=True)
        myFH.format_string = '{record.message}'
        logger.handlers.append(myFH)
    try:
        # pop a previously pushed application-bound handler, if any
        stdErr = list(
            logbook.handlers.Handler.stack_manager.iter_context_objects())[0]
        stdErr.pop_application()
    except Exception:
        pass
    return logger
def log_init(quiet=False, verbose=False):
    log_level = logbook.INFO
    if quiet:
        log_level = logbook.NOTICE
    if verbose:
        log_level = logbook.DEBUG
    # TODO: get rid of global_state
    setup = global_state[1]
    if setup is not None:
        setup.pop_application()
    handler_null = LogNullHandler(
        level=log_level,
        bubble=False,
    )
    handler_stderr = logbook.StreamHandler(
        stream=sys.stderr,
        level=log_level,
    )
    setup = global_state[1] = logbook.NestedSetup([
        handler_stderr,
        handler_null,
    ])
    setup.push_application()
    return True
def testserver(tmux, livereload, port):
    if tmux:
        return _run_tmux_frontend(port=port)
    from flask_app.app import create_app

    extra_files = [from_project_root("flask_app", "app.yml")]

    app = create_app({
        'DEBUG': True,
        'TESTING': True,
        'SECRET_KEY': 'dummy',
        'SECURITY_PASSWORD_SALT': 'dummy',
    })
    logbook.StreamHandler(sys.stderr, level='DEBUG').push_application()
    logbook.compat.redirect_logging()

    if livereload:
        from livereload import Server
        s = Server(app)
        for filename in extra_files:
            s.watch(filename)
        s.watch('flask_app')
        for filename in ['webapp.js', 'vendor.js', 'webapp.css']:
            s.watch(os.path.join('static', 'assets', filename), delay=1)
        s.serve(port=port, liveport=35729)
    else:
        app.run(port=port, extra_files=extra_files)
def test_tagged(default_handler):
    from logbook.more import TaggingLogger, TaggingHandler
    stream = StringIO()
    second_handler = logbook.StreamHandler(stream)

    logger = TaggingLogger('name', ['cmd'])
    handler = TaggingHandler(dict(
        info=default_handler,
        cmd=second_handler,
        both=[default_handler, second_handler],
    ))
    handler.bubble = False

    with handler:
        with capturing_stderr_context() as captured:
            logger.log('info', 'info message')
            logger.log('both', 'all message')
            logger.cmd('cmd message')

    stderr = captured.getvalue()
    assert 'info message' in stderr
    assert 'all message' in stderr
    assert 'cmd message' not in stderr

    stringio = stream.getvalue()
    assert 'info message' not in stringio
    assert 'all message' in stringio
    assert 'cmd message' in stringio
def main():
    if args.debug:
        out_dir = '/tmp'
        log_level = 'DEBUG'
    else:
        out_dir = args.out_dir
        log_level = 'INFO'

    inputs = glob.glob(args.inputs)
    mbytes = estimate_bytes(inputs) // (10 ** 6)

    out_fileroot = get_output_fileroot(
        out_dir,
        'dna2vec',
        'k{}to{}-{}d-{}c-{}Mbp-{}'.format(
            args.k_low,
            args.k_high,
            args.vec_dim,
            args.context,
            mbytes * args.epochs,  # total Mbp across all epochs
            args.kmer_fragmenter))

    out_txt_filename = '{}.txt'.format(out_fileroot)
    with open(out_txt_filename, 'w') as summary_fptr:
        with Tee(summary_fptr):
            logbook.StreamHandler(sys.stdout, level=log_level).push_application()
            redirect_logging()
            run_main(args, inputs, out_fileroot)
def __init__(self, loglevel=None):
    config = ClientConfig(encryption_enabled=True,
                          pickle_key=cfg.pickle_key,
                          store_name=cfg.store_name,
                          store_sync_tokens=True)
    if not os.path.exists(cfg.store_path):
        os.makedirs(cfg.store_path)

    self.http_session = aiohttp.ClientSession(
        headers={'User-Agent': self.user_agent})
    self.client = AsyncClient(
        cfg.server,
        cfg.user,
        cfg.device_id,
        config=config,
        store_path=cfg.store_path,
    )

    logger_group.level = getattr(logbook, loglevel) if loglevel else logbook.CRITICAL
    logbook.StreamHandler(sys.stdout).push_application()
    self.logger = logbook.Logger('bot')
    logger_group.add_logger(self.logger)

    self.mli = MessageLinksInfo(self.http_session)

    self._register_commands()

    self.client.add_response_callback(self._sync_cb, SyncResponse)
    self.client.add_response_callback(self._key_query_cb, KeysQueryResponse)
    self.client.add_event_callback(self._invite_cb, InviteMemberEvent)
def _get_file_logging_context(self, filename_template, symlink):
    with ExitStack() as stack:
        compression = config.root.log.compression
        handler = stack.enter_context(
            self._log_file_handler_context(filename_template, symlink,
                                           use_compression=compression.enabled))
        stack.enter_context(handler.applicationbound())

        if compression.enabled and compression.use_rotating_raw_file:
            rotating_handler = stack.enter_context(
                self._log_file_handler_context(filename_template, symlink,
                                               bubble=True, use_rotation=True))
            stack.enter_context(rotating_handler.applicationbound())

        stack.enter_context(self.console_handler.applicationbound())
        stack.enter_context(self.warnings_handler.applicationbound())
        error_handler = stack.enter_context(self._get_error_logging_context())
        stack.enter_context(error_handler.applicationbound())
        stack.enter_context(self._get_silenced_logs_context())

        if config.root.log.unittest_mode:
            stack.enter_context(
                logbook.StreamHandler(sys.stderr, bubble=True,
                                      level=logbook.TRACE))

        for extra_handler in _extra_handlers:
            stack.enter_context(extra_handler.applicationbound())

        if config.root.log.unified_session_log and self.session_log_handler is not None:
            stack.enter_context(_make_bubbling_handler(self.session_log_handler))

        if config.root.run.capture.error_logs_as_errors:
            stack.enter_context(ErrorHandler().applicationbound())

        path = handler.stream.name if isinstance(handler, logbook.FileHandler) else None
        yield handler, path
def main():
    # Set up logging for the whole app
    util.ensure_dir(LOG_FILENAME)
    logbook.TimedRotatingFileHandler(LOG_FILENAME, bubble=True).push_application()
    logbook.StreamHandler(sys.stdout, level="NOTICE", bubble=True).push_application()

    log = logbook.Logger(__name__.split(".", 1)[-1])
    log.info("*" * 80)
    log.info("***" + "Starting CLI Parser for binance-monitor".center(74) + "***")
    log.info("*" * 80)

    parser = argparse.ArgumentParser(
        description="CLI for monitoring Binance account information")
    parser.add_argument("--update", help="Update trades from server",
                        action="store_true")
    parser.add_argument("--force",
                        help="Update trades for all symbols, regardless of blacklist",
                        action="store_true")
    parser.add_argument("--listen", help="Listen for new trades",
                        action="store_true")
    parser.add_argument("--blacklist", help="Add symbol(s) to blacklist", nargs="*")
    parser.add_argument("--whitelist", help="Remove symbol(s) from blacklist",
                        nargs="*")
    parser.add_argument("--csv", help="Write out CSV file of trades (from cache)",
                        action="store_true")
    args = parser.parse_args()

    acct_monitor = monitor.AccountMonitor()

    blacklist_from_cli(args.blacklist or None)
    whitelist_from_cli(args.whitelist or None)

    force_all = bool(args.force)

    if args.update:
        acct_monitor.get_all_trades(force_all=force_all)
        acct_monitor.trade_store.save()

    if args.listen:
        acct_monitor.start_user_monitor()
        while True:
            try:
                time.sleep(60 * 60 * 24)
            except KeyboardInterrupt:
                print("\nExit requested...")
                break

    if args.csv:
        acct_monitor.trade_store.to_csv()

    if reactor.running:
        reactor.callFromThread(reactor.stop)
def init_logging(filename: str = None):
    level = logbook.TRACE
    if filename:
        # uses the default date format
        logbook.TimedRotatingFileHandler(filename, level=level).push_application()
    else:
        logbook.StreamHandler(sys.stdout, level=level).push_application()
def main():
    logbook.StreamHandler(sys.stderr, level='WARNING').push_application()
    with open('esync.yaml') as f:
        config = yaml.safe_load(f)
    log.debug('loaded config: {}', config)
    v = Validator(config)
    blobs = v.check_files()
    v.check_blobs(blobs)
    return v.fail
def global_init(logfile):
    if logfile:
        logbook.TimedRotatingFileHandler(
            logfile, level=logbook.INFO,
            date_format='%Y-%m-%d').push_application()
    else:
        logbook.StreamHandler(sys.stdout, level=logbook.TRACE).push_application()
def logging_init(logfile=None):
    if logfile:
        logbook.TimedRotatingFileHandler(
            logfile, level=logbook.INFO,
            date_format='%Y-%m-%d').push_application()
    else:
        logbook.StreamHandler(sys.stdout, level=logbook.INFO).push_application()
def unsuspend_handler(args):
    matches = unsuspend_host(args.host)
    if not matches:
        print('No matching, suspended hosts.')
        return
    with logbook.StreamHandler(sys.stdout, bubble=True):
        for host in matches:
            log.info('Unsuspended {}', host)
def clize_log_level(*, logbook_level: 'll' = "NOTICE"):
    """Change log levels via the command line.

    The user selects which logging messages to see. See the six log levels here:
    https://logbook.readthedocs.io/en/stable/quickstart.html

    :param logbook_level: user-selected logging level
    """
    levels = {
        "DEBUG": logbook.DEBUG,
        "INFO": logbook.INFO,
        "NOTICE": logbook.NOTICE,
        "WARNING": logbook.WARNING,
        "ERROR": logbook.ERROR,
        "CRITICAL": logbook.CRITICAL,
    }
    # unknown names fall back to NOTICE, matching the original if/elif chain
    level = levels.get(logbook_level, logbook.NOTICE)
    logbook.StreamHandler(sys.stdout, level=level).push_application()
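# A hedged sketch of wiring this option into a clize entry point; `cli_main`
# is hypothetical, while clize.run is the library's documented runner.
from clize import run

def cli_main(*, logbook_level: 'll' = "NOTICE"):
    clize_log_level(logbook_level=logbook_level)
    logbook.Logger('app').notice('logging configured')

if __name__ == '__main__':
    run(cli_main)  # e.g. `python app.py --logbook-level DEBUG`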
def test_deadlock_in_emit():
    logbook_logger = logbook.Logger("logbook")
    obj = MyObject(logbook_logger.info)
    stream_handler = logbook.StreamHandler(stream=sys.stderr,
                                           level=logbook.DEBUG)
    stream_handler.lock = FakeLock()
    with stream_handler.applicationbound():
        logbook_logger.info("format this: {}", obj)
    assert not stream_handler.lock._deadlock_occurred
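# A hedged sketch of the test doubles used above (the names come from the
# test, the bodies are assumptions): MyObject logs from inside __format__,
# which re-enters the handler while it is emitting, and FakeLock records
# whether that re-entry would have deadlocked on a non-reentrant lock.
class MyObject(object):
    def __init__(self, log_func):
        self._log_func = log_func

    def __format__(self, format_spec):
        self._log_func("re-entrant log call from __format__")
        return "<MyObject>"

class FakeLock(object):
    def __init__(self):
        self._held = 0
        self._deadlock_occurred = False

    def acquire(self):
        if self._held:  # a real non-reentrant lock would block here
            self._deadlock_occurred = True
        self._held += 1

    def release(self):
        self._held -= 1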
def set_handler(args):
    old_value = get_client_parameter(args.hostname, args.parameter)
    try:
        old = set_client_parameter(args.hostname, args.parameter, args.value)
    except Exception as e:
        sys.exit('Failed to set parameter: {}'.format(e))
    if not old_value:
        with logbook.StreamHandler(sys.stdout, bubble=True):
            log.info('Set parameter {} for host {} to {}',
                     args.parameter, args.hostname, args.value)
    elif old:
        with logbook.StreamHandler(sys.stdout, bubble=True):
            log.info('Changed parameter {} for host {} from {} to {}',
                     args.parameter, args.hostname, old, args.value)
    else:
        print('No changes.')
from contextlib import contextmanager  # required for the yield-based handler below

@contextmanager
def logging_handler(level='INFO'):
    handler = logbook.StreamHandler(
        sys.stdout,
        format_string=FORMAT_STRING,
        level=level,
        bubble=False,
    )
    with logbook.NullHandler(), handler:
        yield
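# A minimal usage sketch for the context manager above, assuming
# FORMAT_STRING is defined in the same module.
with logging_handler(level='DEBUG'):
    logbook.Logger('app').debug('visible only inside this block')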
def get_logger(name='system', level=''):
    """Factory function that returns a configured logger."""
    logbook.set_datetime_format('local')
    ColorizedStderrHandler(bubble=False, level=level).push_thread()
    logbook.StreamHandler(sys.stdout, bubble=False, encoding='utf-8',
                          level=level).push_thread()
    return logbook.Logger(name)
def logging_register():
    level = logbook.TRACE
    log_filename = None  # set to a path to log to a rotating file instead of stdout
    if not log_filename:
        logbook.StreamHandler(sys.stdout, level=level).push_application()
    else:
        logbook.TimedRotatingFileHandler(log_filename,
                                         level=level).push_application()
def open_handler(args):
    with logbook.StreamHandler(sys.stdout, bubble=True):
        for host in args.host:
            for issue in args.issue_name:
                if open_issue(host, issue):
                    log.info('Manually opened {} issue for {}', issue, host)
                else:
                    print('Open {} issue for {} already exists.'.format(
                        issue, host))
def __init__(self, search_url=None, timeout=None, tries=5, verbose=False):
    self.search_url = search_url or self.get_sub_domain()
    self.token = None
    self.logger = logbook.Logger('BSPlayerLogger')
    self.tries = tries
    # guard against the default timeout=None, which would make the division fail
    self.timeout = timeout / tries if timeout else None
    if verbose:
        self.logger.handlers.append(logbook.StreamHandler(sys.stdout))