def init_logger(): """ pass """ system_log.handlers = [StderrHandler(bubble=True)] basic_system_log.handlers = [StderrHandler(bubble=True)] std_log.handlers = [StderrHandler(bubble=True)] user_log.handlers = [] user_system_log.handlers = []
def main():
    handler = StderrHandler()
    # handler.formatter = color_formatter
    handler.level = 2  # numeric logbook level; the named constants (e.g. logbook.INFO) are clearer
    nullhandler = NullHandler()
    with nullhandler.applicationbound():
        with handler.applicationbound():
            with catch_exceptions(""):
                try:
                    dispatch_command(urltomarkdown)
                except SystemExit:
                    # catch_exceptions is a bit too catchy
                    pass

def grouphandlers_basic():
    logger.info("out of group")
    with GroupHandler(StderrHandler()):
        logger.info("line1")
        time.sleep(5)
        logger.info("line2")

def main(proxy, ssl_insecure, listen_address, listen_port, log_level,
         homeserver):
    ssl = None if ssl_insecure is False else False

    StderrHandler(level=log_level.upper()).push_application()

    if log_level == "info":
        logger.level = logbook.INFO
    elif log_level == "warning":
        logger.level = logbook.WARNING
    elif log_level == "error":
        logger.level = logbook.ERROR
    elif log_level == "debug":
        logger.level = logbook.DEBUG

    loop = asyncio.get_event_loop()
    proxy, app = loop.run_until_complete(init(
        homeserver.geturl(),
        proxy.geturl() if proxy else None,
        ssl
    ))
    web.run_app(app, host=str(listen_address), port=listen_port)

def download_all():
    """
    Top-level executor of the fundamentals download - just downloads
    everything since 2007. You may want to schedule download_all to run
    daily during out-of-market hours.
    """
    StderrHandler().push_application()
    data = download()
    return data

def grouphandlers_levels():
    logger.info("out of group")
    # with GroupHandler(StderrHandler(level='ERROR')):
    with GroupHandler(StderrHandler(level='ERROR', bubble=True)):
        logger.info("started processing")
        logger.error("we failed before we started")
        time.sleep(5)
        logger.error("we failed when we ended")

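# Note on the two grouphandlers_* snippets above: GroupHandler buffers every
# record emitted inside its context and only forwards them to the wrapped
# StderrHandler when the group is popped, which is why the lines appear
# together after the sleep. A minimal self-contained sketch of that behavior
# (logger name and sleep duration are illustrative; GroupHandler is assumed
# importable from logbook, as these snippets imply):
import time
from logbook import Logger, StderrHandler, GroupHandler

demo_log = Logger('group-demo')

with GroupHandler(StderrHandler()):
    demo_log.info('buffered line 1')
    time.sleep(1)
    demo_log.info('buffered line 2')
# both records reach stderr here, when the GroupHandler context exits
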
def log(self, message):
    try:
        if self._logger is None:
            FileHandler(
                self.args.log,
                format_string="{record.time:%Y-%m-%d %H:%M:%S} {record.message}"
            ).push_application()
            if self.args.verbose > 1:
                StderrHandler(bubble=True).push_application()
            self._logger = Logger("tupa")
        self._logger.warn(message)
    except OSError:
        pass

from contextlib import contextmanager


@contextmanager  # assumed: the generator body below is clearly a context manager
def logging_context(path=None, level=None):
    from logbook import StderrHandler, FileHandler
    from logbook.compat import redirected_logging
    with StderrHandler(level=level or 'INFO').applicationbound():
        if path:
            if not os.path.isdir(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            with FileHandler(path, bubble=True).applicationbound():
                with redirected_logging():
                    yield
        else:
            with redirected_logging():
                yield

def cli(ctx, host, user, password, verbosity, ssl_insecure, proxy_host,
        proxy_type):
    StderrHandler(level=verbosity.upper()).push_application()

    if verbosity == "info":
        nio.logger_group.level = logbook.INFO
    elif verbosity == "warning":
        nio.logger_group.level = logbook.WARNING
    elif verbosity == "error":
        nio.logger_group.level = logbook.ERROR
    elif verbosity == "debug":
        nio.logger_group.level = logbook.DEBUG

    ctx.obj = CliClient(user, password, host[0], host[1], ssl_insecure,
                        proxy_host[0], proxy_host[1], proxy_type)

def main():
    global log
    StderrHandler().push_application()
    log = Logger("xbbs.coordinator")
    XBBS_CFG_DIR = os.getenv("XBBS_CFG_DIR", "/etc/xbbs")
    with open(path.join(XBBS_CFG_DIR, "coordinator.toml"), "r") as fcfg:
        cfg = CONFIG_VALIDATOR.validate(toml.load(fcfg))
    inst = Xbbs.create(cfg)

    for name, elem in cfg["projects"].items():
        project = Project(name, **elem, base=path.join(inst.project_base, name))
        inst.projects[name] = project
        os.makedirs(project.base, exist_ok=True)
        log.debug("got project {}", inst.projects[name])

    with inst.zmq.socket(zmq.REP) as sock_cmd, \
         inst.zmq.socket(zmq.PULL) as inst.intake, \
         inst.zmq.socket(zmq.ROUTER) as inst.worker_endpoint:
        # XXX: potentially make perms overridable? is that useful in any
        # capacity?
        inst.intake.bind(cfg["intake"]["bind"])
        _ipc_chmod(cfg["intake"]["bind"], 0o664)

        inst.worker_endpoint.bind(cfg["worker_endpoint"])
        inst.worker_endpoint.set(zmq.ROUTER_MANDATORY, 1)
        _ipc_chmod(cfg["worker_endpoint"], 0o664)

        sock_cmd.bind(cfg["command_endpoint"]["bind"])
        _ipc_chmod(cfg["command_endpoint"]["bind"], 0o664)

        dumper = gevent.signal_handler(signal.SIGUSR1, dump_projects, inst)
        log.info("startup")
        intake = gevent.spawn(intake_loop, inst)
        job_pull = gevent.spawn(job_pull_loop, inst)
        try:
            command_loop(inst, sock_cmd)
        finally:
            # XXX: This may not be the greatest way to handle this
            gevent.killall(inst.project_greenlets[:])
            gevent.kill(intake)
            gevent.kill(job_pull)
            dumper.cancel()

def main(config_file, **kwargs):
    with open(config_file) as fh:
        # note: yaml.safe_load is preferable with modern PyYAML
        config = yaml.load(fh)
    try:
        rmq_settings = config["rabbitmq_logging"]
    except KeyError:
        print("RabbitMQ logging not configured in {}".format(config_file))
        sys.exit()

    handlers = [NullHandler()]
    if not kwargs["quiet"]:
        handlers.append(StderrHandler(bubble=True))
    if kwargs["filename"]:
        handlers.append(FileHandler(kwargs["filename"], bubble=True))
    if kwargs["log_db"]:
        try:
            cdb_settings = config["couchdb_logging"]
        except KeyError:
            print("CouchDB logging not configured in {}".format(config_file))
            sys.exit()
        db_handler = DatabaseHandler(cdb_settings["couchdb_url"],
                                     backend=CouchDBBackend,
                                     db=cdb_settings["database"],
                                     bubble=True)
        handlers.append(db_handler)

    setup = NestedSetup(handlers)
    print("Now waiting for log messages")
    with setup:
        subscriber = RabbitMQSubscriber(rmq_settings["url"],
                                        queue=rmq_settings["log_queue"])
        try:
            subscriber.dispatch_forever()
        except KeyboardInterrupt:
            print("\nLog subscriber shutting down")
            subscriber.close()
        except Exception:
            print("Log subscriber quit (unexpectedly)")

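# The NestedSetup in main() above composes a whole handler stack as one
# object: the NullHandler at the bottom swallows records so they never reach
# logbook's default handler, while each bubbling handler above it gets a copy
# of every record. A minimal sketch of the same pattern (handler choice here
# is illustrative):
from logbook import Logger, NestedSetup, NullHandler, StderrHandler

setup = NestedSetup([
    NullHandler(),               # bottom of the stack: swallow records here
    StderrHandler(bubble=True),  # handle, then let records keep bubbling
])
with setup.applicationbound():
    Logger('nested-demo').info('handled by the composed setup')
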
def test_tagging_logger(default_handler):
    from logbook import StderrHandler
    from logbook.more import TaggingLogger

    logger = TaggingLogger('tagged', ['a', 'b'])
    handler = StderrHandler(format_string="{record.msg}|{record.extra[tags]}")

    with handler:
        with capturing_stderr_context() as captured:
            logger.a("a")
            logger.b("b")

    stderr = captured.getvalue()
    assert "a|['a']" in stderr
    assert "a|['b']" not in stderr
    assert "b|['b']" in stderr
    assert "b|['a']" not in stderr

def main():
    parser = argparse.ArgumentParser(prog='unins')
    parser.add_argument("--insales-account", action=EnvDefault,
                        envvar='UNINS_INSALES_ACCOUNT',
                        help="InSales account name, i.e., the part before "
                             ".myinsales.ru")
    parser.add_argument("--insales-key", action=EnvDefault,
                        envvar='UNINS_INSALES_KEY',
                        help="InSales API key")
    parser.add_argument("--insales-password", action=EnvDefault,
                        envvar='UNINS_INSALES_PASSWORD',
                        help="InSales API password")
    parser.add_argument("--unleashed-id", action=EnvDefault,
                        envvar='UNINS_UNLEASHED_ID',
                        help="Unleashed API ID")
    parser.add_argument("--unleashed-key", action=EnvDefault,
                        envvar='UNINS_UNLEASHED_KEY',
                        help="Unleashed private key")

    subparsers = parser.add_subparsers()
    parser_import_products = subparsers.add_parser('import-products')
    parser_import_products.set_defaults(func=import_products)
    parser_import_so = subparsers.add_parser('import-so')
    parser_import_so.set_defaults(func=import_so)

    args = parser.parse_args()
    with StderrHandler():
        try:
            args.func(args)
        except KeyboardInterrupt:
            pass

def main():
    logbook.concurrency.enable_gevent()
    global log
    StderrHandler().push_application()
    log = Logger('xbbs.worker')
    inst = XbbsWorker()

    XBBS_CFG_DIR = os.getenv("XBBS_CFG_DIR", "/etc/xbbs")
    with open(path.join(XBBS_CFG_DIR, "worker.toml"), "r") as fcfg:
        cfg = CONFIG_VALIDATOR.validate(toml.load(fcfg))
    job_request = msgs.JobRequest(capabilities=cfg["capabilities"]).pack()

    gevent.signal_handler(signal.SIGUSR1, gevent.util.print_run_info)
    log.info(cfg)
    while True:
        with inst.zmq.socket(zmq.REQ) as jobs:
            jobs.connect(cfg["job_endpoint"])
            while True:
                jobs.send(job_request)
                log.debug("waiting for job...")
                # the coordinator sends a heartbeat each minute, so 1.5
                # minutes should be a sane duration to assume coordinator
                # death on
                if jobs.poll(90000) == 0:
                    # breaking the inner loop will cause a reconnect;
                    # since the coordinator is presumed dead, drop requests
                    # not yet sent to it
                    jobs.set(zmq.LINGER, 0)
                    log.debug("dropping socket after a heartbeat timeout")
                    break
                try:
                    msg = jobs.recv()
                    if len(msg) == 0:
                        # drop null msgs
                        continue
                    process_job_msg(inst, msg)
                except KeyboardInterrupt:
                    log.exception("interrupted")
                    return
                except Exception as e:
                    log.exception("job error", e)

    prefix = args.vocab[0]
    args.vocab = [f'{prefix}.{args.langs[0]}', f'{prefix}.{args.langs[1]}']
    return args, state_dict, resume


if __name__ == '__main__':
    args, state_dict, resume = prepare()
    # redirect stdout and stderr to log file
    # redirection = open(log_name, 'a', buffering=1)
    # sys.stdout = redirection
    # sys.stderr = redirection
    stdout_handler = StreamHandler(sys.stdout, bubble=True)
    stderr_handler = StderrHandler(level=WARNING)
    # write logs to log.MODEL file
    # file_handler = FileHandler(log_name, bubble=True)
    # file_handler.format_string = '{record.message},{record.extra[cwd]}'
    # file_handler.format_string = '[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] {record.level_name}: {record.message}'
    # with file_handler.applicationbound():
    stdout_handler.format_string = '[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] ' \
                                   '{record.level_name}: {record.message}'
    with stdout_handler.applicationbound():
        if resume:
            logger.info(
                f'Resume training from checkpoint: {Loader.get_latest(args.model)[1]}'
            )
        try:
            main(args)

from __future__ import print_function
from __future__ import unicode_literals

import sys
import os
import numbers
import re
from collections import defaultdict
from pprint import pprint as pp

# Import third party library
# https://github.com/mitsuhiko/logbook
from logbook import Logger, StderrHandler

log = Logger('Logbook')
log_handler = StderrHandler()


def convert_to_number(record_element):
    """Check if record_element is of type unicode and try to make
    record_element into a float.

    Args:
        record_element (element): Variant record

    Return:
        float: If able
        None: If unable to turn into float
    """
    if isinstance(record_element, (unicode)):  # Unicode type

from nanomsg import (Socket, PUSH, PULL, PUB, SUB, SUB_SUBSCRIBE, PAIR,
                     SOL_SOCKET, SNDTIMEO, NanoMsgAPIError)
from nanomsg import wrapper as nn_wrapper
from tiltai.utils import tiltai_logs_format

import threading
import Queue
import socket

from logbook import Logger, StderrHandler

err = StderrHandler(format_string=tiltai_logs_format)
log = Logger("network[nanolink]")

sock_type = {'PUSH': PUSH, 'PULL': PULL, 'PUB': PUB, 'SUB': SUB, 'PAIR': PAIR}

subbed_topics = []
out_endpoints = []
in_endpoints = []


def receiver(queue, addresses, stype):
    """
    Bind a queue to a listening nanomsg socket's multiple endpoints in a
    separate thread.

    Parameters
    ----------
    queue : Queue
        A Queue object to be filled by receiver socket

def init_logger():
    system_log.handlers = [StderrHandler(bubble=True)]
    user_log.handlers = [StderrHandler(bubble=True)]
    user_system_log.handlers = [StderrHandler(bubble=True)]

def init_logger():
    StderrHandler(bubble=True).push_application()

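# The one-liner above installs the handler globally for the rest of the
# process. When the handler should only apply to a delimited region (a
# pattern several snippets here use), applicationbound() gives a scoped
# equivalent; a minimal sketch (logger name is illustrative):
from logbook import Logger, StderrHandler

scoped_log = Logger('scoped-demo')
with StderrHandler(bubble=True).applicationbound():
    scoped_log.info('handled only while the block is active')
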
    logger.debug('Start repairing problems')
    original_src = args.source_file
    targetContractName = args.targetContractName
    targetVul = args.targetVul
    targetLoc = args.targetLoc
    repairCore = CR((), (), args.detectorArg, args.synthesizerArg, args.coreArg)
    # TODO: pass num plausible from CLI
    rst = await repairCore.repair(original_src, targetContractName, targetVul,
                                  targetLoc, 1, RepairTarget(), None)
    if not rst:
        print("Unable to find a plausible patch...")
    else:
        print("Source files after applying plausible patches are in:")
        for p in rst:
            print(p.PathPatchedCode)


from logbook import Logger, StderrHandler
import logbook
import os

logHandler = StderrHandler(
    level=os.environ.get('LOG_LEVEL', logbook.get_level_name(logbook.INFO)))
with logHandler.applicationbound():
    main()

    servers = [Server(i) for i in range(1, NUM_SERVERS + 1)]
    start_servers(servers)
    time.sleep(10)
    try:
        return test(servers)
    except Exception as e:
        logger.exception('Test failed: %s' % e)
        return 1
    finally:
        logger.info('Stopping')
        stop(servers)


if __name__ == '__main__':
    format = '[{record.time}] {record.level_name:>5} [{record.extra[worker_id]}] {record.message}'
    logging_setup = NestedSetup([
        NullHandler(),
        FileHandler(
            filename=os.path.join(os.path.dirname(__file__), 'log/client.log'),
            format_string=format,
            bubble=True,
        ),
        StderrHandler(level=logbook.INFO, format_string=format, bubble=True),
    ])
    with logging_setup.applicationbound():
        sys.exit(main())

def user_std_handler_log_formatter(record, handler):
    from rqalpha.environment import Environment
    try:
        dt = Environment.get_instance().calendar_dt.strftime(DATETIME_FORMAT)
    except Exception:
        dt = datetime.now().strftime(DATETIME_FORMAT)

    log = "{dt} {level} {msg}".format(
        dt=dt,
        level=record.level_name,
        msg=to_utf8(record.message),
    )
    return log


user_std_handler = StderrHandler(bubble=True)
user_std_handler.formatter = user_std_handler_log_formatter


def formatter_builder(tag):
    def formatter(record, handler):
        log = "[{formatter_tag}] [{time}] {level}: {msg}".format(
            formatter_tag=tag,
            level=record.level_name,
            msg=to_utf8(record.message),
            time=record.time,
        )
        if record.formatted_exception:
            log += "\n" + record.formatted_exception

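# user_std_handler_log_formatter above shows the formatter protocol logbook
# uses: any callable taking (record, handler) and returning the final string
# can be assigned to handler.formatter instead of going through a
# format_string. A self-contained sketch of the same pattern (names are
# illustrative):
from logbook import Logger, StderrHandler

def plain_formatter(record, handler):
    return "{} {}".format(record.level_name, record.message)

fmt_handler = StderrHandler(bubble=True)
fmt_handler.formatter = plain_formatter
with fmt_handler.applicationbound():
    Logger('formatter-demo').warn('custom-formatted record')
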
def main():
    parser = argparse.ArgumentParser(description='Extract features')
    parser.add_argument(
        '-i', '--input', required=True, help='Raw data input dir'
    )
    parser.add_argument(
        '-o', '--output', required=True, help='Output dir'
    )
    parser.add_argument(
        '--filter', default='lowpass', help='Filtering Type'
    )
    parser.add_argument(
        '--window', type=int, required=True, help='Window length'
    )
    parser.add_argument(
        '--stride', type=int, required=True, help='Stride length'
    )
    parser.add_argument(
        '-f', '--featurelist', nargs='+', help='Features to extract',
        required=True
    )
    parser.add_argument(
        '--downsample', type=int, default=1,
        help='Downsample step, default takes no downsample'
    )
    parser.add_argument(
        '--log', default='info',
        choices=['debug', 'warning', 'info', 'error'],
        help='Logging level, default info'
    )
    parser.add_argument(
        '--dataset',
        choices=['ninapro-db1', 'ninapro-db2', 'ninapro-db3', 'ninapro-db4',
                 'ninapro-db5', 'ninapro-db6', 'ninapro-db7',
                 'biopatrec-db1', 'biopatrec-db2', 'biopatrec-db3',
                 'biopatrec-db4'],
        help='Dataset choices', required=True
    )
    args = parser.parse_args()

    with NullHandler().applicationbound():
        with StderrHandler(level=args.log.upper()).applicationbound():
            with FileHandler(
                os.path.join(ensure_dir(args.output), 'log'),
                level=args.log.upper(),
                bubble=True
            ).applicationbound():
                try:
                    return run(args)
                except Exception:
                    log.exception('Failed')

def main_scotch_deploy():
    log = Logger('main')

    parser = ArgumentParser()
    parser.add_argument('-c', '--configuration-file', action='append',
                        default=[],
                        help='Configuration files to search. Can be given '
                             'multiple times, default is {!r}'
                             .format(Site.DEFAULT_CONFIGURATION_PATHS))
    parser.add_argument('-d', '--debug', default=False, action='store_true')
    subparsers = parser.add_subparsers(dest='action', help='Action to perform')

    cmd_list = subparsers.add_parser('list', help='List available apps')
    cmd_deploy = subparsers.add_parser('deploy', help='Deploy app')
    cmd_deploy.add_argument('app_name', nargs='+')
    cmd_dump = subparsers.add_parser('dump', help='Dump app configuration')
    cmd_dump.add_argument('app_name', nargs='+')

    args = parser.parse_args()

    # set up logging handlers
    if not args.debug:
        NullHandler(level=logbook.DEBUG).push_application()
    handler = StderrHandler(level=logbook.INFO)
    handler.format_string = '{record.message}'
    handler.push_application()

    wd = Site(args)

    # set site-umask
    umask = int(wd.config['site']['umask'], 8)
    log.debug('Setting umask to {:04o}'.format(umask))
    os.umask(umask)

    def _header(s):
        print(s)
        print('=' * len(s))

    # commands:
    def list():
        wd.load_apps()
        for name, app in sorted(wd.apps.items()):
            print(name)
            for domain in sorted(app.domains):
                print('  {}{}'.format(domain, app.url_prefix))

    def deploy():
        for name in args.app_name:
            app = wd.load_app(name)
            app.deploy()

    def dump():
        for name in args.app_name:
            app = wd.load_app(name)
            app.config['app']['instance_id'] = '(INSTANCE_ID)'

            # dump config
            _header('App configuration for {}'.format(name))
            for section_name, section in sorted(app.config.items()):
                for key, value in sorted(section.items()):
                    print('{}:{} = {!r}'.format(section_name, key, value))
            print()

    # call appropriate command
    try:
        locals()[args.action]()
    except subprocess.CalledProcessError as e:
        log.critical('Command failed: {}'.format(' '.join(e.cmd)))

def log_with_logbook():
    with StderrHandler(level='NOTICE'):
        yield

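# The snippet above passes the level as the string 'NOTICE'; logbook accepts
# level names wherever it accepts a level and lookup_level converts them
# explicitly. A tiny sketch:
import logbook

assert logbook.lookup_level('NOTICE') == logbook.NOTICE
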
import sys
from logbook import Logger, StreamHandler

log = Logger('Stream handler logger')

StreamHandler(sys.stdout).push_application()
log.warn('warning')
log.error("error")

from logbook import StderrHandler

handler = StderrHandler()
handler.format_string = '{record.channel}: {record.message}'
handler.push_application()
log.warn('warning')
log.error("error")

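# A detail worth knowing about the snippet above: StreamHandler(sys.stdout)
# captures the stream object once at construction time, while StderrHandler
# resolves sys.stderr at write time, so it keeps working if stderr is later
# redirected (this matches logbook's documented behavior; no extra API is
# assumed here).
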
    log.info('reading {}'.format(bname))
    image = imread(image_name, as_grey=True)
    log.info('resizing {}'.format(bname))
    return resize(image, (IMG_NROW, IMG_NCOL))


@click.command()
@click.option('--output', type=click.File('wb'), default='-')
@click.argument('image_files', nargs=-1, type=click.Path(exists=True))
def main(output, image_files):
    log.info('starting with {} image files'.format(len(image_files)))
    images = (prep(i) for i in image_files)
    log.info('starting online dictionary learning')
    D = None
    for image in images:
        D = dict_learning_online(image, dict_init=D, n_components=2000,
                                 verbose=True, n_jobs=-1, n_iter=N_ITER,
                                 batch_size=BATCH_SIZE, return_code=False)
    output.write(pd.DataFrame(D).to_csv())
    log.info('done')


if __name__ == '__main__':
    with StderrHandler().applicationbound():
        main()

sns.set_style('whitegrid')
pd.set_option('display.expand_frame_repr', False)
np.random.seed(42)

# ### Load zipline extensions

# Only need this in notebook to find bundle.

# In[4]:

load_extensions(default=True, extensions=[], strict=True, environ=None)

# In[5]:

log_handler = StderrHandler(
    format_string='[{record.time:%Y-%m-%d %H:%M:%S.%f}]: ' +
                  '{record.level_name}: {record.func_name}: {record.message}',
    level=INFO)
log_handler.push_application()
log = Logger('Algorithm')

# ## Algo Params

# We plan to hold up to 20 long and 20 short positions whenever there are at
# least 10 on either side that meet the criteria (positive/negative
# prediction for long/short position).

# In[6]:

N_LONGS = 20
N_SHORTS = 20
MIN_POSITIONS = 10

# ## Load Data

""" borealis ~~~~~~~~ A modular, object oriented package management API for Archlinux. :copyright: (c) 2011 David Gidwani :license: New BSD, see LICENSE """ from logbook import Logger, StderrHandler from ufl.io.shell import cformat __version__ = "0.1-pre" _message_colors = ["%K", "%B", "%B", "%R", "%R", "%R"] default_handler = StderrHandler() default_handler.formatter = (lambda record, handler: cformat("{}{}:%n {}".format(_message_colors[record.level - 1], record.level_name.lower(), record.message))) log = Logger(__name__)
async def daemon(context, log_level, debug_encryption, config, data_path):
    loop = asyncio.get_event_loop()

    conf_dir = user_config_dir("pantalaimon", "")
    data_dir = user_data_dir("pantalaimon", "")
    create_dirs(data_dir, conf_dir)

    config = config or os.path.join(conf_dir, "pantalaimon.conf")
    data_dir = data_path or data_dir

    if log_level:
        log_level = parse_log_level(log_level)

    pan_conf = PanConfig(config, log_level)

    try:
        pan_conf.read()
    except (OSError, PanConfigError) as e:
        context.fail(e)

    if not pan_conf.servers:
        context.fail("Homeserver is not configured.")

    logger.level = pan_conf.log_level

    if pan_conf.debug_encryption or debug_encryption:
        nio.crypto.logger.level = logbook.DEBUG

    StderrHandler().push_application()

    servers = []
    proxies = []

    if UI_ENABLED:
        from pantalaimon.ui import GlibT

        pan_queue = janus.Queue()
        ui_queue = janus.Queue()

        glib_thread = GlibT(
            pan_queue.sync_q,
            ui_queue.sync_q,
            data_dir,
            pan_conf.servers.values(),
            pan_conf,
        )

        glib_fut = loop.run_in_executor(None, glib_thread.run)
        message_router_task = asyncio.create_task(
            message_router(ui_queue.async_q, pan_queue.async_q, proxies))
    else:
        glib_thread = None
        glib_fut = None
        pan_queue = None
        ui_queue = None
        message_router_task = None

    try:
        for server_conf in pan_conf.servers.values():
            proxy, runner, site = await init(data_dir, server_conf,
                                             pan_queue, ui_queue)
            servers.append((proxy, runner, site))
            proxies.append(proxy)
    except keyring.errors.KeyringError as e:
        context.fail(f"Error initializing keyring: {e}")

    async def wait_for_glib(glib_thread, fut):
        glib_thread.stop()
        await fut

    home = os.path.expanduser("~")
    os.chdir(home)

    event = asyncio.Event()

    def handler(signum, frame):
        raise KeyboardInterrupt

    signal.signal(signal.SIGTERM, handler)

    try:
        for proxy, _, site in servers:
            click.echo(f"======== Starting daemon for homeserver "
                       f"{proxy.name} on {site.name} ========")
            await site.start()

        click.echo("(Press CTRL+C to quit)")
        await event.wait()
    except (KeyboardInterrupt, asyncio.CancelledError):
        for _, runner, _ in servers:
            await runner.cleanup()

        if glib_fut:
            await wait_for_glib(glib_thread, glib_fut)

        if message_router_task:
            message_router_task.cancel()
            await asyncio.wait({message_router_task})

        raise

def run():
    StderrHandler().push_application()
    _load_all()
    app(obj=utils.Bunch())