def setup_logging(console_level, log_file=None):
    '''Log to stderr at the given console level (0: warnings, 1: info,
    2+: debug) and, when a path is given, also to a log file at DEBUG.'''
    from logging import DEBUG, INFO, WARNING, Formatter
    from logging.handlers import WatchedFileHandler
    root = logging.getLogger()
    root.setLevel(DEBUG)
    if console_level < 3:
        # TODO: filter in a handler instead in logger
        logging.getLogger('botocore').setLevel(INFO)
    console = logging.StreamHandler()
    console.setFormatter(Formatter(log_format))
    if console_level == 0:
        console.setLevel(WARNING)
    elif console_level == 1:
        console.setLevel(INFO)
    else:
        console.setLevel(DEBUG)
    root.addHandler(console)
    if log_file:
        # WatchedFileHandler stays valid across external log rotation.
        file_handler = WatchedFileHandler(str(log_file))
        file_handler.setFormatter(Formatter(log_format))
        file_handler.setLevel(DEBUG)
        root.addHandler(file_handler)
def add_file_handler(self, log_file):
    # Attach a rotation-safe file handler that mirrors the logger's own
    # level and uses a timestamped "level - message" format.
    file_handler = WatchedFileHandler(log_file)
    file_handler.setLevel(self.logger.level)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(levelname)s - %(message)s',
                          "%Y-%m-%d %H:%M:%S"))
    self.logger.addHandler(file_handler)
def getLogger(name, level=logging.INFO, handlers=None):
    """Return the logger *name*, optionally configured with handlers.

    ``handlers`` may contain "console" (plain-message StreamHandler),
    "file" (``handlers['file']['logfile']`` names the log file, so a dict
    is required for this one) and "syslog" (/dev/log, LOG_SYSLOG
    facility). When empty or None the logger is returned untouched.

    Fix: the default was a shared mutable list (``handlers=[]``);
    ``None`` is the idiomatic, backward-compatible replacement.
    """
    logger = logging.getLogger(name)
    if handlers:
        logger.setLevel(level)
        if "console" in handlers:
            strm = StreamHandler()
            strm.setLevel(level)
            strm.setFormatter(logging.Formatter('%(message)s'))
            logger.addHandler(strm)
        if "file" in handlers:
            conf = handlers['file']
            fl = WatchedFileHandler(conf['logfile'])
            fl.setLevel(level)
            fl.setFormatter(logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
            logger.addHandler(fl)
        if "syslog" in handlers:
            sysl = SysLogHandler(address='/dev/log',
                                 facility=SysLogHandler.LOG_SYSLOG)
            sysl.setLevel(level)
            sysl.setFormatter(logging.Formatter(
                '%(name)s[' + str(os.getpid()) + '] %(levelname)-8s: %(message)s'))
            logger.addHandler(sysl)
    return logger
def _initLogging(self):
    # Configure the root logger: verbose console logging when running
    # interactively, otherwise syslog plus an optional error-log file.
    logging.basicConfig()
    root = logging.getLogger()
    if self.interactive:
        root.setLevel(logging.DEBUG)
        return
    # Daemon mode: drop the default console handler basicConfig added.
    root.removeHandler(root.handlers[0])
    if self.error_log:
        # WatchedFileHandler keeps working across logrotate.
        err_handler = WatchedFileHandler(filename=self.error_log)
        err_handler.setFormatter(logging.Formatter(
            fmt='\n\n%(asctime)s pid:%(process)d thread: %(thread)d\n%(module)s - %(lineno)d\n%(message)s'))
        err_handler.setLevel(logging.ERROR)
        root.addHandler(err_handler)
    sys_handler = SysLogHandler(address='/dev/log',
                                facility=SysLogHandler.LOG_DAEMON)
    sys_handler.setFormatter(logging.Formatter(
        fmt=self.proc_name + '[%(process)d]: %(message)s'))
    root.addHandler(sys_handler)
    root.setLevel(logging.INFO)
def setup_log_file(log_file_path):
    """Attach a DEBUG-level, rotation-safe file handler for
    *log_file_path* to the root logger, using the module ``log_format``."""
    from logging import DEBUG, getLogger, Formatter
    from logging.handlers import WatchedFileHandler
    file_handler = WatchedFileHandler(str(log_file_path))
    file_handler.setLevel(DEBUG)
    file_handler.setFormatter(Formatter(log_format))
    getLogger('').addHandler(file_handler)
def setup_logger(configs):
    """Setup logging

    Args:
        configs (dict): logging configuration; ``level`` names a logging
            level, optional ``file`` names a log file (stdout otherwise)

    Returns:
        logging.logger: the configured logger
    """
    # TO-DO: use logging.config.dictConfig instead
    logger = logging.getLogger(LOGGER)
    level = getattr(logging, configs['level'])
    logger.setLevel(level)
    fmt = logging.Formatter(
        '%(threadName)s::%(levelname)s::%(asctime)s'
        '::%(lineno)d::(%(funcName)s) %(message)s'
    )
    # File handler when configured, stdout otherwise.
    if configs.get('file', False):
        handler = WatchedFileHandler(configs.get('file'))
    else:
        handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(fmt)
    logger.addHandler(handler)
    return logger
def add_logger(logger_name, file_name=None, parent=None, propagate=False,
               log_level='INFO', log_dir='/var/log/natrixclient'):
    """Add a new logger writing to ``{log_dir}/{file_name}.log``.

    :param logger_name: name of the logger to create/configure
    :param file_name: log file basename; defaults to ``logger_name``
    :param parent: logger assigned as this logger's parent
    :param propagate: whether records propagate to ancestor loggers
    :param log_level: level name (or number) for the file handler
    :param log_dir: directory that holds the log file
    :return: the configured logger (fix: the docstring documented a
        return value but the function previously returned None)
    """
    logger = logging.getLogger(logger_name)
    logger.propagate = propagate
    logger.parent = parent
    file_name = logger_name if file_name is None else file_name
    log_file_name = '{log_dir}/{file_name}.log'.format(log_dir=log_dir,
                                                       file_name=file_name)
    # WatchedFileHandler reopens the file when an external tool rotates it.
    handler = WatchedFileHandler(filename=log_file_name)
    handler.setLevel(log_level)
    handler.setFormatter(logging.Formatter(fmt=FILE_LOGGING_FORMAT,
                                           datefmt=FILE_LOGGING_DATE_FORMAT))
    logger.addHandler(handler)
    return logger
def main(args):
    """Configure the "localftp" logger and start the test FTP server.

    Console logs go to stderr (WARNING and above when a log file is in
    use, otherwise at ``args.log_level``); ``args.log_path``, when set,
    receives full logs via a rotation-safe handler.

    Fixes: the console handler was added to the logger twice (every
    console record was emitted in duplicate) and the logger level was
    set a second, redundant time.
    """
    logger = logging.getLogger("localftp")
    logger.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # Fall back to a temp dir when no usable work dir was supplied.
    work_dir = args.work if args.work and os.path.isdir(
        args.work) else tempfile.mkdtemp()
    return serve(work_dir, "testuser", "password", "127.0.0.1", args.port)
def init(cls, config):
    # Configure the root logger from *config*: optional console handler,
    # optional primary log file, and an optional extra "temp" log file in
    # the current directory. File handlers survive log rotation.
    formatter = logging.Formatter(config.FORMAT)
    root = logging.getLogger('')
    root.setLevel(config.ROOT_LEVEL)
    if config.LOG_TO_CONSOLE:
        console = logging.StreamHandler()
        console.setLevel(config.CONSOLE_LEVEL)
        console.setFormatter(formatter)
        root.addHandler(console)
    if config.LOG_TO_FILE:
        handler = WatchedFileHandler(
            os.path.join(config.FILE_BASE, cls.generate_log_file_name()),
            encoding='utf-8')
        handler.setLevel(config.FILE_LEVEL)
        handler.setFormatter(formatter)
        root.addHandler(handler)
    if config.TEMP_LOG:
        handler = WatchedFileHandler(
            os.path.join('', cls.generate_log_file_name()),
            encoding='utf-8')
        handler.setLevel(config.FILE_LEVEL)
        handler.setFormatter(formatter)
        root.addHandler(handler)
def setup_logfile_logger(log_path, log_level=None, log_format=None,
                         date_format=None):
    """
    Set up logging to a file.

    Attaches a rotation-safe handler for *log_path* to the module's
    root logger and returns the handler. ``log_level`` is a level name
    looked up in LOG_LEVELS (unknown names fall back to ERROR); format
    strings default to a timestamped "[name][LEVEL] message" layout.
    """
    handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8', delay=0)
    if log_level:
        handler.setLevel(LOG_LEVELS.get(log_level.lower(), logging.ERROR))
    # Default console formatter config when none supplied.
    fmt = log_format or '%(asctime)s [%(name)s][%(levelname)s] %(message)s'
    datefmt = date_format or '%Y-%m-%d %H:%M:%S'
    handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
    root_logger.addHandler(handler)
    return handler
def __prepare_logger(self):
    # Build the application logger from self.config.log: optional file
    # handler, optional stdout handler in DEBUG mode, shared level/format.
    self.logger = logging.getLogger('app')
    self.logger.propagate = False
    level_name = (self.config.log.get("LOG_LEVEL") or
                  self.DEFAULT_LOG_LEVEL).upper()
    log_level = getattr(logging, level_name)
    if "LOG_FILE" in self.config.log:
        from logging.handlers import WatchedFileHandler
        self.logger.addHandler(WatchedFileHandler(self.config.log["LOG_FILE"]))
    if self.config.log.get("DEBUG"):
        # Debug mode forces DEBUG level and mirrors output on stdout.
        log_level = logging.DEBUG
        self.logger.addHandler(logging.StreamHandler(stream=sys.stdout))
    formatter = logging.Formatter(
        self.config.log.get("LOG_FORMAT") or self.DEFAULT_LOG_FORMAT)
    self.logger.setLevel(log_level)
    for handler in self.logger.handlers:
        handler.setLevel(log_level)
        handler.setFormatter(formatter)
    self.logger.info("Logger created. Environment type set to %s"
                     % self.envtype)
def set_logger():
    # Idempotent: a logger that already has handlers is left untouched.
    if logger.hasHandlers():
        return
    logger.setLevel(logging.DEBUG)
    formatter = CustomFormatter(
        '[%(levelname)s %(asctime)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S.%f'
    )
    # File handler first, then console — both at DEBUG, same formatter.
    for handler in (WatchedFileHandler('logs/output.log', encoding='utf8'),
                    StreamHandler()):
        handler.setFormatter(formatter)
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        logger.info('Added logging handler: ' + str(handler))
    logger.info('Set new logger up.')
    return
def get_log(self, log_name):
    # Return a cached logger, or build one writing INFO+ records to
    # <LOG_DIR>/<name>.log and ERROR+ records to <LOG_DIR>/<name>.err,
    # plus a DEBUG console handler when Config.DEBUG is set.
    if log_name in self.registered_logs:
        return self.registered_logs[log_name]
    logger = logging.getLogger(log_name)
    logger.handlers.clear()
    logger.setLevel(logging.INFO)
    file_fmt = logging.Formatter('%(asctime)s %(levelname)s: %(message)s ')
    for pattern, level in (('%s.log', logging.INFO),
                           ('%s.err', logging.ERROR)):
        handler = WatchedFileHandler(
            os.path.join(Config.LOG_DIR, pattern % log_name))
        handler.setLevel(level)
        handler.setFormatter(file_fmt)
        logger.addHandler(handler)
    if Config.DEBUG:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
        logger.addHandler(console_handler)
    self.registered_logs[log_name] = logger
    return logger
def _logger(level, out_stream, name=None, log_file=None,
            log_file_level=logging.DEBUG, milliseconds=False):
    """Create the actual logger instance, logging at the given level

    if name is None, it will get args[0] without the extension (e.g. gina).
    'out_stream must be passed, the recommended value is sys.stderr'
    """
    if name is None:
        # Derive the logger name from the running script's filename.
        name = re.sub('.py[oc]?$', '', sys.argv[0])
    # We install our custom handlers and formatters on the root logger.
    # This means that if the root logger is used, we still get correct
    # formatting. The root logger should probably not be used.
    root_logger = logging.getLogger()
    # reset state of root logger
    reset_root_logger()
    # Standard format suitable for both command line tools and cron jobs
    # (command line tools often end up being run from inside cron).
    stream_handler = logging.StreamHandler(out_stream)
    # Level lives on the handler rather than the logger, so other
    # handlers with different levels can be added (e.g. debug logs).
    root_logger.setLevel(0)
    stream_handler.setLevel(level)
    if milliseconds:
        # Python default datefmt includes milliseconds.
        formatter = LaunchpadFormatter(datefmt=None)
    else:
        # Launchpad default datefmt does not include milliseconds.
        formatter = LaunchpadFormatter()
    stream_handler.setFormatter(formatter)
    root_logger.addHandler(stream_handler)
    if log_file is not None:
        # Add an optional aditional log file.
        file_handler = WatchedFileHandler(log_file, encoding="UTF8")
        file_handler.setFormatter(formatter)
        file_handler.setLevel(log_file_level)
        root_logger.addHandler(file_handler)
    logger = logging.getLogger(name)
    # Set the global log
    log._log = logger
    if log_file is not None:
        # Inform the user the extra log file is in operation.
        log.info(
            "Logging %s and higher messages to %s" % (
                logging.getLevelName(log_file_level), log_file))
    return logger
def parseArgs():
    """parse arguments
    @return: options
    """
    opt = optparse.OptionParser()
    opt.add_option("--maxWait", "-W", type="int", action="store", default=0,
                   help="max wait in seconds. 0=> forever")
    opt.add_option("--noWait", action="store_true", default=False,
                   help="do not wait")
    opt.add_option("--rd", "-r", action="store_true", default=False,
                   help="drop read cache")
    opt.add_option("--wr", "-w", action="store_true", default=False,
                   help="drop write cache")
    opt.add_option("--verbose", action="store_true", default=False,
                   help="more verbose output")
    options, _ = opt.parse_args()
    # Verbosity drives both the module logger and its file handler.
    logLevel = logging.DEBUG if options.verbose else logging.WARN
    _logger.setLevel(logLevel)
    handler = WatchedFileHandler(_LOG_FILE)
    handler.setLevel(logLevel)
    handler.setFormatter(logging.Formatter("%(asctime)s - %(message)s"))
    _logger.addHandler(handler)
    # At least one cache-drop action is required.
    if not options.rd and not options.wr:
        _logger.error("Must provide one or more of --rd/--wr")
        exit(1)
    return options
def create_app():
    # Application factory: builds and configures the Flask app, its
    # asset bundles, databases, login manager and blueprints.
    from server.views.frontend import frontend as blueprint_frontend
    from server.views.entry import entry as blueprint_entry
    from server.views.filter import filter as blueprint_filter
    from server.views.pinboard import pinboard as blueprint_pinboard
    from server.db import db
    from server.login import login_manager
    app = Flask(__name__, instance_relative_config=True)
    app.jinja_options = dict(app.jinja_options)
    app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')
    # Defaults first, then optional overrides from the environment.
    app.config.from_pyfile("default_settings.py")
    app.config.from_envvar('PINBOARD_SETTINGS', silent=True)
    if not app.debug:
        # Production: capture WARNING+ records in a rotation-safe log file.
        file_handler = WatchedFileHandler(app.config.get("LOG_FILENAME", "pinboard.log"))
        file_handler.setLevel(logging.WARNING)
        app.logger.addHandler(file_handler)
    # Web asset pipeline: compiled/minified JS and CSS bundles.
    assets = Environment(app)
    js_assets = Bundle(
        "scripts/jquery-1.7.2.js",
        "scripts/jquery-ui-1.8.16.custom.min.js",
        #"scripts/chosen.jquery.min.js",
        "scripts/bootstrap.min.js",
        "scripts/angular-1.0.1.js",
        #"scripts/angular-cookies-1.0.0.js",
        #"scripts/taffy.js",
        "scripts/sugar-1.2.4.min.js",
        #"scripts/jquery.couch.js",
        Bundle("lib/*.coffee", filters=["coffeescript", ]),
        filters=["rjsmin", ],
        output="generated_app.js",
    )
    css_assets = Bundle(
        "stylesheets/jquery-ui-1.8.16.custom.css",
        Bundle(
            "stylesheets/app.less",
            filters=["less", ],
        ),
        filters=["cssmin", ],
        output="generated_app.css",
    )
    assets.register('js_all', js_assets)
    assets.register('css_all', css_assets)
    db.init_app(app)
    login_manager.setup_app(app)
    app.register_blueprint(blueprint_frontend)
    app.register_blueprint(blueprint_entry, url_prefix="/entry")
    app.register_blueprint(blueprint_filter, url_prefix="/filter")
    app.register_blueprint(blueprint_pinboard, url_prefix="/pinboards")
    return app
def main(args):
    # Entry point: wire up logging, the asyncio UDP node and the aiohttp
    # web application, then serve until interrupted.
    log = logging.getLogger("addisonarches.web")
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        # With a log file, the console only shows warnings and above.
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)
    down = asyncio.Queue(loop=loop)
    up = asyncio.Queue(loop=loop)
    #TODO: Read service name from CLI
    service = "dev"  # Cf qa, demo, prod, etc
    tok = token(args.connect, service, APP_NAME)
    node = create_udp_node(loop, tok, down, up)
    loop.create_task(node(token=tok))
    app = aiohttp.web.Application()
    # Each service object registers its own routes on the app.
    assets = Assets(app, **vars(args))
    reg = Registration(app, tok, down, up, **vars(args))
    transitions = Transitions(app, **vars(args))
    work = Workflow(app, tok, down, up, **vars(args))
    for svc in (assets, reg, transitions, work):
        log.info("{0.__class__.__name__} object serves {1}".format(
            svc, ", ".join(svc.routes.keys())))
    handler = app.make_handler()
    f = loop.create_server(handler, args.host, args.port)
    srv = loop.run_until_complete(f)
    log.info("Serving on {0[0]}:{0[1]}".format(srv.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        # Orderly shutdown: drain connections, close server, then loop.
        loop.run_until_complete(handler.finish_connections(1.0))
        srv.close()
        loop.run_until_complete(srv.wait_closed())
        loop.run_until_complete(app.finish())
        loop.close()
def add_file_handler(self, log_file):
    """ Create, format & add the handler that will log to the log file """
    # Rotation-safe handler mirroring the logger's current level.
    fh = WatchedFileHandler(log_file)
    fh.setLevel(self.logger.level)
    fh.setFormatter(logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"))
    self.logger.addHandler(fh)
def setup_log_file(log_file_path):
    """When *log_file_path* is truthy, attach a DEBUG-level, rotation-safe
    file handler (with the project's CustomFormatter) to the root logger."""
    from logging import DEBUG, Formatter, getLogger
    from logging.handlers import WatchedFileHandler
    from .util.logging import CustomFormatter
    if not log_file_path:
        return
    handler = WatchedFileHandler(str(log_file_path))
    handler.setLevel(DEBUG)
    # Strip the top-level package name from logger names in the output.
    handler.setFormatter(
        CustomFormatter(strip_name_prefix=__name__.split('.')[0]))
    getLogger().addHandler(handler)
def create_logger(filename=settings.LOG_FILE, level=settings.LOG_LEVEL,
                  name=settings.LOG_NAME):
    """Return the logger *name* with a rotation-safe file handler attached.

    WatchedFileHandler watches the file it is logging to; if the file
    changes (e.g. logrotate), it is closed and reopened by name.
    """
    handler = WatchedFileHandler(filename)
    handler.setLevel(level)
    logger = logging.getLogger(name)
    logger.addHandler(handler)
    logger.setLevel(level)
    return logger
def setup_logging(output_dir: str):
    """Reset the module-level logger, then log everything (DEBUG+) both
    to ``<output_dir>/run_logs.log`` and to stdout.

    Fix: the old code removed handlers while iterating
    ``logger.handlers`` directly; removing mutates the list being
    iterated, so every other handler was skipped. Iterating a copy
    removes all of them.
    """
    global logger
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    logger.setLevel(logging.DEBUG)
    file_handler = WatchedFileHandler(f'{output_dir}/run_logs.log')
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
def create_flask(): """ Create the Flask app """ # print 'sqlmongo::create_flask()' # create app app = Flask(__name__) # configure settings app.config.from_pyfile('config.py') # setup SQL database handler db.app = app db.init_app(app) # set up mongo database handler mongo.app = app mongo.init_app(app) # configure logger # http://flask.pocoo.org/docs/0.11/api/#flask.Flask.logger # https://docs.python.org/dev/library/logging.html#logging.Logger handler = WatchedFileHandler(app.config['DEBUG_LOG_FILE']) handler.setLevel(logging.INFO) # http://flask.pocoo.org/docs/0.11/errorhandling/#controlling-the-log-format handler.setFormatter( Formatter('%(asctime)s [%(levelname)s] %(message)s ' '[%(pathname)s : %(lineno)d]')) app.logger.addHandler(handler) app.logger.setLevel('INFO') # register the module controllers # sets up URL collections, that we wrote in CONTROLLER file from modules.Countries.controller import countries from modules.Ceramics.controller import ceramics app.register_blueprint(countries) app.register_blueprint(ceramics) # http://flask.pocoo.org/docs/0.11/api/#flask.Flask.route @app.route('/') def home(): """Default homepage Args: None Returns: The homepage HTML. Currently just 'Hello World from SQLMongo'. """ return render_template('home.html') return app
def main(args):
    # Configure the "pyspike" logger (console, optional log file), then
    # clone/update and reinstall each project of the requested target.
    log = logging.getLogger("pyspike")
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        # With a log file, console output is limited to warnings.
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    if args.target not in pyspike.ops.misc.targets:
        log.warning("No build defined for target '{}'.".format(args.target))
        return 1
    locn = os.path.abspath(os.path.expanduser(args.work))
    log.info("Calculated working directory as {0}".format(locn))
    os.chdir(locn)
    for url in pyspike.ops.misc.targets[args.target]:
        project = pyspike.ops.misc.url_to_project(url)
        if os.path.exists(os.path.join(locn, project)):
            # Existing checkout: update it in place.
            success = pyspike.ops.misc.git_pull(locn, project)
            success = success and pyspike.ops.misc.git_checkout(locn, project)
        else:
            success = pyspike.ops.misc.git_clone(args.work, url)
        if success:
            # Reinstall the freshly updated project.
            pyspike.ops.misc.pip_uninstall(locn, project)
            success = pyspike.ops.misc.pip_install(locn, project)
        # TODO: python -m unittest discover <namespace>
        if not success:
            return 1
    if not args.command:
        log.info("No command supplied.")
    elif args.command == "docker":
        log.info("Docker command supplied.")
        log.info(sys.executable)
    return 0
def main():
    """Parse options, configure logging and run the notification daemon."""
    parser = argparse.ArgumentParser(description="Send out broker notifications")
    parser.add_argument("-c", "--config", dest="config",
                        help="location of the broker configuration file")
    parser.add_argument("--one_shot", action="store_true",
                        help="do just a single run and then exit")
    parser.add_argument("--debug", action="store_true",
                        help="turn on debug logs on stderr")
    opts = parser.parse_args()
    config = Config(configfile=opts.config)
    # These modules must be imported after the configuration has been
    # initialized
    from aquilon.aqdb.db_factory import DbFactory
    db = DbFactory()
    if opts.debug:
        # Debug: everything to stderr.
        level = logging.DEBUG
        logging.basicConfig(level=level, stream=sys.stderr,
                            format='%(asctime)s [%(levelname)s] %(message)s')
    else:
        # Normal: INFO+ to a rotation-safe file under the broker logdir.
        level = logging.INFO
        logfile = os.path.join(config.get("broker", "logdir"), "aq_notifyd.log")
        handler = WatchedFileHandler(logfile)
        handler.setLevel(level)
        formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
        handler.setFormatter(formatter)
        rootlog = logging.getLogger()
        rootlog.addHandler(handler)
        rootlog.setLevel(level)
    # Apply configured log settings
    # NOTE(review): logging._levelNames is a Python 2-only private map
    # (removed in Python 3, replaced by _nameToLevel/_levelToName) --
    # confirm the target interpreter before porting this loop.
    for logname, level in config.items("logging"):
        if level not in logging._levelNames:
            continue
        logging.getLogger(logname).setLevel(logging._levelNames[level])
    logger = logging.getLogger("aq_notifyd")
    if opts.one_shot:
        update_index_and_notify(config, logger, db)
    else:
        # Daemon mode: exit cleanly on SIGTERM/SIGINT.
        signal.signal(signal.SIGTERM, exit_handler)
        signal.signal(signal.SIGINT, exit_handler)
        run_loop(config, logger, db)
def configure_logger(self):
    # Build and return an INFO-level logger named after self.name,
    # writing to <logging_folder>/<logname or "<name>.log"> via a
    # rotation-safe handler.
    logger = logging.getLogger(self.name)
    logger.setLevel(logging.INFO)
    logname = getattr(self, 'logname', '{}.log'.format(self.name))
    handler = WatchedFileHandler('{}/{}'.format(self.logging_folder, logname))
    handler.setFormatter(
        logging.Formatter('%(levelname)s %(asctime)s %(message)s'))
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)
    return logger
def main(args):
    # Configure logging, start a UDP node on an asyncio loop, send a
    # single "Hello World!" alert through it, then run until interrupted.
    log = logging.getLogger(APP_NAME)
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        # With a log file, the console only shows warnings and above.
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)
    down = asyncio.Queue(loop=loop)
    up = asyncio.Queue(loop=loop)
    tok = token(args.connect, APP_NAME)
    node = create_udp_node(loop, tok, down, up)
    loop.create_task(node(token=tok))
    loop.create_task(queue_logger(loop, up))
    # Address the alert to the demo router service.
    msg = parcel(
        tok,
        Alert(datetime.datetime.now(), "Hello World!"),
        via=Address(tok.namespace, tok.user, tok.service,
                    turberfield.ipc.demo.router.APP_NAME)
    )
    log.info("Sending message: {}".format(msg))
    loop.call_soon_threadsafe(functools.partial(down.put_nowait, msg))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Cancel outstanding tasks and release resources on Ctrl-C.
        for task in asyncio.Task.all_tasks(loop=loop):
            task.cancel()
        for resource in resources:
            resource.close()
    finally:
        loop.close()
class StartLog:
    def __init__(self, verbose=False, quiet=False, syslog=None,
                 logpath=None, loglevel=None):
        '''
        start logging facilities

        pass an optional logpath to log to disk (using WatchedFileHandler)
        '''
        levels = {'DEBUG': DEBUG, 'INFO': INFO, 'WARNING': WARNING,
                  'ERROR': ERROR, 'CRITICAL': CRITICAL}
        assert loglevel in levels.keys() or loglevel == None, \
            'log level must be one of ' + str(levels)
        # set loglevel based on verbosity/quiet args
        if loglevel is not None:
            loglevel = levels[loglevel]
        elif verbose is True and quiet is False:
            loglevel = DEBUG
        elif quiet is True and verbose is False:
            loglevel = CRITICAL
        else:
            loglevel = INFO
        # logger
        formatter = Formatter('%(name)s - %(message)s')
        self.log = getLogger('blacklistparser')
        self.log.setLevel(loglevel)
        # setup console logger
        self.console_log = StreamHandler()
        self.console_log.setFormatter(formatter)
        self.console_log.setLevel(loglevel)
        self.log.addHandler(self.console_log)
        self.log.debug('Added StreamHandler() console logging')
        # alternative logging types below
        if logpath is None:
            self.log.debug('log path not specified')
        elif types.base_path_type(logpath) is not None:
            # setup disk log (rotation-safe handler)
            self.disk_log = WatchedFileHandler(logpath)
            self.disk_log.setFormatter(formatter)
            self.disk_log.setLevel(loglevel)
            self.log.addHandler(self.disk_log)
            self.log.debug('Added WatchedFileHandler() logging')
        if syslog:
            # setup syslog
            # this is using /dev/log socket that is very (linux/openbsd)
            # platform dependant, should add some os detection logic here
            # and an argument to log to remote syslog server/port
            self.sys_log_handler = SysLogHandler(address='/dev/log')
            self.sys_log_handler.setFormatter(formatter)
            self.sys_log_handler.setLevel(loglevel)
            self.log.addHandler(self.sys_log_handler)
            self.log.debug('Added SysLogHandler() logging')
        # log setup success/fail msg at DEBUG level
        debugmsg = ('setting log level to ' + str(loglevel))
        self.log.debug(debugmsg)
def init(config_file):
    """Consume --config/--logfile/--verbose from sys.argv, configure the
    module-level logger and load the config file into ``config``.

    NOTE(review): uses the Python 2 ``ConfigParser`` module name; on
    Python 3 this import is ``configparser`` -- confirm target version.
    """
    global logger
    global config
    log_file = None
    verbose = False
    n = 0
    count = len(sys.argv)
    # Manual argv scan: our own --var[=val] options are removed in place
    # so later argument parsing does not see them.
    while n < count:
        arg = sys.argv[n]
        if arg.startswith('--'):
            buf = arg[2:]
            at = buf.find('=')
            if at != -1:
                var = buf[:at]
                val = buf[at + 1:]
            else:
                var = buf
                val = None
            del sys.argv[n]
            count -= 1
            if var == 'config':
                config_file = val
            elif var == 'logfile':
                log_file = val
            elif var == 'verbose':
                verbose = True
        else:
            # Not ours: leave it and move past.
            n += 1
    logger = logging.getLogger('app')
    # Log to the file when given, else to stdout.
    if log_file:
        logger_handler = WatchedFileHandler(log_file)
    else:
        logger_handler = logging.StreamHandler(stream=sys.stdout)
    if verbose:
        logger.setLevel(logging.DEBUG)
        logger_handler.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
        logger_handler.setLevel(logging.INFO)
    formatter = logging.Formatter(
        fmt='%(levelname)s %(asctime)s.%(msecs)03d %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    logger_handler.setFormatter(formatter)
    logger.addHandler(logger_handler)
    config = ConfigParser.ConfigParser()
    config.read([config_file])
def get_scripts_logger(filename='/tmp/log.txt', level=logging.DEBUG):
    """Replace the handlers of the 'app' logger with a stdout handler and
    a rotation-safe file handler on *filename*, both at *level*."""
    fmt = logging.Formatter(
        "%(asctime)s[%(levelname)s][%(name)s][%(module)s-%(lineno)s]-%(process)d %(message)s"
    )
    app_logger = logging.getLogger('app')
    app_logger.handlers = []
    for handler in (logging.StreamHandler(sys.stdout),
                    WatchedFileHandler(filename)):
        handler.setLevel(level)
        handler.setFormatter(fmt)
        app_logger.addHandler(handler)
    app_logger.setLevel(level=level)
def get_logger(self, name):
    # Lazily configure a per-name logger writing INFO records to
    # <log_file_path>/<name>; configuration is skipped when the logger
    # already has handlers.
    logger = logging.getLogger(name)
    if not logger.handlers:
        logger.setLevel(logging.INFO)
        # create formatter and handler
        handler = WatchedFileHandler(os.path.join(log_file_path, name))
        handler.setLevel(logging.INFO)
        handler.setFormatter(logging.Formatter('%(asctime)s;%(message)s'))
        logger.addHandler(handler)
    return logger
def init_statuslog(self):
    # Status log: INFO+ to <LOG_DIR>/main-status.log (rotation-safe),
    # DEBUG+ mirrored on the console.
    self.statuslog = logging.getLogger('statuslog')
    self.statuslog.setLevel(logging.DEBUG)
    fmt = logging.Formatter('%(asctime)s %(levelname)s: %(message)s ')
    file_handler = WatchedFileHandler(
        "%s/main-status.log" % (self.config.LOG_DIR))
    file_handler.setFormatter(fmt)
    file_handler.setLevel(logging.INFO)
    self.statuslog.addHandler(file_handler)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    stream_handler.setLevel(logging.DEBUG)
    self.statuslog.addHandler(stream_handler)
def get_logger(name):
    # Non-propagating logger: optional stdout handler (when PRINT_STDOUT)
    # plus a rotation-safe handler on LOG_FILE, all at LOG_LEVEL.
    logger = logging.getLogger(name)
    logger.propagate = False
    fmt = logging.Formatter(
        "[%(levelname)s] %(asctime)s - %(name)s - %(message)s",
        datefmt="%m-%d-%Y %H:%M:%S")
    targets = []
    if PRINT_STDOUT:
        targets.append(logging.StreamHandler())
    targets.append(WatchedFileHandler(LOG_FILE))
    for handler in targets:
        handler.setLevel(LOG_LEVEL)
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    return logger
def init(config_file):
    """Consume --config/--logfile/--verbose from sys.argv, configure the
    module-level logger and load the config file into ``config``.

    NOTE(review): uses the Python 2 ``ConfigParser`` module name; on
    Python 3 this import is ``configparser`` -- confirm target version.
    """
    global logger
    global config
    log_file = None
    verbose = False
    n = 0
    count = len(sys.argv)
    # Manual argv scan: our own --var[=val] options are removed in place
    # so later argument parsing does not see them.
    while n < count:
        arg = sys.argv[n]
        if arg.startswith('--'):
            buf = arg[2:]
            at = buf.find('=')
            if at != -1:
                var = buf[:at]
                val = buf[at + 1:]
            else:
                var = buf
                val = None
            del sys.argv[n]
            count -= 1
            if var == 'config':
                config_file = val
            elif var == 'logfile':
                log_file = val
            elif var == 'verbose':
                verbose = True
        else:
            # Not ours: leave it and move past.
            n += 1
    logger = logging.getLogger('app')
    # Log to the file when given, else to stdout.
    if log_file:
        logger_handler = WatchedFileHandler(log_file)
    else:
        logger_handler = logging.StreamHandler(stream=sys.stdout)
    if verbose:
        logger.setLevel(logging.DEBUG)
        logger_handler.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
        logger_handler.setLevel(logging.INFO)
    formatter = logging.Formatter(fmt='%(levelname)s %(asctime)s.%(msecs)03d %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    logger_handler.setFormatter(formatter)
    logger.addHandler(logger_handler)
    config = ConfigParser.ConfigParser()
    config.read([config_file])
def get_logger(name):
    # Non-propagating logger: optional stdout handler (when PRINT_STDOUT)
    # plus a rotation-safe handler on LOG_FILE, all at LOG_LEVEL.
    logger = logging.getLogger(name)
    logger.propagate = False
    fmt = logging.Formatter(
        '[%(levelname)s] %(asctime)s - %(name)s - %(message)s',
        datefmt='%m-%d-%Y %H:%M:%S')
    handler_list = []
    if PRINT_STDOUT:
        handler_list.append(logging.StreamHandler())
    handler_list.append(WatchedFileHandler(LOG_FILE))
    for h in handler_list:
        h.setLevel(LOG_LEVEL)
        h.setFormatter(fmt)
        logger.addHandler(h)
    return logger
def _get_logger(name=settings.logger_filename()):
    # Configure (once) and return the named logger; records go to
    # settings.logger_file() via a rotation-safe handler.
    logger = logging.getLogger(name)
    if not logger.handlers:
        logger.setLevel(logging.INFO)
        # create formatter and handler
        handler = WatchedFileHandler(settings.logger_file())
        handler.setLevel(logging.INFO)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        logger.addHandler(handler)
    return logger
def _create_handlers(filename=LOG_FILEPATH, level=LOG_LEVEL):
    """Build and publish the module handler list: a rotation-friendly
    file handler plus a local3 syslog handler (used by internal log
    monitoring applications)."""
    global handlers
    formatter = logging.Formatter(format)
    # WatchedFileHandler watches the file it is logging to.
    # If the file changes, it is closed and reopened using the file name.
    file_handler = WatchedFileHandler(filename)
    syslog_handler = SysLogHandler(facility=LOG_LOCAL3)
    for h in (file_handler, syslog_handler):
        h.setFormatter(formatter)
        h.setLevel(level)
    handlers = [file_handler, syslog_handler]
    return handlers
def _init_logger(self, request_info):
    # Init logger lazily when application info is available.
    # The filename encodes app/service/version/port so each backend
    # instance writes to its own request log.
    filename = self.FILENAME_TEMPLATE.format(
        app=request_info['appId'],
        service=request_info['serviceName'],
        version=request_info['versionName'],
        port=request_info['port'])
    file_handler = WatchedFileHandler(filename)
    file_handler.setFormatter(logging.Formatter('%(message)s'))
    file_handler.setLevel(logging.INFO)
    # Direct logging.Logger instantiation (not getLogger), so this
    # logger does not join the global logger registry.
    self._logger = logging.Logger('request-logger', logging.INFO)
    self._logger.addHandler(file_handler)
def __create_producer_logger(self, producer, info):
    # Per-producer logger: plain messages to <logdir>/<producer>.log at
    # the producer's configured level, mirrored to the console at DEBUG.
    self.log('Inside Server.__create_producer_logger, Producer: {0}, Info: {1}'.format(producer, info), level=logging.DEBUG)
    logger = logging.getLogger(producer)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    message_fmt = logging.Formatter(fmt='%(message)s')
    # We'll use a WatchedFileHandler and utilize some external
    # application to rotate the logs periodically.
    log_path = os.path.join(settings.config['logdir'],
                            '{0}.log'.format(producer))
    file_handler = WatchedFileHandler(log_path)
    file_handler.setLevel(info['logging.level'])
    file_handler.setFormatter(message_fmt)
    logger.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(message_fmt)
    logger.addHandler(console_handler)
def register_loggers(app):
    """Attach the configured Doorman log handler to the Flask app logger.

    Skipped entirely when the app runs in debug mode.
    DOORMAN_LOGGING_FILENAME selects the destination: the literal string
    'sys.stdout' means the console, anything else is treated as a file
    path (rotation-safe handler). DOORMAN_LOGGING_LEVEL (when valid) and
    DOORMAN_LOGGING_FORMAT configure the handler.

    Fix: ``logging.StreamHandler`` was being passed the *string*
    'sys.stdout' instead of the stream object, which raises
    AttributeError on the first emitted record.
    """
    if app.debug:
        return
    import logging
    import sys
    from logging.handlers import WatchedFileHandler
    log_fname = app.config['DOORMAN_LOGGING_FILENAME']
    if log_fname == 'sys.stdout':
        handler = logging.StreamHandler(sys.stdout)
    else:
        handler = WatchedFileHandler(log_fname)
    levelname = app.config['DOORMAN_LOGGING_LEVEL']
    if levelname in ('DEBUG', 'INFO', 'WARN', 'WARNING', 'ERROR', 'CRITICAL'):
        handler.setLevel(getattr(logging, levelname))
    formatter = logging.Formatter(app.config['DOORMAN_LOGGING_FORMAT'])
    handler.setFormatter(formatter)
    app.logger.addHandler(handler)
def init(cls, config):
    # Root-logger setup: optional console handler and optional UTF-8,
    # rotation-safe file handler under config.FILE_BASE.
    formatter = logging.Formatter(config.FORMAT)
    root = logging.getLogger('')
    root.setLevel(config.ROOT_LEVEL)
    if config.LOG_TO_CONSOLE:
        console = logging.StreamHandler()
        console.setLevel(config.CONSOLE_LEVEL)
        console.setFormatter(formatter)
        root.addHandler(console)
    if config.LOG_TO_FILE:
        log_path = os.path.join(config.FILE_BASE, cls.generate_log_file_name())
        file_handler = WatchedFileHandler(log_path, encoding='utf-8')
        file_handler.setLevel(config.FILE_LEVEL)
        file_handler.setFormatter(formatter)
        root.addHandler(file_handler)
class TextLogger(Logger):
    '''Plain-text message logger; safe under external log rotation.'''

    FORMAT = '%(asctime)s [%(protocol)s:%(pid)s] %(srcname)s >> %(text)s'

    def __init__(self, filename, tz=timezone.utc):
        handler = WatchedFileHandler(filename, encoding='utf-8', delay=True)
        handler.setLevel(logging.INFO)
        handler.setFormatter(logging.Formatter('%(message)s'))
        self.loghandler = handler
        self.tz = tz

    def log(self, msg: Message):
        '''Render one Message with FORMAT and emit it via the handler.'''
        fields = msg._asdict()
        stamp = datetime.fromtimestamp(msg.time, self.tz)
        fields['asctime'] = stamp.strftime('%Y-%m-%d %H:%M:%S')
        fields['srcname'] = msg.src.alias
        fields['srcid'] = msg.src.id
        record = logging.makeLogRecord({'msg': self.FORMAT % fields})
        self.loghandler.emit(record)

    def commit(self):
        '''Writes are unbuffered at this layer; nothing to flush.'''
        pass
def main(args):
    """Entry point: configure logging, then run the UDP router node forever.

    args must provide log_level, log_path (or None) and connect.
    Returns only when the event loop is closed.
    """
    log = logging.getLogger(APP_NAME)
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        # File logging gets the requested level; console is quieter (WARNING+).
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)
    # NOTE(review): Queue(loop=...) and asyncio.Task.all_tasks were removed
    # in Python 3.10; this code targets an older asyncio API.
    down = asyncio.Queue(loop=loop)
    up = asyncio.Queue(loop=loop)
    tok = token(args.connect, APP_NAME)
    node = create_udp_node(loop, tok, down, up)
    loop.create_task(node(token=tok))
    log.info("Starting router node...")
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C: cancel outstanding tasks, then release held resources.
        for task in asyncio.Task.all_tasks(loop=loop):
            task.cancel()
        # NOTE(review): `resources` is not defined in this function —
        # presumably a module-level collection of closeables; confirm.
        for resource in resources:
            resource.close()
    finally:
        loop.close()
class TextLogger(Logger):
    """Plain-text message logger, friendly to external log rotation."""

    FORMAT = "%(asctime)s [%(protocol)s:%(pid)s] %(srcname)s >> %(text)s"

    def __init__(self, filename, tz=timezone.utc):
        self.loghandler = WatchedFileHandler(filename, encoding="utf-8", delay=True)
        self.loghandler.setLevel(logging.INFO)
        self.loghandler.setFormatter(logging.Formatter("%(message)s"))
        self.tz = tz

    def log(self, msg: Message):
        """Format one Message and emit it straight through the handler."""
        payload = dict(msg._asdict())
        when = datetime.fromtimestamp(msg.time, self.tz)
        payload.update(
            asctime=when.strftime("%Y-%m-%d %H:%M:%S"),
            srcname=msg.src.alias,
            srcid=msg.src.id,
        )
        self.loghandler.emit(logging.makeLogRecord({"msg": self.FORMAT % payload}))

    def commit(self):
        """No buffering at this layer; nothing to do."""
        pass
def initialize_logging(name="unknown"):
    """Initializes the logging module.

    This initializes pythons logging module:

    * set loglevel (from nodes config)
    * set logfile
    * define new loglevel BAN with priority 90
    * format log messages
    * log to file as well as stderr

    Kwargs:
        name (string): name of the module initializing the logger
    """
    c = config.Config()
    # Fall back to sane defaults when the config leaves these unset.
    if not c.logLevel:
        c.logLevel = logging.DEBUG
    if not c.logFile:
        c.logFile = "/dev/null"

    # Custom BAN level above CRITICAL, plus a logger.ban() convenience.
    logging.BAN = 90
    logging.addLevelName(logging.BAN, 'BAN')

    logger = logging.getLogger(name)
    logger.ban = lambda msg, *args: logger._log(logging.BAN, msg, args)
    formatter = logging.Formatter('%(asctime)s - %(name)s\t%(levelname)s\t%(message)s')
    logger.setLevel(c.logLevel)

    if name == "fail2ban-p2p":  # Only add new handlers when called from main.py
        try:
            log2file = WatchedFileHandler(c.logFile)
            log2file.setFormatter(formatter)
            log2file.setLevel(c.logLevel)
            logger.addHandler(log2file)
        except OSError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; only file-open errors are
            # expected here, and logging stays best-effort (stderr only).
            print("--- WARNING --- LOGFILE " + c.logFile +
                  " IS EITHER NONEXISTENT OR NOT WRITABLE")
        # Stderr handler is also main.py-only to avoid duplicate records
        # when library modules call this function.
        log2stderr = logging.StreamHandler(sys.stderr)
        log2stderr.setFormatter(formatter)
        log2stderr.setLevel(c.logLevel)
        logger.addHandler(log2stderr)

    return logger
def main(args):
    """Configure logging, then run the people indexer.

    Modes: args.query prints matching people and exits 0; without
    args.interval a single ingest runs (exit 0 on success); otherwise
    ingest is scheduled to repeat every args.interval seconds.
    """
    log = logging.getLogger("cloudhands.web.indexer")
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        # File gets the requested level; console drops to WARNING.
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    # First (provider, config) pair from the module-level settings mapping.
    provider, config = next(iter(settings.items()))
    loop = sched.scheduler()
    try:
        os.mkdir(args.index)
    except OSError:
        pass  # index directory already exists
    if args.query is not None:
        for p in people(args.index, args.query):
            print(p)
        return 0
    if args.interval is None:
        # One-shot ingest; exit status reflects whether anything was ingested.
        return 0 if ingest(args, config) > 0 else 1
    else:
        loop.enter(args.interval, 0, ingest, (args, config, loop))
        loop.run()
        return 1
def prepare_service_logger():
    """Build and return the 'TelegramBot' service logger.

    DEBUG-level request/response traces are appended to service.log under
    LOG_DIR via a WatchedFileHandler (rotation-friendly); propagation is
    disabled so records stay out of ancestor loggers.
    """
    logger = logging.getLogger('TelegramBot')
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    handler = WatchedFileHandler(filename=os.path.join(LOG_DIR, 'service.log'))
    # Brace-style formatter describing request, response, and the two
    # failure shapes (transport exception vs. response-parse exception).
    handler.setFormatter(ServiceClientFormatter(
        fmt="{asctime} | {action} | {method} {full_url} | {message}",
        request_fmt="\nHeaders:\n{headers}\nBody:\n{body}",
        response_fmt=" | {status_code} {status_text} | "
                     "{headers_elapsed}\nHeaders:\n{headers}\nBody:\n{body}",
        exception_fmt=" | {exception_repr}",
        parse_exception_fmt=" | {status_code} {status_text} | "
                            "{headers_elapsed} | {exception_repr}\nHeaders:\n"
                            "{headers}\nBody:\n{body}",
        headers_fmt="\t{name}: {value}",
        headers_sep="\n",
        datefmt="%Y-%m-%dT%H:%M:%S%z",
        style='{'))
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger
def main(args):
    """Set up maloja logging, then generate the report for args.design."""
    log = logging.getLogger("maloja")
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    console = logging.StreamHandler()
    if args.log_path is not None:
        # The file captures everything at the requested level; the
        # console only shows WARNING and above.
        file_handler = WatchedFileHandler(args.log_path)
        file_handler.setLevel(args.log_level)
        file_handler.setFormatter(formatter)
        log.addHandler(file_handler)
        console.setLevel(logging.WARNING)
    else:
        console.setLevel(args.log_level)
    console.setFormatter(formatter)
    log.addHandler(console)
    return report(args.design)
def _configure_logging(self):
    """This method configures the self.log entity for log handling.

    :return: None

    Sets up file and/or console logging for this service instance.
    The configured log will be available to the service instance
    with `self.log`.
    """
    # Resolve the symbolic level; unknown values fall back to ERROR.
    self.log_level = ComponentCore.LOG_LEVEL_MAP.get(
        self.log_level, logging.ERROR)

    # assign the windmill instance logger
    self.log = logging.getLogger(self.service_name)
    self.log.setLevel(self.log_level)

    # Optional file output (WatchedFileHandler plays well with logrotate).
    if self.log_path:
        target = self.log_path
        if not target.endswith('.log'):
            target = os.path.join(target, self.service_name + '.log')
        file_handler = WatchedFileHandler(target)
        file_handler.setLevel(self.log_level)
        file_handler.setFormatter(self._log_formatter())
        self.log.addHandler(file_handler)

    # Console output only when running in verbose mode.
    if self.verbose:
        console = logging.StreamHandler()
        console.setLevel(self.log_level)
        console.setFormatter(self._log_formatter())
        self.log.addHandler(console)

    self.log.info('Logging configured for: %s', self.service_name)
def setup_log():
    """Wire up production logging for the copr frontend Flask app.

    Skipped entirely in debug mode.  CRITICAL records are mailed via the
    local SMTP relay; everything at LOGGING_LEVEL and above goes to the
    LOG_FILENAME file (WatchedFileHandler, so logrotate is safe).
    """
    if app.debug:
        return

    # Send critical message by email
    mail_handler = SMTPHandler(
        "127.0.0.1",
        "copr-fe-error@{0}".format(
            app.config["SERVER_NAME"] or "fedorahosted.org"),
        app.config.get("SEND_LOGS_TO"),
        "Yay, error in copr frontend occured!")
    mail_handler.setFormatter(mail_error_formatter)
    mail_handler.setLevel(logging.CRITICAL)
    app.logger.addHandler(mail_handler)

    # store all logs to the file log
    file_handler = WatchedFileHandler(app.config.get("LOG_FILENAME"))
    file_handler.setFormatter(default_formatter)
    file_handler.setLevel(app.config.get("LOGGING_LEVEL", logging.INFO))
    app.logger.addHandler(file_handler)

    app.logger.info(
        "logging configuration finished, config: {}".format(app.config))
def setup_logfile_logger(log_path, log_level=None, log_format=None,
                         date_format=None):
    """
    Set up logging to a file.
    """
    handler = WatchedFileHandler(log_path)

    # Translate the symbolic level when given; unrecognised names
    # degrade to ERROR.
    if log_level:
        handler.setLevel(LOG_LEVELS.get(log_level.lower(), logging.ERROR))

    # Fall back to the default format/date strings when unset.
    fmt = log_format or '%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s'
    datefmt = date_format or '%Y-%m-%d %H:%M:%S'
    handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))

    root_logger.addHandler(handler)
    return handler
from logging.handlers import WatchedFileHandler

# Resolve <project>/log relative to this file and make sure it exists.
CURRENT_FOLDER = os.path.dirname(os.path.abspath(__file__))
PROJECT_PATH = os.path.dirname(os.path.dirname(CURRENT_FOLDER))
LOG_PATH = os.path.join(PROJECT_PATH, "log")
if not os.path.exists(LOG_PATH):
    os.makedirs(LOG_PATH)

LOG_FORMAT = '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s'
formatter = logging.Formatter(LOG_FORMAT)

# Console: everything from DEBUG up.
debug_log_handler = logging.StreamHandler()
debug_log_handler.setLevel(logging.DEBUG)
debug_log_handler.setFormatter(formatter)

# info.log: INFO and above (WatchedFileHandler is logrotate-safe).
info_log_handler = WatchedFileHandler(os.path.join(LOG_PATH, "info.log"))
info_log_handler.setLevel(logging.INFO)
info_log_handler.setFormatter(formatter)

# error.log: ERROR and above.
error_log_handler = WatchedFileHandler(os.path.join(LOG_PATH, "error.log"))
error_log_handler.setLevel(logging.ERROR)
error_log_handler.setFormatter(formatter)

LOGGER = logging.getLogger("project_name")
LOGGER.setLevel(logging.DEBUG)
for _handler in (debug_log_handler, info_log_handler, error_log_handler):
    LOGGER.addHandler(_handler)
# Production wiring for the geocron WSGI application.
from geocron.web import application
from geocron import settings

if not settings.DEBUG:
    # Email ERROR-and-above records to the admins via SMTP.
    import logging
    from logging.handlers import SMTPHandler
    # NOTE(review): the sender address below looks redacted at export
    # time ('*****@*****.**'); confirm the real value before deploying.
    mail_handler = SMTPHandler(settings.SMTP_HOST,
                               '*****@*****.**',
                               settings.ADMINS,
                               'Our application failed',
                               (settings.SMTP_USER, settings.SMTP_PASSWORD))
    mail_handler.setLevel(logging.ERROR)
    application.logger.addHandler(mail_handler)

    # Also append WARNING-and-above to a rotation-friendly file log.
    from logging.handlers import WatchedFileHandler
    file_handler = WatchedFileHandler("/tmp/geocron.log")
    file_handler.setLevel(logging.WARNING)
    application.logger.addHandler(file_handler)

application.secret_key = settings.SECRET_KEY

# Mount table for the WSGI dispatcher.
applications = {
    '/': application,
}
def __init__(
    self,
    level=logging.DEBUG,
    name=None,
    logdir="./",
    stdout=True,
    multiFile=False,
    post=False,
    ws="www-qa.coraid.com",
):
    # level: threshold for stdout and the FULL log file.
    # name: log file base name; defaults to the calling script's name.
    # multiFile: additionally write one log file per level.
    # post/ws: web-server posting options (post is unused in this method).
    self.logdir = logdir
    self.ws = ws
    self.instance = os.environ.get("instance") or ""
    self.level = level
    # Register the project-specific COMMENT level with the logging module.
    logging.addLevelName(COMMENT, "COMMENT")
    # Root Logger
    self.logger = logging.getLogger("otto" + self.instance)
    self.logger.addHandler(logging.NullHandler())
    """ Root Logger Threshold is WARNING by default. We will set the threshold as low as possible """
    self.logger.setLevel(DEBUG)
    """ The STDOUT handler will use the logger default threshold for printing. If the level is set to INFO the STDOUT should only display INFO messages and greater """
    if stdout:
        StdOutHandler = logging.StreamHandler(sys.stdout)
        StdOutHandler._name = "STDOUT"
        StdOutHandler.setLevel(level)
        StdOutHandler.setFormatter(Dispatcher())
        self.logger.addHandler(StdOutHandler)
    if name is None:
        # Derive the log name from the calling script's filename.
        frame = inspect.stack()[1]
        name = inspect.getfile(frame[0]).split("/")[-1].split(".py")[0]
    # Timestamped base path shared by all the log files below.
    logFileBase = self.logdir + name + "-" + time.strftime("%Y%m%d_%H%M")
    """ The Full log will contain every level of output and will be created in any configuration for use when posting the log to the web server. """
    fullLogFile = logFileBase + "_FULL.log"
    self.fullLogFile = fullLogFile
    FullLogFileHandler = WatchedFileHandler(fullLogFile)
    FullLogFileHandler.setLevel(level)
    FullLogFileHandler._name = "LogFile-FULL"
    FullLogFileHandler.setFormatter(Dispatcher())
    self.logger.addHandler(FullLogFileHandler)
    """ In the case of multiFile = True: Create a FileHandler for each level and attatch the appropriate level name to the file suffix Then set a filter on each handler to return only the appropriate level per file """
    if multiFile:
        # Set up filename variables
        debugLogFile = logFileBase + "_DEBUG.log"
        commentLogFile = logFileBase + "_COMMENT.log"
        infoLogFile = logFileBase + "_INFO.log"
        warningLogFile = logFileBase + "_WARNING.log"
        errorLogFile = logFileBase + "_ERROR.log"
        # Create FileHandler objects
        DebugFileHandler = WatchedFileHandler(debugLogFile)
        DebugFileHandler._name = "LogFile-DEBUG"
        CommentFileHandler = WatchedFileHandler(commentLogFile)
        CommentFileHandler._name = "LogFile-COMMENT"
        InfoFileHandler = WatchedFileHandler(infoLogFile)
        InfoFileHandler._name = "LogFile-INFO"
        WarningFileHandler = WatchedFileHandler(warningLogFile)
        WarningFileHandler._name = "LogFile-WARNING"
        ErrorFileHandler = WatchedFileHandler(errorLogFile)
        ErrorFileHandler._name = "LogFile-ERROR"
        # Add filters at corresponding levels
        DebugFileHandler.addFilter(LogFilter(DEBUG))
        CommentFileHandler.addFilter(LogFilter(COMMENT))
        InfoFileHandler.addFilter(LogFilter(INFO))
        WarningFileHandler.addFilter(LogFilter(WARNING))
        ErrorFileHandler.addFilter(LogFilter(ERROR))
        # Add format Dispatcher
        DebugFileHandler.setFormatter(Dispatcher())
        CommentFileHandler.setFormatter(Dispatcher())
        InfoFileHandler.setFormatter(Dispatcher())
        WarningFileHandler.setFormatter(Dispatcher())
        ErrorFileHandler.setFormatter(Dispatcher())
        # Add handlers to root logger
        self.logger.addHandler(DebugFileHandler)
        self.logger.addHandler(CommentFileHandler)
        self.logger.addHandler(InfoFileHandler)
        self.logger.addHandler(WarningFileHandler)
        self.logger.addHandler(ErrorFileHandler)
def main(args):
    """Run the cloudhands.burst agent workers on an asyncio event loop.

    Sets up logging (console plus optional watched file), instantiates
    one worker per agent type, registers their message callbacks, and
    drives everything via operate() until interrupted.  Returns 0.
    """
    logging.getLogger("asyncio").setLevel(args.log_level)
    log = logging.getLogger("cloudhands.burst")
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        # File gets the requested level; console drops to WARNING.
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    # First (portal, config) pair from the module-level settings mapping.
    portalName, config = next(iter(settings.items()))
    loop = asyncio.get_event_loop()
    # NOTE(review): Queue(loop=...) was removed in Python 3.10; this
    # targets an older asyncio API.
    msgQ = asyncio.Queue(loop=loop)
    if args.log_level == logging.DEBUG:
        try:
            loop.set_debug(True)
        except AttributeError:
            log.info("Upgrade to Python 3.4.2 for asyncio debug mode")
        else:
            log.info("Event loop debug mode is {}".format(loop.get_debug()))
    # One worker per agent type; each contributes callbacks to the
    # shared message_handler dispatcher.
    workers = []
    for agentType in (
            AcceptedAgent,
            PreCheckAgent,
            PreDeleteAgent,
            PreOperationalAgent,
            PreProvisionAgent,
            PreStartAgent,
            PreStopAgent,
            ProvisioningAgent,
            SessionAgent,
            # TODO: SubscriptionAgent
            ):
        workQ = agentType.queue(args, config, loop=loop)
        agent = agentType(workQ, args, config)
        for typ, handler in agent.callbacks:
            message_handler.register(typ, handler)
        workers.append(agent)
    try:
        loop.run_until_complete(operate(loop, msgQ, workers, args, config))
    except KeyboardInterrupt:
        # TODO: Task audit
        pass
    except Exception as e:
        log.error(e)
    finally:
        # Best-effort close of each agent's work queue/generator;
        # agents without a .work attribute are skipped.
        for agent in workers:
            try:
                agent.work.close()
            except AttributeError:
                continue
            except Exception as e:
                log.error(e)
        loop.close()
    return 0
def create_app(config_filename=None):
    """Flask application factory for the madacra server.

    Layers configuration from instance default_settings.py, then the
    optional config_filename, then the MADACRA_SERVER_SETTINGS env var;
    wires up logging, webassets bundles, the DB manager, the message hub
    and all blueprints, and returns the configured app.
    """
    from db import db_manager
    from messaging import message_hub
    import db.campaign  # noqa
    #from authentication import login_manager
    app = Flask(__name__, instance_relative_config=True)
    app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')
    app.config.from_pyfile("default_settings.py")
    if config_filename is not None:
        app.config.from_pyfile(config_filename)
    app.config.from_envvar('MADACRA_SERVER_SETTINGS', silent=True)
    if not app.debug:
        # Production logging: WARNING+ appended to a rotation-safe file.
        file_handler = WatchedFileHandler(app.config["LOG_FILENAME"])
        file_handler.setLevel(logging.WARNING)
        app.logger.addHandler(file_handler)
    # webassets bundles (coffeescript/less compiled, JS/CSS minified).
    assets = Environment(app)
    js_lib_assets = Bundle(
        "scripts/jquery-1.7.2.min.js",
        #"scripts/jquery-ui-1.8.16.custom.min.js",
        "scripts/angular-1.0.1.min.js",
        "scripts/angular-cookies-1.0.1.min.js",
        "scripts/sugar-1.2.5.min.js",
        "scripts/socket.io.min.js",
        #"scripts/jquery.couch.js",
        Bundle(
            "scripts/bootstrap.js",
            filters=["rjsmin", ],
        ),
        output="madacra_lib.js",
    )
    js_app_assets = Bundle(
        "lib/*.coffee",
        "lib/controls/*.coffee",
        filters=["coffeescript", "rjsmin"],
        output="madacra_app.js",
    )
    js_unit_test_assets = Bundle(
        "scripts/jasmine.js",
        "scripts/jasmine-html.js",
        "scripts/angular-mocks-1.0.1.js",
        Bundle(
            "tests/unit/*.coffee",
            filters=["coffeescript", ],
        ),
        #filters=["rjsmin", ],
        output="madacra_unit_test.js",
    )
    js_e2e_test_assets = Bundle(
        "scripts/angular-scenario-1.0.1.js",
        Bundle(
            "tests/e2e/*.coffee",
            filters=["coffeescript", ],
        ),
        #filters=["rjsmin", ],
        output="madacra_e2e_test.js",
    )
    css_unit_test_assets = Bundle(
        "stylesheets/jasmine.css",
        filters=["cssmin", ],
        output="madacra_unit_test.css",
    )
    #css_lib_assets = Bundle(
    ##"stylesheets/jquery-ui-1.8.16.custom.css",
    #filters=["cssmin", ],
    #output="madacra_lib.css",
    #)
    css_app_assets = Bundle(
        "stylesheets/madacra.less",
        depends=[
            "stylesheets/colors.less",
            "stylesheets/controls.less",
            "stylesheets/utils.less",
        ],
        filters=["less", "cssmin"],
        output="madacra_app.css",
    )
    assets.register("js_lib", js_lib_assets)
    assets.register("js_app", js_app_assets)
    #assets.register("css_lib", css_lib_assets)
    assets.register("css_app", css_app_assets)
    assets.register("css_unit_tests", css_unit_test_assets)
    assets.register("js_e2e_tests", js_e2e_test_assets)
    assets.register("js_unit_tests", js_unit_test_assets)
    db_manager.init_app(app)
    if not message_hub.started:
        message_hub.start()
    #login_manager.init_app(app)
    from views.index import index_blueprint
    from views.socket import socketio_blueprint
    app.register_blueprint(index_blueprint)
    app.register_blueprint(socketio_blueprint, url_prefix="/socket.io")
    if app.testing:
        # Test runs get fixture data plus the test-only blueprint.
        from testing import create_fixtures
        app.logger.debug(u"Creating fixtures...")
        create_fixtures(app)
        from views.tests import tests_blueprint
        app.register_blueprint(tests_blueprint, url_prefix="/tests")
    from messaging import MessageDebugLogger
    message_logger = MessageDebugLogger.from_hub(message_hub, logger=app.logger)
    message_logger.start()
    #app.register_blueprint(blueprint_frontend)
    #app.register_blueprint(blueprint_entry, url_prefix="/entry")
    #app.register_blueprint(blueprint_filter, url_prefix="/filter")
    #app.register_blueprint(blueprint_pinboard, url_prefix="/pinboards")
    return app
def main(args):
    """Maloja entry point: interactive console when no command is given;
    otherwise handles plan, survey, build or inspect.

    NOTE(review): a span of this function was redacted at export time
    ("******") and is preserved verbatim below; it is not valid Python
    as-is and must be restored from the original source.
    """
    log = logging.getLogger("maloja")
    log.setLevel(args.log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-7s %(name)s|%(message)s")
    ch = logging.StreamHandler()
    if args.log_path is None:
        ch.setLevel(args.log_level)
    else:
        # File gets the requested level; console drops to WARNING.
        fh = WatchedFileHandler(args.log_path)
        fh.setLevel(args.log_level)
        fh.setFormatter(formatter)
        log.addHandler(fh)
        ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    asyncio = None  # TODO: Tox testing
    try:
        # With asyncio forced to None above, this always raises
        # AttributeError and falls through to the plain-queue branch.
        loop = asyncio.SelectorEventLoop()
        asyncio.set_event_loop(loop)
        operations = asyncio.Queue(loop=loop)
        results = asyncio.Queue(loop=loop)
    except AttributeError:
        loop = None
        operations = queue.Queue()
        results = queue.Queue()
    os.makedirs(args.output, exist_ok=True)
    try:
        path, proj = find_project(args.output)
        log.info("Using project {0}.".format(path.project))
    except StopIteration:
        log.info("No projects detected.")
        path, proj = make_project(args.output)
        log.info("Created {0}.".format(path.project))
    # Register the message handlers for each operation type.
    maloja.broker.handler.register(
        Survey, maloja.surveyor.Surveyor.survey_handler
    )
    maloja.broker.handler.register(
        Design, maloja.builder.Builder.design_handler
    )
    maloja.broker.handler.register(
        Inspection, maloja.inspector.Inspector.inspection_handler
    )
    args.url = args.url.rstrip(" /")
    if not args.command:
        # No command: run the interactive console until its tasks finish.
        console = maloja.console.create_console(operations, results, args, path, loop=loop)
        results = [
            i.result()
            for i in concurrent.futures.as_completed(set(console.tasks.values()))
            if i.done()
        ]
        return 0
    elif args.command == "plan":
        with open(args.input, "r") as data:
            return maloja.planner.report(data)
    # Other commands require a broker
    broker = maloja.broker.create_broker(operations, results, max_workers=64, loop=loop)
    reply = None
    # NOTE(review): redacted span preserved verbatim — not valid Python.
    while not isinstance(reply, Token): password = getpass.getpass(prompt="Enter your API password: "******"survey": operations.put((1, Survey(path)))
    elif args.command == "build":
        objs = []
        with open(args.input, "r") as data:
            objs = list(maloja.planner.read_objects(data.read()))
        objs = maloja.planner.check_objects(objs)
        if not objs:
            log.warning("Design failed object check. Please wait...")
        else:
            operations.put((1, Design(objs)))
    elif args.command == "inspect":
        objs = []
        with open(args.input, "r") as data:
            objs = list(maloja.planner.read_objects(data.read()))
        objs = maloja.planner.check_objects(objs)
        operations.put((1, Inspection(args.name, objs)))
    # Drain status/reply messages until a Stop arrives or the broker idles.
    while not isinstance(reply, Stop):
        try:
            status, reply = results.get(block=True, timeout=30)
        except queue.Empty:
            break
        else:
            if isinstance(status, Status):
                level = os.path.splitext(getattr(status.path, "file", ""))[0]
                log.info("{0:^10} update {1.job:04}".format(level, status))
            if reply is not None:
                log.info(reply)
            time.sleep(0)
    operations.put((2, Stop()))
    time.sleep(1)
    done, not_done = tasks = concurrent.futures.wait(
        set(broker.tasks.values()),
        timeout=6,
        return_when=concurrent.futures.FIRST_EXCEPTION
    )
    for task in not_done:
        log.warning("Not completed: {0}".format(task))
        log.debug(task.cancel())
    return 0
def _setup_logging():
    """Attach a WARNING-level watched-file handler unless in debug mode."""
    if app.debug:
        return
    handler = WatchedFileHandler(config.deployment.log_path)
    handler.setLevel(logging.WARNING)
    app.logger.addHandler(handler)