def log_handler(args, thread_wrapping=True):
    """Build a logbook ``NestedSetup`` from a config mapping or namespace.

    ``args`` may be a dict or an argparse-style namespace; recognised keys
    are ``quiet``, ``verbose``, ``stderr_level``, ``log_file`` and
    ``log_file_level``.  With ``thread_wrapping`` true, both handlers are
    wrapped so records are emitted from a background thread.
    """
    options = args if isinstance(args, dict) else vars(args)

    # Console handler: quiet/verbose flags override the configured level.
    if options.get("quiet"):
        console = ColorizedStderrHandler(level="ERROR")
    elif options.get("verbose"):
        console = ColorizedStderrHandler(level="DEBUG")
    else:
        console = ColorizedStderrHandler(
            level=options.get("stderr_level", "NOTICE").upper(),
            bubble=True)

    # File handler is optional; a NullHandler stands in when unset.
    log_file = options.get("log_file")
    if log_file:
        logfile = FileHandler(
            log_file,
            level=options.get("log_file_level", "DEBUG").upper(),
            bubble=True)
    else:
        logfile = NullHandler()

    if thread_wrapping:
        logfile = ThreadedWrapperHandler(logfile)
        console = ThreadedWrapperHandler(console)

    return NestedSetup([
        NullHandler(),  # catch everything else
        logfile,
        console,
    ])
def defLogging():
    """Build the module-global ``logging_setup`` NestedSetup.

    Reads the module globals ``debug``, ``logPath`` and ``loggingLevel``.
    Falls back to a console-only setup if handler construction fails.
    """
    global debug
    global logPath
    global loggingLevel
    global logging_setup
    try:
        if debug:
            # Debug mode: echo records to stdout and keep a rotating file log.
            logging_setup = NestedSetup([
                # make sure we never bubble up to the stderr handler
                # if we run out of setup handling
                NullHandler(),
                StreamHandler(
                    sys.stdout,
                    bubble=False,
                    level=loggingLevel
                ),
                TimedRotatingFileHandler(
                    logPath,
                    level=0,
                    backup_count=3,
                    bubble=True,
                    date_format='%Y-%m-%d',
                ),
            ])
        else:
            # Normal mode: buffer up to 1000 records and only flush them to
            # the file once an ERROR-level record arrives (FingersCrossed).
            logging_setup = NestedSetup([
                # make sure we never bubble up to the stderr handler
                # if we run out of setup handling
                NullHandler(),
                FingersCrossedHandler(
                    TimedRotatingFileHandler(
                        logPath,
                        level=0,
                        backup_count=3,
                        bubble=False,
                        date_format='%Y-%m-%d',
                    ),
                    action_level=ERROR,
                    buffer_size=1000,
                    # pull_information=True,
                    # reset=False,
                )
            ])
    except (KeyboardInterrupt, SystemExit):
        # Never swallow interpreter-exit signals.
        raise
    except:
        # Any other failure: degrade to console-only logging.
        print("Critical error attempting to setup logging. Falling back to console only.")
        logging_setup = NestedSetup([
            # make sure we never bubble up to the stderr handler
            # if we run out of setup handling
            NullHandler(),
            StreamHandler(
                sys.stdout,
                bubble=False
            )
        ])
def main(args):
    """Nikola CLI entry point: set logging flags, load ``conf.py`` from the
    site root, then dispatch to doit.

    :param args: command-line arguments (without the program name)
    :return: exit status from ``DoitNikola.run``
    """
    quiet = False
    if len(args) > 0 and args[0] == 'build' and '--strict' in args:
        LOGGER.notice('Running in strict mode')
        STRICT_HANDLER.push_application()
    # BUGFIX: parenthesize the quiet test.  The original expression parsed
    # as `(... and '-q' in args) or ('--quiet' in args)`, so `--quiet`
    # silenced logging for every command, not just `build`.
    if len(args) > 0 and args[0] == 'build' and ('-q' in args or '--quiet' in args):
        nullhandler = NullHandler()
        nullhandler.push_application()
        quiet = True
    global config
    root = get_root_dir()
    if root:
        os.chdir(root)
    sys.path.append('')
    try:
        import conf
        _reload(conf)
        config = conf.__dict__
    except Exception:
        # Only a parse error in an existing conf.py is fatal; a missing
        # conf.py just means "run with an empty config".
        if os.path.exists('conf.py'):
            msg = traceback.format_exc(0).splitlines()[1]
            LOGGER.error('In conf.py line {0}: {1}'.format(
                sys.exc_info()[2].tb_lineno, msg))
            sys.exit(1)
        config = {}
    site = Nikola(**config)
    return DoitNikola(site, quiet).run(args)
def run():
    """Benchmark: emit 500 warnings through a handler whose filter rejects
    every record, then verify nothing reached the stream."""
    sink = StringIO()
    drop_all = lambda record, handler: False
    with NullHandler():
        with StreamHandler(sink, filter=drop_all):
            for _ in xrange(500):
                log.warning('this is not handled')
    assert not sink.getvalue()
def configure_logging(log_level=None, log_file=None, simplified_console_logs=False):
    """
    This should be called once as early as possible in app startup to configure logging handlers and formatting.

    :param log_level: The level at which to record log messages (DEBUG|INFO|NOTICE|WARNING|ERROR|CRITICAL)
    :type log_level: str
    :param log_file: The file to write logs to, or None to disable logging to a file
    :type log_file: str | None
    :param simplified_console_logs: Whether or not to use the simplified logging format and coloring
    :type simplified_console_logs: bool
    """
    # Set datetimes in log messages to be local timezone instead of UTC
    logbook.set_datetime_format('local')

    # Redirect standard lib logging to capture third-party logs in our log files (e.g., tornado, requests)
    logging.root.setLevel(
        logging.WARNING
    )  # don't include DEBUG/INFO/NOTICE-level logs from third parties
    logbook.compat.redirect_logging(set_root_logger_level=False)

    # Add a NullHandler to suppress all log messages lower than our desired log_level. (Otherwise they go to stderr.)
    NullHandler().push_application()

    # Fall back to the configured default level when none was passed in.
    log_level = log_level or Configuration['log_level']
    format_string, log_colors = _LOG_FORMAT_STRING, _LOG_COLORS
    if simplified_console_logs:
        format_string, log_colors = _SIMPLIFIED_LOG_FORMAT_STRING, _SIMPLIFIED_LOG_COLORS

    # handler for stdout
    log_handler = _ColorizingStreamHandler(
        stream=sys.stdout,
        level=log_level,
        format_string=format_string,
        log_colors=log_colors,
        bubble=True,
    )
    log_handler.push_application()

    # handler for log file (note: always uses the full format, never the
    # simplified console format)
    if log_file:
        fs.create_dir(os.path.dirname(log_file))
        previous_log_file_exists = os.path.exists(log_file)
        event_handler = _ColorizingRotatingFileHandler(
            filename=log_file,
            level=log_level,
            format_string=_LOG_FORMAT_STRING,
            log_colors=_LOG_COLORS,
            bubble=True,
            max_size=Configuration['max_log_file_size'],
            backup_count=Configuration['max_log_file_backups'],
        )
        event_handler.push_application()
        if previous_log_file_exists:
            # Force application to create a new log file on startup.
            event_handler.perform_rollover(increment_logfile_counter=False)
        else:
            event_handler.log_application_summary()
def run():
    """Benchmark: push 500 warnings through a custom stream handler and
    confirm the output buffer stays empty."""
    buffer = StringIO()
    with NullHandler():
        with CustomStreamHandler(buffer):
            emitted = 0
            while emitted < 500:
                log.warning('this is not handled')
                emitted += 1
    assert not buffer.getvalue()
def run():
    """Benchmark: walk the handler context stack 100 times while three
    handlers (null, stream, file) are pushed."""
    tmp = NamedTemporaryFile()
    sink = StringIO()
    with NullHandler(), \
            StreamHandler(sink, level=WARNING), \
            FileHandler(tmp.name, level=ERROR):
        for _ in xrange(100):
            list(Handler.stack_manager.iter_context_objects())
def __init__(self, f=sys.stdout, level="info"):
    """Wire up ``debug``/``info`` handlers writing bare messages to *f*.

    At ``level == "info"`` debug records are discarded and info records go
    to *f*; at any other level a single DEBUG stream handler is used and
    ``info`` is left as None.
    """
    message_only = '{record.message}'
    if level != "info":
        self.debug = StreamHandler(f, level=DEBUG, format_string=message_only)
        self.info = None
    else:
        self.debug = NullHandler(level=DEBUG)
        self.info = StreamHandler(f, level=INFO, format_string=message_only)
def setUpClass(cls):
    """Point RQ at an empty Redis database and silence logbook output."""
    # Set up and remember the Redis connection (for sanity checking).
    conn = find_empty_redis_database()
    push_connection(conn)
    cls.testconn = conn
    # Shut up logbook for this thread.
    cls.log_handler = NullHandler()
    cls.log_handler.push_thread()
def main():
    """Run the url-to-markdown command with stderr logging in place."""
    stderr = StderrHandler()
    # stderr.formatter = color_formatter
    stderr.level = 2  # numeric logbook level
    silencer = NullHandler()
    with silencer.applicationbound():
        with stderr.applicationbound():
            with catch_exceptions(""):
                try:
                    dispatch_command(urltomarkdown)
                except SystemExit:
                    # catch_exceptions is a bit too catchy
                    pass
def setup(name, path='log', enable_debug=False):
    """
    Prepare a NestedSetup.

    :param name: the channel name
    :param path: the path where the logs will be written
    :param enable_debug: do we want to save the message at the DEBUG level
    :return a nested Setup
    """
    # One log file per severity: <path>/<name>_<level>.log
    path_tmpl = os.path.join(path, '{name}_{level}.log')
    info = path_tmpl.format(name=name, level='info')
    warn = path_tmpl.format(name=name, level='warn')
    err = path_tmpl.format(name=name, level='err')
    crit = path_tmpl.format(name=name, level='crit')
    # a nested handler setup can be used to configure more complex setups
    setup = [
        # make sure we never bubble up to the stderr handler
        # if we run out of setup handling
        NullHandler(),
        # then write messages that are at least info to a logfile
        TimedRotatingFileHandler(info, level='INFO', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        # then write messages that are at least warnings to a logfile
        TimedRotatingFileHandler(warn, level='WARNING', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        # then write messages that are at least errors to a logfile
        TimedRotatingFileHandler(err, level='ERROR', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        # then write messages that are at least critical errors to a logfile
        TimedRotatingFileHandler(crit, level='CRITICAL', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
    ]
    if enable_debug:
        # DEBUG file is inserted right after the NullHandler so it sees
        # every record before the higher-level handlers.
        debug = path_tmpl.format(name=name, level='debug')
        setup.insert(1, TimedRotatingFileHandler(debug, level='DEBUG',
                                                 encoding='utf-8',
                                                 date_format='%Y-%m-%d'))
    # Mail delivery is enabled only when the module-level SMTP settings
    # (src_server, smtp_server, smtp_port, dest_mails) are all configured.
    if src_server is not None and smtp_server is not None \
            and smtp_port != 0 and len(dest_mails) != 0:
        mail_tmpl = '{name}_error@{src}'
        from_mail = mail_tmpl.format(name=name, src=src_server)
        subject = 'Error in {}'.format(name)
        # errors should then be delivered by mail and also be kept
        # in the application log, so we let them bubble up.
        setup.append(MailHandler(from_mail, dest_mails, subject,
                                 level='ERROR', bubble=True,
                                 server_addr=(smtp_server, smtp_port)))
    return NestedSetup(setup)
def logging_context(path=None, level=None):
    """Generator context: stderr logging at *level* (default INFO), an
    optional bubbling file log at *path*, and stdlib logging redirected
    into logbook for the duration."""
    from logbook import StderrHandler, FileHandler, NullHandler
    from logbook.compat import redirected_logging

    stderr = StderrHandler(level=level or 'INFO', bubble=False)
    with NullHandler().applicationbound():
        with stderr.applicationbound():
            if not path:
                with redirected_logging():
                    yield
            else:
                # Create the log directory on demand before opening the file.
                parent = os.path.dirname(path)
                if not os.path.isdir(parent):
                    os.makedirs(parent)
                with FileHandler(path, bubble=True).applicationbound():
                    with redirected_logging():
                        yield
def main(config_file, **kwargs):
    """Subscribe to a RabbitMQ log queue and dispatch records to the
    configured handlers (stderr, file, CouchDB) until interrupted.

    :param config_file: path to a YAML file with a ``rabbitmq_logging``
        section and, when ``log_db`` is requested, a ``couchdb_logging``
        section
    """
    with open(config_file) as fh:
        # BUGFIX: yaml.load without an explicit Loader can instantiate
        # arbitrary Python objects from the config file; safe_load only
        # builds plain data structures.
        config = yaml.safe_load(fh)
    try:
        rmq_settings = config["rabbitmq_logging"]
    except KeyError:
        print("RabbitMQ logging not configured in {}".format(config_file))
        sys.exit()
    # NullHandler first so unmatched records are dropped silently.
    handlers = [NullHandler()]
    if not kwargs["quiet"]:
        handlers.append(StderrHandler(bubble=True))
    if kwargs["filename"]:
        handlers.append(FileHandler(kwargs["filename"], bubble=True))
    if kwargs["log_db"]:
        try:
            cdb_settings = config["couchdb_logging"]
        except KeyError:
            print("CouchDB logging not configured in {}".format(config_file))
            sys.exit()
        db_handler = DatabaseHandler(cdb_settings["couchdb_url"],
                                     backend=CouchDBBackend,
                                     db=cdb_settings["database"],
                                     bubble=True)
        handlers.append(db_handler)
    setup = NestedSetup(handlers)
    print("Now waiting for log messages")
    with setup:
        subscriber = RabbitMQSubscriber(rmq_settings["url"],
                                        queue=rmq_settings["log_queue"])
        try:
            subscriber.dispatch_forever()
        except KeyboardInterrupt:
            print("\nLog subscriber shutting down")
            subscriber.close()
        except Exception:
            print("Log subscriber quit (unexpectedly)")
def get_logs_dispatcher(uri=None, debug=False):
    """Start a background ZeroMQ log subscriber and return (uri, controller).

    Without *debug*, DEBUG records are swallowed and INFO+ is colorized to
    stderr.  When no *uri* is given an ephemeral local TCP port is chosen.
    """
    handlers = []
    if not debug:
        handlers.append(NullHandler(level=DEBUG))
        handlers.append(ColorizedStderrHandler(level=INFO))
    if not uri:
        # Find an open port.
        # This is a race condition as the port could be used between
        # the check and its binding. However, this is probably one of the
        # best solution without patching Logbook.
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        probe.bind(('localhost', 0))
        uri = 'tcp://{}:{}'.format(*probe.getsockname())
        probe.close()
    subscriber = ZeroMQSubscriber(uri, multi=True)
    return uri, subscriber.dispatch_in_background(setup=NestedSetup(handlers))
def print_check_config():
    """Run configuration sanity checks and print readable diagnostics.

    :return: True when no check reported a problem, False otherwise.
    """
    from pprint import pformat
    from logbook import NullHandler
    # Silence logbook while the checks run.
    with NullHandler():
        errors = check_config(chk_celery=True)
    if errors["conf"]:
        print("Configuration problem:", errors["conf"])
        return False
    import conf
    if errors["db_read"] or errors["db_write"]:
        print("Database configuration problem:",
              errors["db_read"] or errors["db_write"])
        print("Database uri:", conf.database.uri)
    if errors["default_tariff"]:
        print("Default tariff is not configured")
    if errors["redis_write"] or errors["redis_read"]:
        print("Redis configuration problem:",
              errors["redis_write"] or errors["redis_read"])
        print("Redis configuration:\n", pformat(conf.memdb))
    if errors["openstack_api"]:
        print("Openstack API is not available:", errors["openstack_api"])
        print("Openstack configuration:", pformat(conf.openstack))
    # BUGFIX: this check appeared twice (copy/paste), printing the
    # ceilometer diagnostics twice; report it once.
    if errors["ceilometer"]:
        print("Ceilometer is not available:", errors["ceilometer"])
        print("Openstack configuration:", pformat(conf.openstack))
    if errors["celery"]:
        print("Async tasks don't work:", errors["celery"])
    return not any(errors.values())
def __init__(self):
    """Bootstrap the MiniMVC kernel: resolve the base path, build the
    service container, install logging, and import the application."""
    # Basic setup: base path is two directory levels above this file.
    self.__basepath = os.path.dirname( os.path.realpath(os.path.realpath( __file__ ) + '/../../' ) )
    self.__router = Router()

    # Load container
    self.__container = self._create_container()

    # Setup logging: prefer a container-provided handler, otherwise
    # discard records via a NullHandler.
    if self.__container.has_service('log.handler'):
        self.__log_handler = self.__container.get_service('log.handler')
    else:
        self.__log_handler = NullHandler()
    self.__log_handler.push_thread()
    self.__logger = Logger('MiniMVC')
    self.__container.set_param('sys.log', self.__logger)

    # Import application (the import itself runs the app's module-level
    # setup; `app` is intentionally unused afterwards).
    sys.path.append(self.__basepath)
    import app

    self.__logger.info('Kernel started')
def main():
    """CLI entry point: parse arguments and run feature extraction with
    stderr + file logging active.

    :return: whatever ``run(args)`` returns, or None if extraction failed
        (the failure is logged).
    """
    parser = argparse.ArgumentParser(description='Extract features')
    parser.add_argument('-i', '--input', required=True,
                        help='Raw data input dir')
    parser.add_argument('-o', '--output', required=True,
                        help='Output dir')
    parser.add_argument('--filter', default='lowpass',
                        help='Filtering Type')
    parser.add_argument('--window', type=int, required=True,
                        help='Window length')
    parser.add_argument('--stride', type=int, required=True,
                        help='Stride length')
    # BUGFIX: help text said "extact".
    parser.add_argument('-f', '--featurelist', nargs='+',
                        help='Features to extract', required=True)
    parser.add_argument('--downsample', type=int, default=1,
                        help='Downsample step, default takes no downsample')
    parser.add_argument('--log', default='info',
                        choices=['debug', 'warning', 'info', 'error'],
                        help='Logging level, default info')
    parser.add_argument('--dataset',
                        choices=['ninapro-db1', 'ninapro-db2', 'ninapro-db3',
                                 'ninapro-db4', 'ninapro-db5', 'ninapro-db6',
                                 'ninapro-db7', 'biopatrec-db1',
                                 'biopatrec-db2', 'biopatrec-db3',
                                 'biopatrec-db4'],
                        help='Dataset choices', required=True)
    args = parser.parse_args()

    # Drop unhandled records, show >= --log on stderr, and keep a full
    # copy in <output>/log.
    with NullHandler().applicationbound():
        with StderrHandler(level=args.log.upper()).applicationbound():
            with FileHandler(
                os.path.join(ensure_dir(args.output), 'log'),
                level=args.log.upper(),
                bubble=True
            ).applicationbound():
                try:
                    return run(args)
                except Exception:
                    # BUGFIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit; only log-and-suppress
                    # ordinary exceptions.
                    log.exception('Failed')
''' if options.debug: savePath_filename = "Pyfa_debug.log" else: savePath_filename = "Pyfa.log" config.logPath = os.path.join(config.savePath, savePath_filename) try: if options.debug: logging_mode = "Debug" logging_setup = NestedSetup([ # make sure we never bubble up to the stderr handler # if we run out of setup handling NullHandler(), StreamHandler(sys.stdout, bubble=False, level=options.logginglevel), TimedRotatingFileHandler( config.logPath, level=0, backup_count=3, bubble=True, date_format='%Y-%m-%d', ), ]) else: logging_mode = "User" logging_setup = NestedSetup([ # make sure we never bubble up to the stderr handler
def get_logger(name, debug=True):
    """Return a Logger named after *name*'s basename.

    Pushes a stdout handler when *debug* is true, otherwise a NullHandler
    that discards everything; timestamps use the local timezone.
    """
    logbook.set_datetime_format('local')
    if debug:
        sink = StreamHandler(sys.stdout)
    else:
        sink = NullHandler()
    sink.push_application()
    return Logger(os.path.basename(name))
def defLogging():
    """Build the global ``logging_setup`` and redirect stdout/stderr into it.

    Reads the module globals ``debug``, ``logPath`` and ``loggingLevel``;
    on any setup failure falls back to console-only logging.
    """
    global debug
    global logPath
    global loggingLevel
    global logging_setup
    try:
        if debug:
            logging_setup = NestedSetup([
                # make sure we never bubble up to the stderr handler
                # if we run out of setup handling
                NullHandler(),
                StreamHandler(sys.stdout, bubble=False, level=loggingLevel),
                TimedRotatingFileHandler(
                    logPath,
                    level=0,
                    backup_count=3,
                    bubble=True,
                    date_format='%Y-%m-%d',
                ),
            ])
        else:
            # Buffer records; only flush to the file once an ERROR occurs.
            logging_setup = NestedSetup([
                # make sure we never bubble up to the stderr handler
                # if we run out of setup handling
                NullHandler(),
                FingersCrossedHandler(
                    TimedRotatingFileHandler(
                        logPath,
                        level=0,
                        backup_count=3,
                        bubble=False,
                        date_format='%Y-%m-%d',
                    ),
                    action_level=ERROR,
                    buffer_size=1000,
                    # pull_information=True,
                    # reset=False,
                )
            ])
    except (KeyboardInterrupt, SystemExit):
        # BUGFIX: the bare `except:` below also caught interpreter-exit
        # signals; re-raise them (the sibling defLogging already does).
        raise
    except Exception:
        print(
            "Critical error attempting to setup logging. Falling back to console only."
        )
        logging_setup = NestedSetup([
            # make sure we never bubble up to the stderr handler
            # if we run out of setup handling
            NullHandler(),
            StreamHandler(sys.stdout, bubble=False)
        ])

    with logging_setup.threadbound():
        # Output all stdout (print) messages as warnings
        try:
            sys.stdout = LoggerWriter(pyfalog.warning)
        except Exception:
            pyfalog.critical(
                "Cannot redirect. Continuing without writing stdout to log.")

        # Output all stderr (stacktrace) messages as critical
        try:
            sys.stderr = LoggerWriter(pyfalog.critical)
        except Exception:
            pyfalog.critical(
                "Cannot redirect. Continuing without writing stderr to log.")
def silence_log_output():
    """Fixture-style generator: mute logbook output for the duration, then
    push an INFO-level stdout handler back on afterwards."""
    muted = NullHandler()
    muted.push_application()
    yield
    restored = StreamHandler(sys.stdout, level="INFO")
    restored.push_application()
def main(args=None):
    """Nikola CLI entry point (file-loader variant).

    Configures console coloring and logging flags, locates and loads the
    configuration file (default ``conf.py``), then dispatches to doit.
    """
    colorful = False
    if sys.stderr.isatty() and os.name != 'nt':
        colorful = True

    ColorfulStderrHandler._colorful = colorful

    if args is None:
        args = sys.argv[1:]

    # Allow --conf=<path> to override the configuration file location.
    conf_filename = 'conf.py'
    conf_filename_changed = False
    for index, arg in enumerate(args):
        if arg[:7] == '--conf=':
            del args[index]
            conf_filename = arg[7:]
            conf_filename_changed = True
            break

    quiet = False
    if len(args) > 0 and args[0] == b'build' and b'--strict' in args:
        LOGGER.notice('Running in strict mode')
        STRICT_HANDLER.push_application()
    # BUGFIX: parenthesize the quiet test.  The old expression parsed as
    # `(... and b'-q' in args) or (b'--quiet' in args)`, so `--quiet`
    # silenced output for every command, not just `build`.
    if len(args) > 0 and args[0] == b'build' and (b'-q' in args or b'--quiet' in args):
        nullhandler = NullHandler()
        nullhandler.push_application()
        quiet = True
    global config

    # Those commands do not require a `conf.py`. (Issue #1132)
    # Moreover, actually having one somewhere in the tree can be bad, putting
    # the output of that command (the new site) in an unknown directory that is
    # not the current working directory. (does not apply to `version`)
    argname = args[0] if len(args) > 0 else None
    if argname and argname not in ['init', 'version'
                                   ] and not argname.startswith('import_'):
        root = get_root_dir()
        if root:
            os.chdir(root)
        # help does not need a config file, but can use one.
        needs_config_file = argname != 'help'
    else:
        needs_config_file = False

    sys.path.append('')
    try:
        if sys.version_info[0] == 3:
            loader = importlib.machinery.SourceFileLoader(
                "conf", conf_filename)
            conf = loader.load_module()
        else:
            conf = imp.load_source("conf", conf_filename)
        config = conf.__dict__
    except Exception:
        # A broken existing config is fatal; an explicitly requested but
        # missing config is fatal too; otherwise run with empty config.
        if os.path.exists(conf_filename):
            msg = traceback.format_exc(0)
            LOGGER.error('"{0}" cannot be parsed.\n{1}'.format(
                conf_filename, msg))
            sys.exit(1)
        elif needs_config_file and conf_filename_changed:
            LOGGER.error(
                'Cannot find configuration file "{0}".'.format(conf_filename))
            sys.exit(1)
        config = {}

    if conf_filename_changed:
        LOGGER.info("Using config file '{0}'".format(conf_filename))

    invariant = False
    if len(args) > 0 and args[0] == b'build' and b'--invariant' in args:
        try:
            import freezegun
            freeze = freezegun.freeze_time("2038-01-01")
            freeze.start()
            invariant = True
        except ImportError:
            req_missing(['freezegun'], 'perform invariant builds')

    if config:
        if os.path.exists(
                'plugins') and not os.path.exists('plugins/__init__.py'):
            with open('plugins/__init__.py', 'w') as fh:
                fh.write('# Plugin modules go here.')

    config['__colorful__'] = colorful
    config['__invariant__'] = invariant
    config['__quiet__'] = quiet
    config['__configuration_filename__'] = conf_filename
    site = Nikola(**config)
    DN = DoitNikola(site, quiet)
    if _RETURN_DOITNIKOLA:
        return DN
    _ = DN.run(args)
    if site.invariant:
        freeze.stop()
    return _
def main(args=None):
    """Nikola CLI entry point (import-conf variant).

    Sets up coloring and logging flags, imports ``conf.py`` from the site
    root, then dispatches to doit.
    """
    colorful = False
    if sys.stderr.isatty() and os.name != 'nt':
        colorful = True

    ColorfulStderrHandler._colorful = colorful

    if args is None:
        args = sys.argv[1:]

    quiet = False
    if len(args) > 0 and args[0] == b'build' and b'--strict' in args:
        LOGGER.notice('Running in strict mode')
        STRICT_HANDLER.push_application()
    # BUGFIX: parenthesize the quiet test; due to and/or precedence,
    # `--quiet` previously triggered quiet mode for any command.
    if len(args) > 0 and args[0] == b'build' and (b'-q' in args or b'--quiet' in args):
        nullhandler = NullHandler()
        nullhandler.push_application()
        quiet = True
    global config

    # Those commands do not require a `conf.py`. (Issue #1132)
    # Moreover, actually having one somewhere in the tree can be bad, putting
    # the output of that command (the new site) in an unknown directory that is
    # not the current working directory. (does not apply to `version`)
    argname = args[0] if len(args) > 0 else None
    if argname and argname not in ['init', 'version'
                                   ] and not argname.startswith('import_'):
        root = get_root_dir()
        if root:
            os.chdir(root)

    sys.path.append('')
    try:
        import conf
        _reload(conf)
        config = conf.__dict__
    except Exception:
        # Broken conf.py is fatal; missing conf.py means empty config.
        if os.path.exists('conf.py'):
            msg = traceback.format_exc(0)
            LOGGER.error('conf.py cannot be parsed.\n{0}'.format(msg))
            sys.exit(1)
        config = {}

    invariant = False
    if len(args) > 0 and args[0] == b'build' and b'--invariant' in args:
        try:
            import freezegun
            freeze = freezegun.freeze_time("2014-01-01")
            freeze.start()
            invariant = True
        except ImportError:
            req_missing(['freezegun'], 'perform invariant builds')

    if config:
        if os.path.exists(
                'plugins') and not os.path.exists('plugins/__init__.py'):
            with open('plugins/__init__.py', 'w') as fh:
                fh.write('# Plugin modules go here.')

    config['__colorful__'] = colorful
    config['__invariant__'] = invariant
    config['__quiet__'] = quiet
    site = Nikola(**config)
    _ = DoitNikola(site, quiet).run(args)
    if site.invariant:
        freeze.stop()
    return _
return s def Item2(s): w = Item1(s) s = s[len(w):].strip() return Item1(s) def level(s): return r_word.search(s).start() // 4 source, dest = [], [] current_level = -1 debugging = False if debugging: dhandler = NullHandler(level = DEBUG) dhandler.format_string = '{record.message}' dhandler.push_application() handler = StreamHandler(stdout, level = NOTICE) handler.format_string = '{record.message}' handler.push_application() for s in open("todo.txt"): l = level(s) debug("levels {}, {}".format(current_level, l)) s = s.strip() if not s: continue if l > current_level: d = join(downloads_home, *dest) if not isdir(d): mkdir(d)
sys.exit(1) except KeyboardInterrupt: print sys.exit(1) with context: # Imports are here to avoid importing anything before monkeypatching from maildump import app, start from maildump.web import assets assets.debug = app.debug = args.debug assets.auto_build = args.autobuild_assets app.config['MAILDUMP_HTPASSWD'] = HtpasswdFile( args.htpasswd) if args.htpasswd else None app.config['MAILDUMP_NO_QUIT'] = args.no_quit level = logbook.DEBUG if args.debug else logbook.INFO format_string = ( u'[{record.time:%Y-%m-%d %H:%M:%S}] {record.level_name:<8} {record.channel}: {record.message}' ) stderr_handler = ColorizedStderrHandler(level=level, format_string=format_string) with NullHandler().applicationbound(): with stderr_handler.applicationbound(): start(args.http_ip, args.http_port, args.smtp_ip, args.smtp_port, args.db) if __name__ == '__main__': main()
def NullLog():
    """Return a logbook handler that discards every record from DEBUG up."""
    sink = NullHandler(level=DEBUG)
    return sink
start = time.time() s = 0 for i in range(10000): s = proxy.call('add', 1, s) stop = time.time() logger.info( 'msgpack-rpc took {} seconds to perform {} calls, {} calls per second', stop - start, s, s / (stop - start)) start = time.time() for i in range(10000): proxy.notify('add', 1, s) proxy.call('add', 1, s) stop = time.time() logger.info( 'msgpack-rpc took {} seconds to perform {} notifications, {} notifies per second', stop - start, 10000, 10000 / (stop - start)) p.stop() if __name__ == "__main__": stderr_handler = ColorizedStderrHandler(level='DEBUG') null_handler = NullHandler() with null_handler.applicationbound(): with stderr_handler.applicationbound(): main()
def main(args=None):
    """Run Nikola."""
    colorful = False
    if sys.stderr.isatty() and os.name != 'nt' and os.getenv(
            'NIKOLA_MONO') is None and os.getenv('TERM') != 'dumb':
        colorful = True

    ColorfulStderrHandler._colorful = colorful

    if args is None:
        args = sys.argv[1:]

    # Keep the original (possibly bytes) argv for doit; work on a decoded copy.
    oargs = args
    args = [sys_decode(arg) for arg in args]

    conf_filename = 'conf.py'
    conf_filename_changed = False
    for index, arg in enumerate(args):
        if arg[:7] == '--conf=':
            del args[index]
            del oargs[index]
            conf_filename = arg[7:]
            conf_filename_changed = True
            break

    quiet = False
    strict = False
    if len(args) > 0 and args[0] == 'build' and '--strict' in args:
        LOGGER.notice('Running in strict mode')
        STRICT_HANDLER.push_application()
        strict = True
    # BUGFIX: parenthesize the quiet test.  The old expression parsed as
    # `(... and '-q' in args) or ('--quiet' in args)`, so `--quiet`
    # silenced output for every command, not just `build`.
    if len(args) > 0 and args[0] == 'build' and ('-q' in args or '--quiet' in args):
        NullHandler().push_application()
        quiet = True
    if not quiet and not strict:
        NullHandler().push_application()
        STDERR_HANDLER[0].push_application()
    global config

    original_cwd = os.getcwd()

    # Those commands do not require a `conf.py`. (Issue #1132)
    # Moreover, actually having one somewhere in the tree can be bad, putting
    # the output of that command (the new site) in an unknown directory that is
    # not the current working directory. (does not apply to `version`)
    argname = args[0] if len(args) > 0 else None
    if argname and argname not in ['init', 'version'
                                   ] and not argname.startswith('import_'):
        root = get_root_dir()
        if root:
            os.chdir(root)
        # Help and imports don't require config, but can use one if it exists
        needs_config_file = (argname != 'help') and not argname.startswith('import_')
        if needs_config_file:
            if root is None:
                LOGGER.error(
                    "The command could not be executed: You're not in a nikola website."
                )
                return 1
            else:
                LOGGER.info("Website root: '{0}'".format(root))
    else:
        needs_config_file = False

    # Load the config module in an isolated sys.path/sys.modules so the
    # site's conf.py cannot permanently pollute the interpreter state.
    old_path = sys.path
    old_modules = sys.modules
    try:
        sys.path = sys.path[:]
        sys.modules = sys.modules.copy()
        sys.path.insert(0, os.path.dirname(conf_filename))
        with open(conf_filename, "rb") as file:
            config = imp.load_module(conf_filename, file, conf_filename,
                                     (None, "rb", imp.PY_SOURCE)).__dict__
    except Exception:
        config = {}
        if os.path.exists(conf_filename):
            msg = traceback.format_exc(0)
            LOGGER.error('"{0}" cannot be parsed.\n{1}'.format(
                conf_filename, msg))
            return 1
        elif needs_config_file and conf_filename_changed:
            LOGGER.error(
                'Cannot find configuration file "{0}".'.format(conf_filename))
            return 1
    finally:
        sys.path = old_path
        sys.modules = old_modules

    if conf_filename_changed:
        LOGGER.info("Using config file '{0}'".format(conf_filename))

    invariant = False
    if len(args) > 0 and args[0] == 'build' and '--invariant' in args:
        try:
            import freezegun
            freeze = freezegun.freeze_time("2038-01-01")
            freeze.start()
            invariant = True
        except ImportError:
            req_missing(['freezegun'], 'perform invariant builds')

    if config:
        if os.path.isdir(
                'plugins') and not os.path.exists('plugins/__init__.py'):
            with open('plugins/__init__.py', 'w') as fh:
                fh.write('# Plugin modules go here.')

    config['__colorful__'] = colorful
    config['__invariant__'] = invariant
    config['__quiet__'] = quiet
    config['__configuration_filename__'] = conf_filename
    config['__cwd__'] = original_cwd
    site = Nikola(**config)
    DN = DoitNikola(site, quiet)
    if _RETURN_DOITNIKOLA:
        return DN
    _ = DN.run(oargs)

    if site.invariant:
        freeze.stop()
    return _
def main():
    """MailDump CLI entry point.

    Parses arguments, handles the --stop/--version shortcuts, validates
    paths, optionally daemonizes, and finally starts the SMTP/HTTP servers
    inside the daemon context with logbook logging installed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--smtp-ip', default='127.0.0.1', metavar='IP', help='SMTP ip (default: 127.0.0.1)')
    # BUGFIX: help strings said "deault".
    parser.add_argument('--smtp-port', default=1025, type=int, metavar='PORT', help='SMTP port (default: 1025)')
    parser.add_argument('--http-ip', default='127.0.0.1', metavar='IP', help='HTTP ip (default: 127.0.0.1)')
    parser.add_argument('--http-port', default=1080, type=int, metavar='PORT', help='HTTP port (default: 1080)')
    parser.add_argument('--db', metavar='PATH', help='SQLite database - in-memory if missing')
    parser.add_argument('--htpasswd', metavar='HTPASSWD', help='Apache-style htpasswd file')
    parser.add_argument('-v', '--version', help='Display the version and exit', action='store_true')
    parser.add_argument('-f', '--foreground', help='Run in the foreground (default if no pid file is specified)',
                        action='store_true')
    parser.add_argument('-d', '--debug', help='Run the web app in debug mode', action='store_true')
    parser.add_argument('-a', '--autobuild-assets', help='Automatically rebuild assets if necessary',
                        action='store_true')
    parser.add_argument('-n', '--no-quit', help='Do not allow clients to terminate the application',
                        action='store_true')
    parser.add_argument('-p', '--pidfile', help='Use a PID file')
    parser.add_argument('--stop', help='Sends SIGTERM to the running daemon (needs --pidfile)', action='store_true')

    args = parser.parse_args()
    if args.version:
        from maildump.util import get_version
        print('MailDump {0}'.format(get_version()))
        sys.exit(0)

    # Do we just want to stop a running daemon?
    if args.stop:
        if not args.pidfile or not os.path.exists(args.pidfile):
            print('PID file not specified or not found')
            sys.exit(1)
        try:
            pid = read_pidfile(args.pidfile)
        except ValueError as e:
            print('Could not read PID file: {0}'.format(e))
            sys.exit(1)
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as e:
            print('Could not send SIGTERM: {0}'.format(e))
            sys.exit(1)
        sys.exit(0)

    # Default to foreground mode if no pid file is specified
    if not args.pidfile and not args.foreground:
        # BUGFIX: message said "runnning".
        print('No PID file specified; running in foreground')
        args.foreground = True

    # Warn about relative paths and absolutize them
    if args.db and not os.path.isabs(args.db):
        args.db = os.path.abspath(args.db)
        print('Database path is relative, using {0}'.format(args.db))
    if args.htpasswd and not os.path.isabs(args.htpasswd):
        args.htpasswd = os.path.abspath(args.htpasswd)
        print('Htpasswd path is relative, using {0}'.format(args.htpasswd))

    # Check if the password file is valid
    if args.htpasswd and not os.path.isfile(args.htpasswd):
        print('Htpasswd file does not exist')
        sys.exit(1)

    # Conditionally check for name or filename property (PY3.x or filename PY2.X)
    PY2 = sys.version_info[0] == 2
    if PY2:
        asset_folder = os.path.join(pkgutil.get_loader('maildump').filename, 'static')
    else:
        asset_folder = os.path.join(pkgutil.get_loader('maildump').name, 'static')

    # Check if the static folder is writable
    if args.autobuild_assets and not os.access(asset_folder, os.W_OK):
        print('Autobuilding assets requires write access to {0}'.format(asset_folder))
        sys.exit(1)

    daemon_kw = {'monkey_greenlet_report': False,
                 'signal_map': {signal.SIGTERM: terminate_server,
                                signal.SIGINT: terminate_server}}

    if args.foreground:
        # Do not detach and keep std streams open
        daemon_kw.update({'detach_process': False,
                          'stdin': sys.stdin,
                          'stdout': sys.stdout,
                          'stderr': sys.stderr})

    pidfile = None
    if args.pidfile:
        pidfile = os.path.abspath(args.pidfile) if not os.path.isabs(args.pidfile) else args.pidfile
        if os.path.exists(pidfile):
            pid = read_pidfile(pidfile)
            if not os.path.exists(os.path.join('/proc', str(pid))):
                print('Deleting obsolete PID file (process {0} does not exist)'.format(pid))
                os.unlink(pidfile)
        daemon_kw['pidfile'] = TimeoutPIDLockFile(pidfile, 5)

    # Unload threading module to avoid error on exit (it's loaded by lockfile)
    if 'threading' in sys.modules:
        del sys.modules['threading']

    context = GeventDaemonContext(**daemon_kw)
    try:
        context.open()
    except lockfile.LockTimeout:
        print('Could not acquire lock on pid file {0}'.format(pidfile))
        print('Check if the daemon is already running.')
        sys.exit(1)
    except KeyboardInterrupt:
        print()
        sys.exit(1)

    with context:
        # Imports are here to avoid importing anything before monkeypatching
        from maildump import app, start
        from maildump.web import assets

        assets.debug = app.debug = args.debug
        assets.auto_build = args.autobuild_assets
        app.config['MAILDUMP_HTPASSWD'] = HtpasswdFile(args.htpasswd) if args.htpasswd else None
        app.config['MAILDUMP_NO_QUIT'] = args.no_quit

        level = logbook.DEBUG if args.debug else logbook.INFO
        format_string = (
            u'[{record.time:%Y-%m-%d %H:%M:%S}] {record.level_name:<8} {record.channel}: {record.message}'
        )
        stderr_handler = ColorizedStderrHandler(level=level, format_string=format_string)
        with NullHandler().applicationbound():
            with stderr_handler.applicationbound():
                start(args.http_ip, args.http_port, args.smtp_ip, args.smtp_port, args.db)