def init_logging(**keys):
    """
    file_log_name=None       # name of log file (defaults to output dir)
    file_log_lvl='DEBUG'     # level to log in file (None to not log to file)
    console_log_lvl='DEBUG'  # level to log to console
    show_warning=True        # show warning for not writing to the file or console.
    # valid log_lvls: None,DEBUG,INFO,WARNING,ERROR,CRITICAL
    """
    # set values
    file_log_name = keys.get('file_log_name', None)
    file_log_lvl = keys.get('file_log_lvl', 'DEBUG')
    console_log_lvl = keys.get('console_log_lvl', 'DEBUG')
    show_warning = keys.get('show_warning', True)

    # raise error if bad value passed
    valid_log_lvls = (None, 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
    if file_log_lvl not in valid_log_lvls:
        raise ValueError('bad param passed for file_log_lvl {0!r}'.format(file_log_lvl))
    if console_log_lvl not in valid_log_lvls:
        raise ValueError('bad param passed for console_log_lvl {0!r}'.format(console_log_lvl))

    # set root logging level
    logging.getLogger().setLevel(logging.DEBUG)

    # create logging formatter  .%(msecs)-3d
    f = '%(asctime)-23s:%(threadName)-10s:%(levelname)-7s:%(name)s.%(funcName)s:%(message)s'
    log_formatter = logging.Formatter(f)

    # create handlers based on request
    if file_log_lvl:
        # add file handler
        if file_log_name is None:
            file_log_name = MyLoggingBase.get_output_fd(
                'log{}.log'.format(MyLoggingBase.get_current_timestamp(for_file=True)))
        h = logging.FileHandler(file_log_name)  # ,mode='w') # to not append for the day
        h.setLevel(logging.__getattribute__(file_log_lvl))  # @UndefinedVariable
        h.setFormatter(log_formatter)
        logging.getLogger().addHandler(h)

    if console_log_lvl:
        # add console handler
        h2 = logging.StreamHandler(sys.stdout)  # to change the coloring! from stderr to stdout
        h2.setFormatter(log_formatter)
        h2.setLevel(logging.__getattribute__(console_log_lvl))  # @UndefinedVariable
        logging.getLogger().addHandler(h2)
    elif show_warning:
        print '======================================='
        print 'not showing log in console per request!'
        print file_log_name
        print '======================================='
        logging.warning('=======================================')
        logging.warning('not showing log in console per request!')
        logging.warning('=======================================')

    if not file_log_lvl and show_warning:
        logging.warning('=======================================')
        logging.warning('= not saving log to file per request! =')
        logging.warning('=======================================')
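# A hedged usage sketch for init_logging above; the level choices are examples,
# and passing file_log_lvl=None sidesteps the MyLoggingBase-based default file name:
init_logging(console_log_lvl='INFO', file_log_lvl=None, show_warning=False)
logging.info('console-only logging is now configured')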
def setFileLog(self, level, fname, encoding='ascii', maxbytes=0, backupCount=3):
    """
    Setup file logging. This adds the corresponding logging channel,
    and sets its log level, formatting, etc.

    @param string  level        Defines the verbosity (see setScreenLog for details)
    @param string  fname        Name of the log file (w/ path)
    @param optional string  encoding     Encoding to use. Defaults to 'ascii'
    @param optional integer maxbytes     If this is != 0, it enables automatic log
                                         rotation to take place at the given size
    @param optional integer backupCount  How many copies to keep on log rotation
                                         (default: 3). Has no effect with
                                         maxbytes=0 (obviously).
    """
    if maxbytes == 0:
        self.fh = logging.FileHandler(fname, 'a', encoding)
    else:
        from logging.handlers import RotatingFileHandler
        self.fh = RotatingFileHandler(fname, 'a', maxbytes, backupCount, encoding)
    #fh.setFormatter( logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") )
    self.fh.setFormatter(logging.Formatter(
        "%(asctime)s %(name)-10s %(levelname)-8s %(message)s"))
    try:
        self.fh.setLevel(logging.__getattribute__(level.upper()))
    except AttributeError:
        self.fh.setLevel(logging.WARNING)
    self.logger.addHandler(self.fh)
def logger(self):
    """Return a configured logger."""
    logger = logging.getLogger("flowtype")
    if not self.logging_configured:
        log_level = get_settings("log_level", "info")
        if log_level not in [
            "debug", "info", "warning", "error", "critical",
        ]:
            log_level = "warning"
        logger.propagate = False
        logger.setLevel(logging.__getattribute__(log_level.upper()))
        log_handler = logging.StreamHandler(sys.stdout)
        log_handler.setFormatter(
            logging.Formatter("%(name)s: %(levelname)s - %(message)s")
        )
        logger.addHandler(log_handler)
        self.logging_configured = True
    return logger
def init_logging(conf):
    global logger
    if conf.has_key('loglevel'):
        loglevel = conf['loglevel'].upper()
    else:
        loglevel = 'INFO'
    if conf.has_key('logfile'):
        logfile = conf['logfile']
        if logfile[:2] == "./":
            # convert relative to absolute path
            logfile = os.path.realpath(os.path.dirname(__file__)) + logfile[1:]
    else:
        logfile = None
    if conf.has_key('logformat'):
        logformat = conf['logformat']
    else:
        logformat = "%(asctime)s %(levelname)-8s %(module)s.%(funcName)s:%(lineno)d %(message)s"
    logging.basicConfig(level=logging.__getattribute__(loglevel),
                        format=logformat,
                        filename=logfile,
                        filemode='a')
    logger = logging.getLogger("upq")
    return logger
def __init__(self, config):
    self.config = config
    self.connection = None
    self.is_online = False
    self.logger = logging.getLogger('ChannelServer')
    handler = logging.StreamHandler()
    formatter = logging.Formatter(
        config.get('Logging', 'log_format', raw=True))
    handler.setFormatter(formatter)
    self.logger.addHandler(handler)
    self.logger.setLevel(
        logging.__getattribute__(self.config.get('Logging', 'log_level')))
    # Component config section
    self.jid = None
    self.allow_register = False
    self.component_binding = False
    self.use_route_wrap = False
    # MainServer config section
    self.main_server = None
    # Auth config section
    self.sasl_username = None
    self.secret = None
    # Storage section
    self.storage = init_storage(config)
    # Do the set-up
    self._parse_config(config)
def configure_logging():
    try:
        __logger.setLevel(logging.__getattribute__(LOGLEVEL))
    except:
        __logger.setLevel(logging.CRITICAL)
    # rotating logger
    handler = logging.handlers.RotatingFileHandler(
        'log', maxBytes=10240, backupCount=LOG_FILES)
    __logger.addHandler(handler)
def get_log_level_as_num(lvl):
    lvl_num = None
    if isinstance(lvl, str):
        try:
            lvl_num = logging.__getattribute__(lvl.upper())
        except AttributeError:
            log.warn("Failed to set log level to '%s'" % lvl)
            return
    else:
        lvl_num = lvl
    return lvl_num
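# An editorial aside on the pattern above: `logging.__getattribute__(name)` works
# because module objects support attribute lookup, but the conventional spellings
# are `getattr(logging, name)` or `logging.getLevelName(name)` (which maps a level
# name string to its number). A minimal sketch of the same lookup written that way;
# the helper name and default are illustrative:
import logging

def level_from_name(name, default=logging.WARNING):
    """Map a level name like 'debug' to its numeric value, falling back to `default`."""
    value = getattr(logging, str(name).upper(), None)
    return value if isinstance(value, int) else default

assert level_from_name('debug') == logging.DEBUG
assert level_from_name('no-such-level') == logging.WARNING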
def set_config(self, **kwargs):
    """Set the configuration of this back-end."""
    uri = kwargs['uri']
    database = create_database(uri)
    self.store = Store(database)
    self.logger = logging.getLogger('StormStorageBackend')
    handler = logging.StreamHandler()
    formatter = logging.Formatter(kwargs['log_format'])
    handler.setFormatter(formatter)
    self.logger.addHandler(handler)
    self.logger.setLevel(
        logging.__getattribute__(kwargs['log_level']))
def set_loglevel(lvl):
    global log
    lvl_num = None
    if isinstance(lvl, str):
        try:
            lvl_num = logging.__getattribute__(lvl.upper())
        except AttributeError:
            log.warn("Failed to set log level to '%s'" % lvl)
            return
    else:
        lvl_num = lvl
    log.setLevel(lvl_num)
def parse_main_config(filename):
    index = 0
    config = {}
    f = open(filename)
    for index, line in enumerate(f):
        line = line.strip()
        if line == "" or CommentPattern.match(line):
            continue
        try:
            directive, value = line.split()
        except ValueError:
            raise ConfigurationParseError("Malformed configuration line",
                                          filename, index)
        if directive not in MAIN_DEFAULTS:
            raise ConfigurationParseError("Invalid directive: %s" % directive,
                                          filename, index)
        if directive == "threads":
            try:
                value = int(value)
            except:
                raise ConfigurationParseError("Invalid thread number",
                                              filename, index)
        if directive == "loglevel":
            try:
                value = logging.__getattribute__(value.upper())
            except AttributeError:
                raise ConfigurationParseError("Invalid log level",
                                              filename, index)
        config[directive] = value
    try:
        host, port = config['bind'].split(':')
    except ValueError:
        raise ConfigurationParseError(
            "Invalid bind option: %s" % config['bind'], filename, index)
    if host == '':
        host = '127.0.0.1'
    try:
        port = int(port)
    except:
        raise ConfigurationParseError("Invalid port: %s" % port,
                                      filename, index)
    config['bind'] = (host, port)
    for key in MAIN_DEFAULTS:
        if not key in config:
            config[key] = MAIN_DEFAULTS[key]
    return config
def configure_logging():
    mylevel = logging.DEBUG
    try:
        newlevel = logging.__getattribute__(settings.RTS_LOG_LEVEL)
        if type(newlevel) == int:
            mylevel = newlevel
    except:
        pass  # well, we can't log the error yet, can we!
    logging.basicConfig(
        level=mylevel,
        format='[%(asctime)s] %(levelname)s (%(filename)s:%(lineno)d) %(message)s',
        filename=settings.RTS_LOGFILE,
        filemode='a')
def configure_logging(app):
    """
    ABOUT
        Sets up our logging style, file rotations, etc.

    TODO
        Think of a clever way to pull in requests that occurred around the
        same time some logging happened, to avoid any manual comparison
        between a web log file and this log file
    """
    megabyte = 1048576
    logsize = 25 * megabyte
    rotations = 10
    # default name flask.log
    name = 'flask.log'
    if app.settings['DEBUG']:
        level = logging.DEBUG
    else:
        level = logging.INFO
    if app.settings['LOGGING']:
        if app.settings['LOGGING']['SIZE_MB']:
            logsize = int(app.settings['LOGGING']['SIZE_MB']) * megabyte
        if app.settings['LOGGING']['LEVEL']:
            level = logging.__getattribute__(app.settings['LOGGING']['LEVEL'])
        if app.settings['LOGGING']['NAME']:
            name = app.settings['LOGGING']['NAME']
        if app.settings['LOGGING']['ROTATIONS']:
            rotations = app.settings['LOGGING']['ROTATIONS']
    # use the computed name and size (the original hard-coded both, leaving
    # `name` and `logsize` dead)
    handler = RotatingFileHandler(name, maxBytes=logsize, backupCount=rotations)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[%(pathname)s:%(funcName)s:%(lineno)d]'))
    app.logger.addHandler(handler)
def setScreenLog(self, level):
    """
    Setup screen logging. This adds the corresponding logging channel,
    and sets its log level and formatting.

    @param string level  Defines the verbosity. Accepts the levels defined
                         by the logging package, plus 'NONE' to switch it
                         off. If an invalid level was specified, it falls
                         back to 'ERROR'. This is case insensitive (will be
                         converted to upper())
    """
    #ch.setFormatter( logging.Formatter("* %(name)s %(levelname)s %(message)s") )
    self.ch.setFormatter(
        logging.Formatter("* %(name)-12s %(levelname)-8s %(message)s"))
    try:
        self.ch.setLevel(logging.__getattribute__(level.upper()))
    except AttributeError:
        self.ch.setLevel(logging.ERROR)
    self.logger.addHandler(self.ch)
def __init__(self, **kwargs):
    self.logger = logging.getLogger("apx")
    self.logger.setLevel(logging.DEBUG)
    slh = logging.handlers.SysLogHandler(address='/dev/log')
    slh.setLevel(logging.__getattribute__(kwargs['log_level'].upper()))
    self.logger.addHandler(slh)
    if not kwargs['quiet']:
        she = logging.StreamHandler(sys.stderr)
        she.setLevel(logging.ERROR)
        self.logger.addHandler(she)
        sho = logging.StreamHandler(sys.stdout)
        # 40 is logging.ERROR; each verbosity step lowers the threshold one level
        sho.setLevel(40 - kwargs['verbosity'] * 10)
        sho.addFilter(StdoutFilter())
        self.logger.addHandler(sho)
def post(self):
    """Log message from client."""
    try:
        args = logSchema().load(request.json)
    except ValidationError as e:
        return {"message": f"incorrect arguments: {e}"}, 400
    # add IP address (interpolated; the original left request.remote_addr as literal text)
    message = f"[{request.remote_addr}] {args['message']}"
    # get the enum from the string log level
    level = logging.__getattribute__(args["logLevel"].upper())
    # log the message
    logger.log(level, message)
    return 200
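# The handler above validates its payload with a marshmallow `logSchema` that is
# not shown. A minimal sketch of what such a schema might look like, assuming only
# the two fields the handler reads (`message` and `logLevel`); the field options
# are illustrative, not the original definition:
from marshmallow import Schema, fields, validate

class logSchema(Schema):
    message = fields.Str(required=True)
    logLevel = fields.Str(
        required=True,
        validate=validate.OneOf(
            ["debug", "info", "warning", "error", "critical"]),
    )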
def __init__(self, **kwargs):
    self.logger = logging.getLogger("FuseGUI")
    self.logger.setLevel(logging.DEBUG)
    slh = logging.handlers.SysLogHandler(
        address='/dev/log',
        facility=logging.handlers.SysLogHandler.LOG_MAIL)
    slh.setFormatter(logging.Formatter('%(name)s[%(process)d]: %(message)s'))
    slh.setLevel(logging.__getattribute__(kwargs['log_level'].upper()))
    self.logger.addHandler(slh)
    if not kwargs['quiet']:
        she = logging.StreamHandler(sys.stderr)
        she.setLevel(logging.ERROR)
        self.logger.addHandler(she)
        sho = logging.StreamHandler(sys.stdout)
        sho.setLevel(40 - kwargs['verbosity'] * 10)  # 40 == logging.ERROR
        sho.addFilter(self.StdoutFilter())
        self.logger.addHandler(sho)
def __new__(cls, name, bases, attrs, **kwargs):
    log_level = get_settings(active_view(), 'log_level', 'info')
    if log_level not in ['debug', 'info', 'warning', 'error', 'fatal']:
        log_level = 'warning'
    cls._logger = logging.getLogger('anacondaST3')
    cls._logger.setLevel(logging.__getattribute__(log_level.upper()))
    log_handler = logging.StreamHandler(sys.stdout)
    log_handler.setFormatter(
        logging.Formatter('%(name)s: %(levelname)s - %(message)s'))
    cls._logger.addHandler(log_handler)
    cls._logger.propagate = False
    obj = super().__new__(cls, name, bases, attrs)
    for method in ['debug', 'info', 'warning', 'error', 'fatal']:
        setattr(obj, method, functools.partial(obj.write, method))
    return obj
def get_logger(name, format_string, level=None):
    """
    Creates a generic python logger with configurable name, formatter,
    and log level.

    Parameters:
        name (str): The name of the logger.
        format_string (str): Format string to use when creating the logger.
        level (str): Culls out messages with lower priority level.
                     (debug, info, warning, error, critical)

    Returns:
        logging.Logger: An instance logger.
    """
    level = "info" if level is None else level
    log_level = logging.__getattribute__(level.upper())
    logging.basicConfig(format=format_string)
    root = logging.getLogger()
    for h in root.handlers:
        h.setFormatter(logging.Formatter(format_string))
    logger = logging.getLogger(name)
    logger.setLevel(log_level)
    return logger
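# A hedged usage sketch for the helper above; the logger name and format string
# are examples, not values from the original project:
log = get_logger("worker", "%(asctime)s %(levelname)s %(message)s", level="debug")
log.debug("queue drained")  # emitted, since the logger level is DEBUG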
from config import Config
import core
from core import NotFoundError, CanNotUpdateError
from auth import authenticate

app = Flask(__name__)
store = DictStore()
KVSessionExtension(store, app)

# configuration
app.config.from_object(Config)

# logger setting
handler = FileHandler(app.config['LOGFILE'], encoding='utf-8')
handler.setLevel(logging.__getattribute__(app.config['LOGLEVEL']))
app.logger.addHandler(handler)

# constants and helper functions
_MAX_CSRF_KEY = 18446744073709551616L

if hasattr(random, 'SystemRandom'):
    randrange = random.SystemRandom().randrange
else:
    randrange = random.randrange

API_VERSION = '1.0'


def jsonify(data):
    return json.dumps(data, ensure_ascii=False)
return "Value error in YAML: {0}".format(e) else: return "Undefined input_type: {0}".format(request.form['input_type']) # If ve have empty var array or other errors we need to catch it and show try: rendered_jinja2_tpl = jinja2_tpl.render(values) except (ValueError, TypeError) as e: return "Error in your values input filed: {0}".format(e) if bool(int(request.form['showwhitespaces'])): # Replace whitespaces with a visible character (will be grayed with javascript) rendered_jinja2_tpl = rendered_jinja2_tpl.replace(' ', u'•') return rendered_jinja2_tpl.replace('\n', '<br />') if __name__ == "__main__": # Set up logging app.logger.setLevel(logging.__getattribute__(config.LOGGING_LEVEL)) file_handler = logging.handlers.RotatingFileHandler(filename=config.LOGGING_LOCATION, maxBytes=10*1024*1024, backupCount=5) file_handler.setFormatter(logging.Formatter(config.LOGGING_FORMAT)) file_handler.setLevel(logging.__getattribute__(config.LOGGING_LEVEL)) app.logger.addHandler(file_handler) app.run( host=config.HOST, port=config.PORT, debug=config.DEBUG, )
return "Value error in YAML: {0}".format(e) else: return "Undefined input_type: {0}".format(request.form['input_type']) # If ve have empty var array or other errors we need to catch it and show try: rendered_jinja2_tpl = jinja2_tpl.render(values) except (ValueError, TypeError) as e: return "Error in your values input filed: {0}".format(e) if bool(int(request.form['showwhitespaces'])): # Replace whitespaces with a visible character (will be grayed with javascript) rendered_jinja2_tpl = rendered_jinja2_tpl.replace(' ', u'•') return escape(rendered_jinja2_tpl).replace('\n', '<br />') if __name__ == "__main__": # Set up logging app.logger.setLevel(logging.__getattribute__(config.LOGGING_LEVEL)) file_handler = logging.handlers.RotatingFileHandler(filename=config.LOGGING_LOCATION, maxBytes=10*1024*1024, backupCount=5) file_handler.setFormatter(logging.Formatter(config.LOGGING_FORMAT)) file_handler.setLevel(logging.__getattribute__(config.LOGGING_LEVEL)) app.logger.addHandler(file_handler) app.run( host=config.HOST, port=config.PORT, debug=config.DEBUG, )
cp = ConfigParser({
    'pid_file': PID_FILE,
    'log_file': LOG_FILE,
    'threads': THREADS_NUM,
    'bind_host': BIND_HOST,
    'bind_port': BIND_PORT,
    'plugin_dir': PLUGINS_DIR,
    'log_level': LOG_LEVEL
})

try:
    cp.readfp(open(CONFIG_FILE))
except Exception, e:
    print "Error reading config file: " + repr(e)
    sys.exit(1)

logfile = cp.get('main', 'log_file')
plugin_dir = cp.get('main', 'plugin_dir')
bind_host = cp.get('main', 'bind_host')
bind_port = cp.getint('main', 'bind_port')
threads = cp.getint('main', 'threads')
pidfile = cp.get('main', 'pid_file')
logdir = os.path.dirname(logfile)

try:
    loglevel = logging.__getattribute__(cp.get('main', 'log_level').upper())
except AttributeError:
    loglevel = logging.__getattribute__(LOG_LEVEL)

watcher = Watcher(COLLECTOR_CONFIG_DIR, logfile, plugin_dir, threads, loglevel)
app = Flask(__name__)


def __startApplication():
    watcher.start()
    app.run(host=bind_host, port=bind_port)


def startApplicationAsDaemon():
    os.closerange(0, 3)
    sys.stdout = open('%s/stdout.log' % logdir, "a")
    sys.stderr = open('%s/stderr.log' % logdir, "a")
    startDaemon(__startApplication, pidfile)
def _output(self, log_line):
    return LOGGER.log(
        logging.__getattribute__(log_line['meta']['level']),
        log_line)
    ),
    optparse.make_option('-l', '--loglevel',
        type='choice',
        choices=['INFO', 'DEBUG'],
        default='INFO',
    ),
    optparse.make_option('-r', '--readonly',
        action='store_true',
        default=False,
        help='dont interpret the data',
    ),
    optparse.make_option('-b', '--baud',
        default=9600,
        type=int,
        help='set baud-rate',
    ),
)

parser = optparse.OptionParser(option_list=option_list)
options, args = parser.parse_args()

logging.basicConfig(level=logging.__getattribute__(options.loglevel))
logging.debug(options)
logging.info("Commit to database is %s" % options.commit)

while(1):
    mon = Monitor(**options.__dict__)
    mon.run()
def parseConf(configFile):
    '''
    Reads and parses the configuration file. It also creates a reference
    to the logger created from the configuration file.
    Returns options, plugins, logger
    If the config file cannot be parsed, it loads the default config from
    conf/default.conf
    '''
    # create logger for startup file
    logger = createLogger('/var/log/ceph-influxdb-metricsCollector-startup.log')
    # create dicts for plugins and options
    plugins = {}
    options = {}
    try:
        # set up the config parser
        config = ConfigParser.ConfigParser()
        config.readfp(open(configFile))  # try to read config file
        # reporting
        options['clusters'] = {}
        # for each cluster get array of configurationFile,keyringFile
        for k, v in config.items('reporting'):
            # split list of conf,keyring by comma
            argList = v.split(',')
            c = argList[0]
            keyring = argList[1]
            if c == 'none':
                c = None
            if keyring == 'none':
                keyring = None
            options['clusters'][k] = {'conf': c, 'keyring': keyring}
        # hosts
        options['host'] = config.get('connection', 'host')
        options['port'] = config.get('connection', 'port')
        # connection settings
        options['db'] = config.get('connection', 'db')
        options['user'] = config.get('connection', 'user')
        options['password'] = config.get('connection', 'pass')
        options['ssl'] = config.getboolean('connection', 'ssl')
        options['verify_ssl'] = config.getboolean('connection', 'verify_ssl')
        options['retention_policy'] = config.get('connection', 'retention_policy')
        options['compresison_level'] = config.getint('connection', 'compresison_level')
        options['batch_size'] = config.getint('connection', 'batch_size')
        # load logging settings
        options['loggingPath'] = config.get('logging', 'path')
        options['loggingLevel'] = config.get('logging', 'level')
        # load plugins
        for k, v in config.items('plugins'):
            # remove outer brackets
            v = v.strip('[]')
            plugins[k] = set(v.split(','))
    except Exception as e:
        logger.critical('The {0} file is misconfigured. Cannot load configuration: {1}'.format(configFile, e))
        # use default configuration
        return parseConf(defaultConf)
    # if retention policy set to 'none', set to None
    if options['retention_policy'].lower() == 'none':
        options['retention_policy'] = None
    # format the path into an absolute path to the directory of the log
    if '[BaseDirectory]' in options['loggingPath']:
        options['loggingPath'] = os.path.join(script_dir, options['loggingPath'][16:])
    # format the value of level into the ENUM equivalent
    if options['loggingLevel'] in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
        options['loggingLevel'] = logging.__getattribute__(options['loggingLevel'])
    else:
        # anything else set to default
        logger.warning('Could not understand logging option: "{0}". Defaulting to level WARNING'.format(options['loggingLevel']))
        options['loggingLevel'] = logging.WARNING
    try:
        # make path to the log file
        options['loggingPath'] = os.path.join(options['loggingPath'], 'ceph-influxdb-metricsCollector.log')
        # get logger
        logger = createLogger(options['loggingPath'], loggingLevel=options['loggingLevel'])
    except Exception as e:
        logger.critical('The {0} file is misconfigured. Cannot create logger: {1}'.format(configFile, e))
        # use default configuration
        return parseConf(defaultConf)
    return options, plugins, logger
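# `createLogger` is referenced above but not shown. A minimal sketch of what it
# plausibly does, assuming it returns a named logger with a single file handler;
# the format string and handler choice are guesses, not the project's actual code:
import logging

def createLogger(path, loggingLevel=logging.WARNING):
    logger = logging.getLogger(path)
    logger.setLevel(loggingLevel)
    if not logger.handlers:  # avoid stacking handlers on repeated calls
        handler = logging.FileHandler(path)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)-8s %(message)s'))
        logger.addHandler(handler)
    return logger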
# Set formatter for logging
formatter = logging.Formatter(
    "%(asctime)s %(ip)s |%(levelname)s| %(message)s <%(filename)s:%(lineno)d>")

# Log identifier/file will be the same as the file being run
try:
    name = os.path.basename(__main__.__file__).split('.')[0]
except AttributeError, e:
    name = 'main'
log = logging.getLogger(name)

# Set log level to Debug (TODO: This should be pulled from config file)
loglevel = None
try:
    loglevel = Config.get("loglevel")
    log.setLevel(logging.__getattribute__(loglevel))
except:
    print "Unable to set loglevel to %s. Defaulting to DEBUG" % loglevel
    log.setLevel(logging.DEBUG)

logfile = Config.get('logfile')  # %s/../logs/%s.log' % (os.path.dirname(os.path.realpath(__file__)), name)
fh = logging.handlers.TimedRotatingFileHandler(logfile, 'midnight')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
log.addHandler(fh)

# Extend log module with Info class defined above.
log = logging.LoggerAdapter(log, Info())
import logging
from flask import Flask, jsonify, abort, request
from config import config
from model import User, ApiKey, Article, db
from functools import wraps
from datetime import datetime
from peewee import IntegrityError
import validators
from validators import ValidationFailure
import hashlib

app = Flask(__name__)
SALT = config.get_database_salt()

#logging.basicConfig(filename=config.get_log_path(), level=logging.__getattribute__(config.get_log_level()))
logging.basicConfig(level=logging.__getattribute__(config.get_log_level()))


def locate_user(username, apikey):
    '''
    Select user from database using Username + API Key.
    Returns None upon failure.
    '''
    if not username or not apikey:
        logging.info('Trying to locate user but Username/APIKey empty.')
        return None
    results = User.select().join(ApiKey).where(
        (User.username == username) & (ApiKey.key == apikey))
    if results.count() != 1:
        logging.info("Unable to locate user.")
        return None
log = logging.getLogger(__pkgname__)

streamformatter = logging.Formatter('%(levelname)s: %(message)s')
fileformatter = logging.Formatter(
    '%(levelname)s %(asctime)s %(module)s.%(funcName)s: %(message)s')

streamhandler = logging.StreamHandler()
streamhandler.setFormatter(streamformatter)

filehandler = logging.FileHandler(cfg.get('logging', 'filename'), mode='a')  # has a default
filehandler.setFormatter(fileformatter)

log.addHandler(filehandler)
log.addHandler(streamhandler)

level = cfg.get('logging', 'level')
log.setLevel(logging.__getattribute__(level.upper()))


def set_filehandler(filename):
    """
    Remove the existing `filehandler` and set a new one.
    `global filehandler` will become the new handler.
    """
    fh = logging.FileHandler(filename)
    fh.setFormatter(fileformatter)
    global filehandler
    log.removeHandler(filehandler)
    filehandler = fh
    log.addHandler(fh)
def main():
    parser = ArgumentParser()
    parser.add_argument(
        'directory',
        help='Your TV directory. Can be any directory containing TV shows.'
    )
    parser.add_argument(
        'dest_directory',
        nargs='?',
        help='Destination TV directory when renaming/symlinks is enabled. (optional)',
        default=None
    )
    parser.add_argument(
        '-u', '--update',
        dest='update_database',
        action='store_true',
        default=appconfig.get('database', 'update', bool),
        help='Only import episodes which don\'t already exist in database. (Default)'
    )
    parser.add_argument(
        '-c', '--clear',
        dest='clear_database',
        action='store_true',
        default=appconfig.get('database', 'clear', bool),
        help='Overwrite existing database (if any) in directory. (overrides --update)'
    )
    parser.add_argument(
        '-r', '--rename-files',
        dest='rename_files',
        action='store_true',
        default=appconfig.get('importer', 'rename-files', bool),
        help='Rename and organize TV show files while importing.'
    )
    parser.add_argument(
        '-s', '--symlinks',
        dest='symlinks',
        action='store_true',
        default=appconfig.get('importer', 'symlinks', bool),
        help='Create a virtual filesystem of TV dir with symlinks. Requires `dest_directory`.'
    )
    parser.add_argument(
        '-n', '--naming-scheme',
        dest='naming_scheme',
        default=appconfig.get('importer', 'naming-scheme'),
        help='The naming scheme to use when renaming.'
    )
    parser.add_argument(
        '--force-rename',
        dest='force_rename',
        action='store_true',
        default=appconfig.get('importer', 'force-rename', bool),
        help='Allow overwriting of existing files when renaming.'
    )
    parser.add_argument(
        '-b', '--brute',
        dest='brute',
        action='store_true',
        default=appconfig.get('importer', 'brute', bool),
        help="Don't do quality comparison for duplicate episodes in database, just replace."
    )
    parser.add_argument(
        '-e', '--extract-rars',
        dest='extract_rars',
        action='store_true',
        default=appconfig.get('importer', 'unrar', bool),
        help='Extract episodes which are in rar format before scraping them. '
             'Rar files will be deleted afterwards.'
    )
    parser.add_argument(  # TODO: this doesn't work (because the value is set in __init__)
        '-l', '--log-file',
        dest='log_file',
        help='Path to log file.'
    )
    parser.add_argument(
        '-v', '--verbosity',
        dest='log_level',
        default=appconfig.get('logging', 'level'),
        help='Set log level. '
             'Available values (in order of highest -> lowest verbosity): '
             'DEBUG, INFO, WARNING, ERROR, FATAL'
    )
    parser.add_argument(
        '--clear-log-file',
        dest='clear_log_file',
        action='store_true',
        default=False,
        help='Clear any existing log file.'
    )
    args = parser.parse_args()
    argsd = {}
    argsd['logging'] = {
        'level': args.log_level,
        'clear_log_file': args.clear_log_file
    }
    argsd['database'] = {
        'clear': args.clear_database,
        'update': args.update_database
    }
    argsd['importer'] = {
        'unrar': args.extract_rars,
        'delete-rar': args.extract_rars,
        'brute': args.brute,
        'rename-files': args.rename_files,
        'naming-scheme': args.naming_scheme,
        'force-rename': args.force_rename,
        'symlinks': args.symlinks,
    }
    if not args.dest_directory:
        args.dest_directory = args.directory
    if args.log_file:
        argsd['logging']['filename'] = args.log_file
    appconfig.import_to_runtime_parser(argsd)
    lfn = appconfig.get('logging', 'filename')
    if args.clear_log_file:
        try:
            os.unlink(lfn)
        except:
            pass
    logger.set_filehandler(lfn)
    logger.log.setLevel(logging.__getattribute__(args.log_level.upper()))
    entrypoint.start_importer(args.directory, args.dest_directory)
def init_logging(cls, **keys):
    """
    file_log_name=None       # name of log file (defaults to output dir)
    file_log_lvl='DEBUG'     # level to log in file (None to not log to file)
    console_log_lvl='DEBUG'  # level to log to console
    show_warning=True        # show warning for not writing to the file or console.
    config_file='ABS/FILE/PATH.[ini|json]'  # load config details from config file
    # valid log_lvls: None,DEBUG,INFO,WARNING,ERROR,CRITICAL
    """
    # check config file first!
    config_file = keys.get('config_file', None)
    if config_file:
        # extension?
        config_ext = os.path.splitext(config_file)[1].lower()
        if config_ext == '.json':
            # imports
            import json
            from logging.config import dictConfig
            # open and parse json
            with open(config_file, 'r') as f:
                config = json.load(f)
            dictConfig(config)  # load!
        elif config_ext == '.ini':
            from logging.config import fileConfig
            fileConfig(config_file)
        else:
            raise ValueError(
                'unable to load extension for config file, "{0}"'.format(config_file))
        return  # exit

    # set values
    file_log_name = keys.get('file_log_name', None)
    file_log_lvl = keys.get('file_log_lvl', 'DEBUG')
    console_log_lvl = keys.get('console_log_lvl', 'DEBUG')
    show_warning = keys.get('show_warning', True)

    # raise error if bad value passed
    valid_log_lvls = (None, 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
    if file_log_lvl not in valid_log_lvls:
        raise ValueError('bad param passed for file_log_lvl {0!r}'.format(file_log_lvl))
    if console_log_lvl not in valid_log_lvls:
        raise ValueError('bad param passed for console_log_lvl {0!r}'.format(console_log_lvl))

    # set logging level
    logging.getLogger().setLevel(logging.DEBUG)

    # create logging formatter  .%(msecs)-3d
    fmt = '%(asctime)-23s:%(threadName)-10s:%(levelname)-7s:%(name)s.%(funcName)s:%(message)s'
    log_formatter = logging.Formatter(fmt)

    # create handlers based on request
    if file_log_lvl:
        # --- add file handler ---
        # set default name
        if file_log_name is None:
            file_log_name = 'log.log'
        # set default directory
        directory = os.path.dirname(file_log_name)
        if directory == '':
            # get_output_fd inside this will return this class's path output dir
            file_log_name = cls.get_fd(filename=file_log_name, fd='output', call_depth=2)
            directory = os.path.dirname(file_log_name)
        # create parent directory(s) if needed
        if not os.path.exists(directory):
            os.makedirs(directory)
        #fhndl = logging.FileHandler(file_log_name)#,mode='w') # to not append for the day
        from logging.handlers import RotatingFileHandler
        fhndl = RotatingFileHandler(
            filename=file_log_name,
            mode='a',
            maxBytes=10485760,  # 10MB
            backupCount=3)
        fhndl.setLevel(logging.__getattribute__(file_log_lvl))  #@UndefinedVariable pylint: disable=no-member
        fhndl.setFormatter(log_formatter)
        logging.getLogger().addHandler(fhndl)

    if console_log_lvl:
        # add console handler
        shndl = logging.StreamHandler(sys.stdout)  # change the coloring! from stderr to stdout
        shndl.setFormatter(log_formatter)
        shndl.setLevel(logging.__getattribute__(console_log_lvl))  #@UndefinedVariable pylint: disable=no-member
        logging.getLogger().addHandler(shndl)
    elif show_warning:
        print('=======================================')
        print('not showing log in console per request!')
        print(file_log_name)
        print('=======================================')
        logging.warning('=======================================')
        logging.warning('not showing log in console per request!')
        logging.warning('=======================================')

    if not file_log_lvl and show_warning:
        logging.warning('=======================================')
        logging.warning('= not saving log to file per request! =')
        logging.warning('=======================================')
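# A hedged sketch of the kind of dict the `.json` branch above feeds to
# dictConfig, written inline as a Python dict here. The handler name and format
# string are illustrative; only the version/formatters/handlers/root keys are
# the standard dictConfig schema:
from logging.config import dictConfig

dictConfig({
    'version': 1,
    'formatters': {
        'plain': {'format': '%(asctime)s %(levelname)-7s %(name)s %(message)s'},
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'plain',
            'stream': 'ext://sys.stdout',
        },
    },
    'root': {'level': 'DEBUG', 'handlers': ['console']},
})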
import yaml
import asyncio
import logging

with open("./config.yml", "r") as ymlfile:
    cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)

logcfg = cfg["logging"]
logging.basicConfig(
    format=logcfg["format"],
    datefmt=logcfg["datefmt"],
    level=logging.__getattribute__(logcfg["level"].upper()),
)

if __name__ == "__main__":
    from cycler import Cycler

    cycler = Cycler(cfg)
    asyncio.run(cycler.cycle_forever())
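# For reference, a hedged sketch of the `logging` section config.yml needs for
# the basicConfig call above, shown as the equivalent Python dict. The concrete
# values are examples; only the three key names (format, datefmt, level) come
# from the code:
example_cfg = {
    "logging": {
        "format": "%(asctime)s %(levelname)s %(message)s",
        "datefmt": "%Y-%m-%d %H:%M:%S",
        "level": "info",  # upper()'d into logging.INFO by the code above
    }
}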
    keys = ['ip']
    keys.extend(self.__dict__.keys())
    return keys.__iter__()


# Set formatter for logging
formatter = logging.Formatter(
    "%(asctime)s %(ip)s |%(levelname)s| %(message)s <%(filename)s:%(lineno)d>")

# Log identifier/file will be the same as the file being run
name = os.path.basename(__main__.__file__).split('.')[0]
log = logging.getLogger(name)

# Set log level to Debug (TODO: This should be pulled from config file)
loglevel = None
try:
    loglevel = Config.get("loglevel")
    log.setLevel(logging.__getattribute__(loglevel))
except:
    print "Unable to set loglevel to %s. Defaulting to DEBUG" % loglevel
    log.setLevel(logging.DEBUG)

logfile = Config.get('logfile')  # %s/../logs/%s.log' % (os.path.dirname(os.path.realpath(__file__)), name)
fh = logging.handlers.TimedRotatingFileHandler(logfile, 'midnight')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
log.addHandler(fh)

# Extend log module with Info class defined above.
log = logging.LoggerAdapter(log, Info())
import os
import datetime
import logging
import subprocess
from logging import getLogger, StreamHandler, Formatter, FileHandler

from slack_log_handler import SlackLogHandler

from config import config

config_log = config['log']

LOG_FILE_DIR = None
if config_log.get('LOG_DIR'):
    LOG_FILE_DIR = '{}/log_{}.txt'.format(
        config_log['LOG_DIR'],
        datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
    # create the log directory if it does not exist yet
    subprocess.run("dir={}; [ ! -e $dir ] && mkdir -p $dir".format(
        config_log['LOG_DIR']), shell=True)

LOG_LEVEL_BASE = logging.__getattribute__(config_log['LOG_LEVEL_BASE'])
LOG_LEVEL_FILE = logging.__getattribute__(config_log['LOG_LEVEL_FILE'])
LOG_LEVEL_SLACK = logging.__getattribute__(config_log['LOG_LEVEL_SLACK'])
SLACK_WEBHOOK_URL = config_log.get('SLACK_WEBHOOK_URL')

HOSTNAME = os.environ.get('HOSTNAME') if os.environ.get(
    'HOSTNAME') is not None else os.uname()[1]


def get_module_logger(name=None):
    global loggers
    if 'loggers' not in globals():
        loggers = {}
parser.add_option('--config',
                  dest='config_file',
                  default='conf/channel_server.conf',
                  help='The configuration file to use.')
options, args = parser.parse_args()
if len(args) > 0:
    parser.error('Garbage args after command line.')
if not os.path.isfile(options.config_file):
    parser.error('Specified config file %s does not exist!'
                 % options.config_file)

config = ConfigParser.ConfigParser()
config.read(options.config_file)

logger = logging.getLogger('main')
handler = logging.StreamHandler()
formatter = logging.Formatter(
    config.get('Logging', 'log_format', raw=True))
handler.setFormatter(formatter)
logger.setLevel(logging.__getattribute__(
    config.get('Logging', 'log_level')))
logger.addHandler(handler)

channel_server = ChannelServer(config)
if not channel_server.xmpp_connect():
    logger.fatal('Could not connect to server, or password mismatch!')
    sys.exit(1)

# Set the signal handlers
signal.signal(signal.SIGINT, sigHandler)
signal.signal(signal.SIGTERM, sigHandler)

channel_server.run()
def main(config_ini):
    print 'Starting up main'
    _settings = configparser.SafeConfigParser()
    with open(config_ini) as fp:
        _settings.readfp(fp)
    settings = dict(_settings.items('app:main'))
    #formatting = dict(_settings.items('formatter_generic'))
    logger_settings = dict(_settings.items('logger_uploader'))
    settings.update(parse_asset_settings(settings, dev_ini_path=config_ini))

    fh = RotatingFileHandler(
        logger_settings.get('filename', 'uploader.log'),
        maxBytes=int(logger_settings.get('maxBytes', 10000)),
        encoding='utf-8'
    )
    lvl = logger_settings.get('level', 'DEBUG').upper()
    lvl = logging.__getattribute__(lvl)
    fh.setLevel(lvl)
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s')
    fh.setFormatter(formatter)
    log.addHandler(fh)
    log.setLevel(lvl)

    url_worker = settings['zmq.worker_socket']
    url_client = settings['zmq.socket']

    context = zmq.Context(1)
    # The 'frontend' facing the clients where new jobs are being sent
    clients = context.socket(zmq.PULL)
    clients.bind(url_client)
    # The 'backend' facing the workers where received jobs are being pushed
    workers = context.socket(zmq.PUSH)
    workers.bind(url_worker)

    conn = pymongo.Connection(
        host=settings['db.mongo.host'],
        port=int(settings['db.mongo.port'])
    )
    db = conn[settings['db.mongo.collection_name']]

    cloud = None
    if not settings['store_locally']:
        log.info('Initializing cloud connection...')
        cloud = settings['service'](**settings)
        cloud.connect()

    save_path = settings['save_path']
    for i in range(int(settings['zmq.workers'])):
        worker = UploadWorker(
            name='[worker-thread-{}]'.format(i),
            imager=ImageProcessor(
                db=db,
                image_save_path=settings['save_path']
            ),
            context=context,
            worker_url=url_worker,
            db=db,
            cloud=cloud,
            save_path=save_path
        )
        worker.start()

    try:
        log.info('Starting zmq streamer')
        log.info('Now waiting for jobs...')
        zmq.device(zmq.STREAMER, clients, workers)
    except KeyboardInterrupt:
        pass

    # We never get here... but if we do, shut down!
    log.info('Shutting down...')
    clients.close()
    workers.close()
    context.term()
logger = logging.getLogger("Sample") # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) if len(sys.argv) > 1: if sys.argv[1] == "log": ch.setFormatter( logging.Formatter( "%(asctime)s : %(levelname)s : %(name)s : %(message)s")) elif sys.argv[1] == "color": ch.setFormatter(CustomFormatter()) if len(sys.argv) > 2: logger.setLevel(logging.__getattribute__(sys.argv[2])) else: logger.setLevel(logging.DEBUG) logger.addHandler(ch) # logger.debug("debug message") # logger.info("info message") # logger.warning("warning message") # logger.error("error message") # logger.critical("critical message") import random import time for _ in range(100):
        if len(threads) <= settings.MAXIMUM_CRAWLER_THREADS:
            logger.debug(u'Creating new thread %d', len(threads))
            thread = FetchIt(page)
            threads.append(thread)
            thread.start()
        logger.debug(u'Sleeping for 60 seconds')
        time.sleep(60)


from django.core.management.base import NoArgsCommand
from django.conf import settings
from django.utils.daemonize import become_daemon

logger = logging.getLogger('crawler')
if settings.LOG_LEVEL is not None:
    logger.setLevel(logging.__getattribute__(settings.LOG_LEVEL))
    # enable logging to file if requested
    if settings.LOG_TO_FILE:
        fh = logging.FileHandler("crawler.log")
        # This needs to be False to stop the FileHandler outputting to
        # stdout
        logger.propagate = False
        logger.addHandler(fh)
    else:
        # enable logging to the stdout
        ch = logging.StreamHandler()
        logger.addHandler(ch)
else:
    logger.setLevel(sys.maxint)