def add_handlers(self, logger, handlers):
    """Add handlers to a logger from a list of names."""
    for h in handlers:
        try:
            logger.addHandler(self.config['handlers'][h])
        except Exception as e:
            raise ValueError('Unable to add handler %r' % h) from e

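# A minimal usage sketch for add_handlers(). The real configurator class is
# not shown in the snippet above, so SimpleNamespace stands in for it here;
# the config layout ({'handlers': {name: handler}}) is inferred from the
# lookup in the loop, not a documented contract.
import logging
from types import SimpleNamespace

cfg = SimpleNamespace(config={'handlers': {'console': logging.StreamHandler()}})
add_handlers(cfg, logging.getLogger('app'), ['console'])
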
import logging
import sys

def setup_logger(level=logging.INFO):
    # First obtain a logger
    logger = logging.getLogger('GoogleScraper')
    logger.setLevel(level)
    ch = logging.StreamHandler(stream=sys.stderr)
    ch.setLevel(level)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # Add the logger to the module's namespace so other code in this module
    # can refer to it as a plain global name.
    setattr(sys.modules[__name__], 'logger', logger)

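# Hedged usage sketch: after setup_logger() runs, the module-level name
# `logger` exists even though it was never bound by a normal assignment;
# the setattr() call above injects it into this module's globals.
setup_logger(logging.DEBUG)
logger.debug('logger was injected into the module namespace')
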
def setup_loggers(logger, *args, **kwargs):
    """
    Additional configuration for Celery.

    This is the only way to add/change handlers for Celery logging.

    :param logger: root logger of Celery
    :param args:
    :param kwargs:
    :return:
    """
    formatter = logging.Formatter(
        '[%(asctime)s: %(levelname)s] %(name)s: %(message)s')
    file_handler = logging.FileHandler('logs/celery.log')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

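# setup_loggers() above matches the signature Celery passes to its logging
# signals. A sketch of wiring it up via the after_setup_logger signal: the
# signal itself is real Celery API, but exactly how the original project
# registered the handler is an assumption.
from celery.signals import after_setup_logger

after_setup_logger.connect(setup_loggers)
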
def _setup_standard_logger():
    """A helper to init the standard logger."""
    logger = logging.getLogger(__package__)
    # For the meantime we allow only one handler.
    # This is the simplest way to avoid duplicates.
    if logger.handlers:
        return logger
    # Create a handler with logging.DEBUG as the default logging level,
    # meaning all messages will be processed by the handler.
    ch = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    ch.setFormatter(formatter)
    ch.setLevel(logging.DEBUG)
    logger.addHandler(ch)
    return logger

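# Usage sketch: because of the `if logger.handlers` guard above, calling the
# helper repeatedly is safe and never stacks duplicate handlers (the single
# handler count assumes a fresh interpreter where nothing else has touched
# the package logger).
log = _setup_standard_logger()
log_again = _setup_standard_logger()
assert log is log_again and len(log.handlers) == 1
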
def initLogger(config):
    logger = logging.getLogger('cactus')
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    fh = logging.FileHandler('runinfo.log', mode='w')
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    logger.addHandler(ch)
    logger.addHandler(fh)

async def on_ready():
    await bot.change_presence(status=discord.Status.online,
                              activity=discord.Game('a!'))
    # Report who the bot is logged in as and set up file logging.
    logger = logging.getLogger('discord')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename='discord_alcebot.log',
                                  encoding='utf-8', mode='w')
    handler.setFormatter(
        logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
    logger.addHandler(handler)
    print('Logged in as')
    print(bot.user.name)
    print(bot.user.id)
    print('valid token')
    print('passcode: ' + str(passcode))
    print('------')

logging.debug("Test") if __name__ == "__main__": main() # =================================================================== """ rotate log, split log files when size limit is reached """ logLevel = logging.DEBUG lcdLogger = logging.getLogger("lcd") lcdLogger.setLevel(logLevel) formatter = logging.Formatter("%(asctime)s-%(name)s-%(levelname)s-%(pathname)s(%(funcName)s): %(message)s") rfh = logging.handlers.RotatingFileHandler("lcd.log", maxBytes=1024 * 1024, backupCount=2) rfh.setFormatter(formatter) lcdLogger.addHandler(rfh) # ==================================================================== import logger logger = logging.getLogger("stock") hdlr = logging.FileHandler("stock.log") formatter = logging.Formatter("%(asctime)s-%(levelname)s:%(message)s") hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG) logget.debug("123 error")
""" Removes old versions of Lambda functions. """ import logging import sys from pathlib import Path file = Path(__file__).resolve() sys.path.append(str(file.parent)) import logger import boto3 # Initialize log logger.logger_init() logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler()) logger.propagate = False try: CLIENT = boto3.client('lambda', region_name='eu-west-1') except Exception as exception: logger.error(str(exception), exc_info=True) # Number of versions to keep KEEP_LAST = 10 def clean_lambda_versions(event, context): """ List all Lambda functions and call the delete_version function. Check if the paginator token exist that's included if more results are available.
import logging

from flask import Flask
from flask_restful import Api, reqparse
from flask_sqlalchemy import SQLAlchemy

log = logging.getLogger(__name__)

res = Flask(__name__)
res.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
res.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
api = Api(res)
db = SQLAlchemy(res)
parser = reqparse.RequestParser()

logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.FileHandler(
    '/home/stajyer/.virtualenvs/rest/flaskk/deneme.log')
logger.addHandler(handler)


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(50), unique=True, nullable=False)
    email = db.Column(db.String(60), unique=True, nullable=False)
    password = db.Column(db.Integer, unique=True, nullable=False)

    def __repr__(self):
        return '<user %r>' % self.username

    def save(self):
        db.session.add(self)
        db.session.commit()

def _install_loggers(cp, handlers, disable_existing):
    """Create and install loggers"""
    # configure the root first
    llist = cp["loggers"]["keys"]
    llist = llist.split(",")
    llist = list(_strip_spaces(llist))
    llist.remove("root")
    section = cp["logger_root"]
    root = logging.root
    log = root
    if "level" in section:
        level = section["level"]
        log.setLevel(level)
    for h in root.handlers[:]:
        root.removeHandler(h)
    hlist = section["handlers"]
    if len(hlist):
        hlist = hlist.split(",")
        hlist = _strip_spaces(hlist)
        for hand in hlist:
            log.addHandler(handlers[hand])

    #and now the others...
    #we don't want to lose the existing loggers,
    #since other threads may have pointers to them.
    #existing is set to contain all existing loggers,
    #and as we go through the new configuration we
    #remove any which are configured. At the end,
    #what's left in existing is the set of loggers
    #which were in the previous configuration but
    #which are not in the new configuration.
    existing = list(root.manager.loggerDict.keys())
    #The list needs to be sorted so that we can
    #avoid disabling child loggers of explicitly
    #named loggers. With a sorted list it is easier
    #to find the child loggers.
    existing.sort()
    #We'll keep the list of existing loggers
    #which are children of named loggers here...
    child_loggers = []
    #now set up the new ones...
    for log in llist:
        section = cp["logger_%s" % log]
        qn = section["qualname"]
        propagate = section.getint("propagate", fallback=1)
        logger = logging.getLogger(qn)
        if qn in existing:
            i = existing.index(qn) + 1  # start with the entry after qn
            prefixed = qn + "."
            pflen = len(prefixed)
            num_existing = len(existing)
            while i < num_existing:
                if existing[i][:pflen] == prefixed:
                    child_loggers.append(existing[i])
                i += 1
            existing.remove(qn)
        if "level" in section:
            level = section["level"]
            logger.setLevel(level)
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        logger.propagate = propagate
        logger.disabled = 0
        hlist = section["handlers"]
        if len(hlist):
            hlist = hlist.split(",")
            hlist = _strip_spaces(hlist)
            for hand in hlist:
                logger.addHandler(handlers[hand])

    #Disable any old loggers. There's no point deleting
    #them as other threads may continue to hold references
    #and by disabling them, you stop them doing any logging.
    #However, don't disable children of named loggers, as that's
    #probably not what was intended by the user.
    #for log in existing:
    #    logger = root.manager.loggerDict[log]
    #    if log in child_loggers:
    #        logger.level = logging.NOTSET
    #        logger.handlers = []
    #        logger.propagate = 1
    #    elif disable_existing_loggers:
    #        logger.disabled = 1
    _handle_existing_loggers(existing, child_loggers, disable_existing)

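# An illustrative fileConfig-style INI that _install_loggers() consumes. The
# section layout follows the documented logging.config file format; the
# concrete logger names are made up for the example, and the [handlers] /
# [formatters] sections (read elsewhere, not by this function) are elided.
#
#   [loggers]
#   keys=root,app
#
#   [logger_root]
#   level=WARNING
#   handlers=console
#
#   [logger_app]
#   level=DEBUG
#   handlers=console
#   qualname=app
#   propagate=0
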
# (1) All levels of log records must be captured, so the logger's effective
#     level has to be set to the lowest level: DEBUG.
# (2) Logs go to two different destinations, so the logger needs two
#     handlers; both destinations are disk files, so both are file handlers.
# (3) all.log must be rotated by time, so it needs
#     logging.handlers.TimedRotatingFileHandler; error.log has no rotation
#     requirement, so a plain FileHandler is enough.
# (4) The two log files use different formats, so each handler gets its own
#     formatter.
import logging
import logging.handlers

# Define the logger
logger = logging.getLogger("mylogger")
logger.setLevel(logging.DEBUG)

rf_handler = logging.handlers.TimedRotatingFileHandler(
    "all.log", when="midnight", interval=1, backupCount=7, atTime=None)
rf_handler.setFormatter(
    logging.Formatter("%(asctime)s-%(levelname)s-%(message)s"))

f_handler = logging.FileHandler("error.log")
f_handler.setLevel(logging.ERROR)
f_handler.setFormatter(logging.Formatter(
    "%(asctime)s-%(levelname)s-%(filename)s[:%(lineno)d]-%(message)s"))

# Attach the handlers to the logger
logger.addHandler(rf_handler)
logger.addHandler(f_handler)

logger.debug("debug message")
logger.info("info message")
logger.warning("warning message")
logger.error("error message")
logger.critical("critical message")

parser.add_argument(
    'file_name',
    help="base name for the output .pkl file",
)
args = parser.parse_args()

tBC_lists = get_clone_info_fast(args.trio)
final_network = merge_network(tBC_lists)
with open(args.file_name + '.pkl', 'wb') as f:
    pickle.dump(final_network, f)

if __name__ == "__main__":
    logger = logging.getLogger('network_analysis_log')
    logger.setLevel(logging.DEBUG)
    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    # create formatter
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # add formatter to ch
    ch.setFormatter(formatter)
    # add ch to logger
    logger.addHandler(ch)
    main()

'''Program to explore the logging module in Python.'''
import logging

import requests

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

file_handler = logging.FileHandler("logger.log")
formatter = logging.Formatter(
    "%(asctime)s:%(name)s:%(message)s:%(levelname)s")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

# logging.basicConfig(filename="logger.log", level=logging.DEBUG,
#                     format="%(filename)s:%(levelname)s:%(message)s")

try:
    r = requests.get("http://httpbin.org/basic-auth/user/pass",
                     auth=("user", "pass"))
    logger.info(r.url)
except Exception as e:
    logger.error(e)