def init(level, sentry_dsn, colored=False):
    """Attach file, Sentry and stdout handlers to the root logger.

    :param level: name of a logging level (e.g. ``'INFO'``) for the
        stdout handler.
    :param sentry_dsn: DSN used to build the raven client (gevent transport).
    :param colored: when true, format stdout output with colorlog.
    """
    root = logging.getLogger()
    root.setLevel(0)  # let every record through; each handler filters itself

    file_handler = logging.FileHandler('client_log.txt', encoding='utf-8')
    file_handler.setLevel(logging.INFO)
    root.addHandler(file_handler)

    sentry_handler = SentryHandler(
        raven.Client(sentry_dsn, transport=GeventedHTTPTransport))
    sentry_handler.setLevel(logging.ERROR)
    root.addHandler(sentry_handler)

    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(getattr(logging, level))
    if colored:
        from colorlog import ColoredFormatter
        console_handler.setFormatter(ColoredFormatter(
            "%(log_color)s%(message)s%(reset)s",
            log_colors={
                'CRITICAL': 'bold_red',
                'ERROR': 'red',
                'WARNING': 'yellow',
                'INFO': 'green',
                'DEBUG': 'blue',
            }
        ))
    root.addHandler(console_handler)

    # Stamp the start of this logging session.
    root.info(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
    root.info('==============================================')
def set_logger(cls, filename, to_sentry=False):
    """Reset the root logger and attach a file (or console) handler,
    plus an optional Sentry handler.

    :param filename: log file name, joined onto ``config['logging']['path']``.
    :param to_sentry: also forward ERROR records to Sentry when a DSN
        is configured.
    """
    logger = logging.getLogger()
    logger.setLevel(getattr(logging, config['logging']['level'].upper()))
    logger.handlers = []  # drop anything previously attached

    formatter = logging.Formatter(
        '[%(levelname)s %(asctime)s.%(msecs)d %(module)s:%(lineno)d]: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    if config['logging']['path']:
        sink = logging.handlers.RotatingFileHandler(
            filename=os.path.join(config['logging']['path'], filename),
            maxBytes=config['logging']['max_size'],
            backupCount=config['logging']['num_backups']
        )
    else:
        # send to console instead of file
        sink = logging.StreamHandler()
    sink.setFormatter(formatter)
    logger.addHandler(sink)

    if to_sentry and config['sentry_dsn']:
        sentry_handler = SentryHandler(config['sentry_dsn'])
        sentry_handler.setLevel('ERROR')
        logger.addHandler(sentry_handler)
def on_inst_register_clicked(self, *args, **kwargs): '''send the registration data as sentry info log message ''' # create the handler first from raven import Client from raven.handlers.logging import SentryHandler sentry_client = Client('https://*****:*****@' 'app.getsentry.com/45704') handler = SentryHandler(sentry_client) handler.setLevel(logging.INFO) # the registration logger gets the above handler registrations = logging.getLogger('bauble.registrations') registrations.setLevel(logging.INFO) registrations.addHandler(handler) # produce the log record registrations.info([(key, getattr(self.model, key)) for key in self.widget_to_field_map.values()]) # remove the handler after usage registrations.removeHandler(handler) # disable button, so user will not send registration twice self.view.widget_set_sensitive('inst_register', False)
def sentry_logger(sender, logger, loglevel, logfile, format, colorize, **kw):
    """Celery logging-signal receiver: attach a SentryHandler (with a
    CeleryFilter) to the logger being set up, at the signal's loglevel."""
    sentry_handler = SentryHandler(client)
    sentry_handler.setLevel(loglevel)
    sentry_handler.addFilter(CeleryFilter())
    logger.addHandler(sentry_handler)
def start(self):
    """(Re)build this logger's handlers: Sentry, optional console, and
    optional rotating file logs (plus a separate error log file)."""
    # remove all handlers
    self.handlers = []

    # sentry log handler -- ships ERROR records, tagged with the platform
    sentry_client = raven.Client(
        'https://*****:*****@sentry.sickrage.ca/4?verify_ssl=0',
        release=sickrage.version(),
        repos={'sickrage': {'name': 'sickrage/sickrage'}})
    sentry_handler = SentryHandler(client=sentry_client,
                                   tags={'platform': platform.platform()})
    sentry_handler.setLevel(self.logLevels['ERROR'])
    sentry_handler.set_name('sentry')
    self.addHandler(sentry_handler)

    # console log handler -- DEBUG when debug logging is on, else INFO
    if self.consoleLogging:
        console_handler = logging.StreamHandler()
        formatter = logging.Formatter(
            '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
            '%H:%M:%S')
        console_handler.setFormatter(formatter)
        console_handler.setLevel(
            self.logLevels['INFO'] if not self.debugLogging
            else self.logLevels['DEBUG'])
        self.addHandler(console_handler)

    # file log handlers
    if self.logFile:
        # make logs folder if it doesn't exist; bail out if that fails
        if not os.path.exists(os.path.dirname(self.logFile)):
            if not makeDir(os.path.dirname(self.logFile)):
                return

        # developers get a plain (non-rotating) file handler
        if sickrage.app.developer:
            rfh = FileHandler(
                filename=self.logFile,
            )
        else:
            rfh = RotatingFileHandler(
                filename=self.logFile,
                maxBytes=self.logSize,
                backupCount=self.logNr
            )

        # a second, always-rotating file that receives ERROR records only
        rfh_errors = RotatingFileHandler(
            filename=self.logFile.replace('.log', '.error.log'),
            maxBytes=self.logSize,
            backupCount=self.logNr
        )

        formatter = logging.Formatter(
            '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
            '%Y-%m-%d %H:%M:%S')

        rfh.setFormatter(formatter)
        rfh.setLevel(self.logLevels['INFO'] if not self.debugLogging
                     else self.logLevels['DEBUG'])
        self.addHandler(rfh)

        rfh_errors.setFormatter(formatter)
        rfh_errors.setLevel(self.logLevels['ERROR'])
        self.addHandler(rfh_errors)
def process_logger_event(sender, logger, loglevel, logfile, format, colorize, **kw):
    """Celery signal receiver: install a SentryHandler on the ROOT logger.

    Note: the `logger` argument supplied by the signal is deliberately
    replaced with the root logger, so the handler is attached globally.
    Returns False (without attaching) if one is already installed.
    """
    import logging
    root = logging.getLogger()
    sentry_handler = SentryHandler(client)
    if any(type(existing) is type(sentry_handler) for existing in root.handlers):
        return False
    sentry_handler.setLevel(logging.ERROR)
    sentry_handler.addFilter(CeleryFilter())
    root.addHandler(sentry_handler)
def create_app(load_admin=True):
    """Build and configure the redash Flask application.

    Wires up proxy handling, optional HTTPS enforcement, optional Sentry
    error reporting, the database, mail, auth, rate limiting, request
    handlers, webpack assets and extensions.

    :param load_admin: when true, also initialise the admin interface.
    :returns: the configured Flask app.
    """
    from redash import extensions, handlers
    from redash.handlers.webpack import configure_webpack
    from redash.handlers import chrome_logger
    from redash.admin import init_admin
    from redash.models import db
    from redash.authentication import setup_authentication
    from redash.metrics.request import provision_app

    app = Flask(__name__,
                template_folder=settings.STATIC_ASSETS_PATH,
                static_folder=settings.STATIC_ASSETS_PATH,
                static_path='/static')

    # Make sure we get the right referral address even behind proxies like nginx.
    app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT)
    #app.wsgi_app = ReverseProxied(app.wsgi_app)
    app.url_map.converters['org_slug'] = SlugConverter

    if settings.ENFORCE_HTTPS:
        SSLify(app, skips=['ping'])

    if settings.SENTRY_DSN:
        from raven import Client
        from raven.contrib.flask import Sentry
        from raven.handlers.logging import SentryHandler

        # install_logging_hook=False: logging is routed explicitly via the
        # SentryHandler attached to the root logger below.
        client = Client(settings.SENTRY_DSN, release=__version__,
                        install_logging_hook=False)
        sentry = Sentry(app, client=client)
        sentry.client.release = __version__

        sentry_handler = SentryHandler(client=client)
        sentry_handler.setLevel(logging.ERROR)
        logging.getLogger().addHandler(sentry_handler)

    # configure our database
    app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
    app.config.update(settings.all_settings())

    provision_app(app)
    db.init_app(app)
    migrate.init_app(app, db)
    if load_admin:
        init_admin(app)
    mail.init_app(app)
    setup_authentication(app)
    limiter.init_app(app)
    handlers.init_app(app)
    configure_webpack(app)
    extensions.init_extensions(app)
    chrome_logger.init_app(app)

    return app
def add_sentry_handler_to_celery_task_logger(client, sentry_handler_log_level):
    """Register a Celery task-logger signal receiver that attaches one
    shared SentryHandler (at the given level) to each task logger,
    skipping loggers that already carry a SentryHandler."""
    sentry_handler = SentryHandler(client)
    sentry_handler.setLevel(sentry_handler_log_level)

    def process_task_logger_event(sender, logger, loglevel, logfile, format,
                                  colorize, **kw):
        # Avoid duplicate handlers: bail out if one is already installed.
        if any(type(existing) is SentryHandler for existing in logger.handlers):
            return False
        logger.addHandler(sentry_handler)

    after_setup_task_logger.connect(process_task_logger_event, weak=False)
def setup_raven():
    '''we setup sentry to get all stuff from our logs'''
    pcfg = AppBuilder.get_pcfg()
    from raven import Client
    from raven.conf import setup_logging
    from raven.handlers.logging import SentryHandler

    client = Client(pcfg['raven_dsn'])
    sentry_handler = SentryHandler(client)
    # TODO VERIFY THIS -> This is the way to do it if you have a paid
    # account; each log call is an event, so this isn't going to work for
    # free accounts...
    sentry_handler.setLevel(pcfg["raven_loglevel"])
    setup_logging(sentry_handler)
    return client
def setup_logger():
    """Set up our logger with sentry support.

    Args:
        None
    Returns:
        None
    Raises:
        None
    """
    logger = logging.getLogger('osm-reporter')
    logger.setLevel(logging.DEBUG)
    default_handler_level = logging.DEBUG

    # One formatter shared by every handler.
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Log file location; overridable so e.g. jenkins can redirect it.
    temp_dir = '/tmp'
    if 'OSM_REPORTER_LOGFILE' in os.environ:
        log_filename = os.environ['OSM_REPORTER_LOGFILE']
    else:
        log_filename = os.path.join(temp_dir, 'reporter.log')
    file_handler = logging.FileHandler(log_filename)
    file_handler.setLevel(default_handler_level)

    # create console handler with a higher log level
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.ERROR)

    try:
        #pylint: disable=F0401
        from raven.handlers.logging import SentryHandler
        # noinspection PyUnresolvedReferences
        from raven import Client
        #pylint: enable=F0401
        sentry_client = Client(
            'http://*****:*****@sentry.linfiniti.com/6')
        sentry_handler = SentryHandler(sentry_client)
        sentry_handler.setFormatter(formatter)
        sentry_handler.setLevel(logging.ERROR)
        add_handler_once(logger, sentry_handler)
        logger.debug('Sentry logging enabled')
    except ImportError:
        logger.debug('Sentry logging disabled. Try pip install raven')

    # Set formatters
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)

    # add the handlers to the logger (once only)
    add_handler_once(logger, file_handler)
    add_handler_once(logger, console_handler)
def init_raven_client(dsn):
    """Create the module-wide raven client for `dsn` and route ERROR-level
    log records to Sentry via setup_logging."""
    global _sentry
    _sentry = raven.Client(
        dsn=dsn,
        transport=raven.transport.threaded_requests.ThreadedRequestsHTTPTransport,
        ignore_exceptions={'KeyboardInterrupt'},
        logging=True,
    )
    # Make raven's own internal errors visible on a plain stream handler.
    logging.getLogger("sentry.errors").addHandler(logging.StreamHandler())

    sentry_handler = SentryHandler(_sentry)
    sentry_handler.setLevel(logging.ERROR)
    setup_logging(sentry_handler)
def init_unity(level, sentry_dsn):
    """Configure root logging for the Unity client: ERROR records go to
    Sentry, everything at `level` (a level name string) goes to the
    Unity log handler."""
    root = logging.getLogger()
    root.setLevel(0)  # handlers do the filtering

    sentry_handler = SentryHandler(
        raven.Client(sentry_dsn, transport=GeventedHTTPTransport))
    sentry_handler.setLevel(logging.ERROR)
    root.addHandler(sentry_handler)

    unity_handler = UnityLogHandler()
    unity_handler.setLevel(getattr(logging, level))
    root.addHandler(unity_handler)

    # Stamp the start of this logging session.
    root.info(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
    root.info('==============================================')
def setup(dsn, level, propagate_sentry_errors=False):
    """Create a Sentry client for `dsn` and install the module-global
    log handler at `level`.

    When `propagate_sentry_errors` is true, raven's own 'sentry.errors'
    logger is not excluded from handling (exclude=[]).
    """
    global handler
    client = make_client(dsn, False)
    from raven.handlers.logging import SentryHandler
    handler = SentryHandler(client)
    handler.setLevel(level)
    handler.dsn = dsn  # remember the DSN on the handler for later inspection
    from raven.conf import setup_logging
    if propagate_sentry_errors:
        setup_logging(handler, exclude=[])
    else:
        setup_logging(handler)
def check_args(self, args):
    """Validate and normalise parsed CLI arguments, and configure logging.

    Normalises `allow_root` (the inverse of `allow_other`), applies
    --debug to the log level, installs file/stdout/syslog and optional
    Sentry log handlers, sets the LRU cache size, and makes `repo_path`
    absolute.

    :returns: the (mutated) args namespace.
    """
    # allow_other and allow_root are mutually exclusive: root-only access
    # is the fallback when other users are not allowed.
    args.allow_root = not args.allow_other

    # --debug overrides the configured log level
    if args.debug:
        args.log_level = 'debug'

    # setup logging
    if args.log != "syslog":
        if args.log in ('-', '/dev/stdout'):
            handler = StreamHandler(sys.stdout)
        else:
            handler = TimedRotatingFileHandler(args.log, when="midnight")
        handler.setFormatter(Formatter(fmt='%(asctime)s %(threadName)s: '
                                           '%(message)s',
                                       datefmt='%B-%d-%Y %H:%M:%S'))
    else:
        # syslog socket location differs on macOS
        if sys.platform == 'darwin':
            handler = SysLogHandler(address="/var/run/syslog")
        else:
            handler = SysLogHandler(address="/dev/log")
        logger_fmt = 'GitFS on {mount_point} [%(process)d]: %(threadName)s: '\
                     '%(message)s'.format(mount_point=args.mount_point)
        handler.setFormatter(Formatter(fmt=logger_fmt))

    # optional Sentry reporting of ERROR records
    if args.sentry_dsn != '':
        from raven.conf import setup_logging
        from raven.handlers.logging import SentryHandler

        sentry_handler = SentryHandler(args.sentry_dsn)
        sentry_handler.setLevel("ERROR")
        setup_logging(sentry_handler)
        log.addHandler(sentry_handler)

    handler.setLevel(args.log_level.upper())
    log.setLevel(args.log_level.upper())
    log.addHandler(handler)

    # set cache size
    lru_cache.maxsize = args.cache_size

    # return absolute repository's path
    args.repo_path = os.path.abspath(args.repo_path)

    return args
def setup_logging():
    """Configure the Flask app logger from CONF: a file handler always,
    plus a Sentry handler when a DSN is configured."""
    logger = app.logger

    file_handler = logging.FileHandler(CONF.get("logging", "file"))
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.setLevel(getattr(logging, CONF.get("logging", "level")))
    logger.addHandler(file_handler)

    raven_dsn = CONF.get("logging", "sentry")
    if raven_dsn:
        sentry_handler = SentryHandler(raven_dsn)
        sentry_handler.setLevel(
            getattr(logging, CONF.get("logging", "sentry_level")))
        logger.addHandler(sentry_handler)
def setupLogger():
    """Set up our logger.

    Attaches a DEBUG-level file handler (/tmp/reporter.log), an
    ERROR-level console handler, and -- when raven is installed -- an
    ERROR-level Sentry handler to the 'osm-reporter' logger.

    Args:
        None
    Returns:
        None
    Raises:
        None
    """
    myLogger = logging.getLogger('osm-reporter')
    myLogger.setLevel(logging.DEBUG)
    myDefaultHanderLevel = logging.DEBUG
    # create formatter that will be added to the handlers
    myFormatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    myTempDir = ('/tmp')
    myFilename = os.path.join(myTempDir, 'reporter.log')
    myFileHandler = logging.FileHandler(myFilename)
    myFileHandler.setLevel(myDefaultHanderLevel)
    # create console handler with a higher log level
    myConsoleHandler = logging.StreamHandler()
    myConsoleHandler.setLevel(logging.ERROR)
    try:
        #pylint: disable=F0401
        from raven.handlers.logging import SentryHandler
        from raven import Client
        #pylint: enable=F0401
        myClient = Client('http://*****:*****@sentry.linfiniti.com/6')
        mySentryHandler = SentryHandler(myClient)
        mySentryHandler.setFormatter(myFormatter)
        mySentryHandler.setLevel(logging.ERROR)
        addLoggingHanderOnce(myLogger, mySentryHandler)
        myLogger.debug('Sentry logging enabled')
    # FIX: was a bare `except:`, which also swallowed SystemExit /
    # KeyboardInterrupt and hid unrelated bugs. Only a missing raven
    # package should be tolerated here.
    except ImportError:
        myLogger.debug('Sentry logging disabled. Try pip install raven')
    #Set formatters
    myFileHandler.setFormatter(myFormatter)
    myConsoleHandler.setFormatter(myFormatter)
    # add the handlers to the logger
    addLoggingHanderOnce(myLogger, myFileHandler)
    addLoggingHanderOnce(myLogger, myConsoleHandler)
def _child(self, control, data): '''Main function for child process.''' # Close supervisor log, open child log baselog = logging.getLogger() baselog.removeHandler(self._logfile_handler) del self._logfile_handler now = datetime.now().strftime(SEARCH_LOG_DATE_FORMAT) logname = SEARCH_LOG_FORMAT % (now, os.getpid()) logpath = os.path.join(self.config.logdir, logname) handler = logging.FileHandler(logpath) handler.setFormatter(_TimestampedLogFormatter()) baselog.addHandler(handler) if self.config.sentry_dsn: sentry_handler = SentryHandler(self.config.sentry_dsn) sentry_handler.setLevel(logging.ERROR) setup_logging(sentry_handler) # Okay, now we have logging search = None try: try: # Close listening socket and half-open connections self._listener.shutdown() # Log startup of child _log.info('Starting search %s, pid %d', opendiamond.__version__, os.getpid()) _log.info('Peer: %s', control.getpeername()[0]) _log.info('Worker threads: %d', self.config.threads) # Set up connection wrappers and search object control = RPCConnection(control) search = Search(self.config, RPCConnection(data)) # Dispatch RPCs on the control connection until we die while True: control.dispatch(search) finally: # Ensure that further signals (particularly SIGUSR1 from # worker threads) don't interfere with the shutdown process. self._ignore_signals = True except ConnectionFailure: # Client closed connection _log.info('Client closed connection') except _Signalled, s: # Worker threads raise SIGUSR1 when they've encountered a # fatal error if s.signal != signal.SIGUSR1: _log.info('Search exiting on %s', s.signame)
def setup_logger():
    """Set up our logger with sentry support.

    Attaches a DEBUG-level file handler, an ERROR-level console handler,
    and -- when raven is installed -- an ERROR-level Sentry handler to
    the 'user_map' logger.
    """
    logger = logging.getLogger('user_map')
    logger.setLevel(logging.DEBUG)
    handler_level = logging.DEBUG
    # create formatter that will be added to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    temp_dir = '/tmp'
    # so e.g. jenkins can override log dir.
    if 'USER_MAP_LOGFILE' in os.environ:
        file_name = os.environ['USER_MAP_LOGFILE']
    else:
        file_name = os.path.join(temp_dir, 'user-map.log')
    file_handler = logging.FileHandler(file_name)
    file_handler.setLevel(handler_level)
    # create console handler with a higher log level
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.ERROR)
    try:
        #pylint: disable=F0401
        from raven.handlers.logging import SentryHandler
        # noinspection PyUnresolvedReferences
        from raven import Client
        #pylint: enable=F0401
        # FIX: `client` was referenced below while its construction was
        # commented out, raising NameError whenever raven was installed.
        client = Client(
            'http://*****:*****@sentry.linfiniti.com/6')
        sentry_handler = SentryHandler(client)
        sentry_handler.setFormatter(formatter)
        sentry_handler.setLevel(logging.ERROR)
        add_handler_once(logger, sentry_handler)
        logger.debug('Sentry logging enabled')
    except ImportError:
        logger.debug('Sentry logging disabled. Try pip install raven')
    #Set formatters
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)
    # add the handlers to the logger
    add_handler_once(logger, file_handler)
    add_handler_once(logger, console_handler)
def init_logging():
    """Initialise root logging: an optional Sentry handler (when a DSN is
    configured) plus a DEBUG-level stdout handler."""
    root = logging.getLogger()
    root.setLevel(0)  # handlers do the filtering
    if State.config['sentry']:
        patch_gevent_hub_print_exception()
        hdlr = SentryHandler(raven.Client(State.config['sentry'],
                                          transport=GeventedHTTPTransport))
        hdlr.setLevel(logging.ERROR)
        root.addHandler(hdlr)
    hdlr = logging.StreamHandler(sys.stdout)
    # FIX: was `getattr(logging, logging.DEBUG)` -- getattr() with an int
    # as the attribute name raises TypeError; use the level constant.
    hdlr.setLevel(logging.DEBUG)
    root.addHandler(hdlr)
    # Stamp the start of this logging session.
    root.info(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
    root.info('==============================================')
def create_app():
    """Build and configure the redash Flask application.

    Wires up proxy handling, optional HTTPS enforcement, optional Sentry
    error reporting, the database, mail, auth and request handlers.

    :returns: the configured Flask app.
    """
    from redash import handlers
    from redash.admin import init_admin
    from redash.models import db
    from redash.authentication import setup_authentication
    from redash.metrics.request import provision_app

    app = Flask(
        __name__,
        template_folder=settings.STATIC_ASSETS_PATHS[-1],
        static_folder=settings.STATIC_ASSETS_PATHS[-1],
        static_path="/static",
    )
    # Make sure we get the right referral address even behind proxies like nginx.
    app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT)
    app.url_map.converters["org_slug"] = SlugConverter

    if settings.ENFORCE_HTTPS:
        SSLify(app, skips=["ping"])

    if settings.SENTRY_DSN:
        from raven.contrib.flask import Sentry
        from raven.handlers.logging import SentryHandler

        sentry = Sentry(app, dsn=settings.SENTRY_DSN)
        sentry.client.release = __version__

        # Also forward ERROR-level log records to Sentry via the root logger.
        sentry_handler = SentryHandler(settings.SENTRY_DSN)
        sentry_handler.setLevel(logging.ERROR)
        logging.getLogger().addHandler(sentry_handler)

    # configure our database
    settings.DATABASE_CONFIG.update({"threadlocals": True})
    app.config["DATABASE"] = settings.DATABASE_CONFIG
    app.config.update(settings.all_settings())

    provision_app(app)
    init_admin(app)
    db.init_app(app)
    mail.init_app(app)
    setup_authentication(app)
    handlers.init_app(app)

    return app
def _configure_logging(self, config):
    """Configure loggers here, to hook sentry handler"""
    import logging
    from raven.handlers.logging import SentryHandler

    # NOTSET on the handler: each logger's own level does the filtering.
    sentry_handler = SentryHandler(config.get('sentry.dsn'))
    sentry_handler.setLevel(logging.NOTSET)

    for logger_name in ('', 'ckan', 'ckanext', 'sentry.errors'):
        target = logging.getLogger(logger_name)
        target.addHandler(sentry_handler)
        target.setLevel(logging.INFO)

    ckan_logger = logging.getLogger('ckan')
    ckan_logger.info("Sentry configured for this ckan instance (INFO)")
    ckan_logger.warning("Sentry configured for this ckan instance (WARN)")
def configure_logging(logger="respa_exchange", level=logging.INFO, handler=None):
    """Attach `handler` (or a default stdout stream handler) to the named
    logger, plus an ERROR-level Sentry handler when Django settings
    provide a RAVEN_CONFIG DSN."""
    target = logging.getLogger(logger)
    target.setLevel(level)

    if not handler:
        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setFormatter(logging.Formatter(
            fmt="%(asctime)s - %(name)s - %(levelname)s: %(message)s",
            datefmt=logging.Formatter.default_time_format
        ))
    target.addHandler(handler)

    if hasattr(settings, 'RAVEN_CONFIG') and 'dsn' in settings.RAVEN_CONFIG:
        from raven.conf import setup_logging
        from raven.handlers.logging import SentryHandler

        sentry_handler = SentryHandler(settings.RAVEN_CONFIG['dsn'])
        sentry_handler.setLevel(logging.ERROR)
        target.addHandler(sentry_handler)
        setup_logging(sentry_handler)
def _configure_logging(self, config):
    """
    Configure the Sentry log handler to the specified level

    Based on @rshk work on https://github.com/opendatatrentino/ckanext-sentry
    """
    # NOTSET on the handler: each logger's own level does the filtering.
    sentry_handler = SentryHandler(config.get("sentry.dsn"))
    sentry_handler.setLevel(logging.NOTSET)

    sentry_log_level = config.get("sentry.log_level", logging.INFO)
    for logger_name in ("", "ckan", "ckanext", "sentry.errors"):
        target = logging.getLogger(logger_name)
        target.addHandler(sentry_handler)
        target.setLevel(sentry_log_level)

    log.debug("Setting up Sentry logger with level {0}".format(sentry_log_level))
def configure_logging( config ):
    """
    Allow some basic logging configuration to be read from the cherrpy
    config.
    """
    # PasteScript will have already configured the logger if the appropriate
    # sections were found in the config file, so we do nothing if the
    # config has a loggers section, otherwise we do some simple setup
    # using the 'log_*' values from the config.
    if config.global_conf_parser.has_section( "loggers" ):
        return
    # Renamed from `format` to avoid shadowing the builtin.
    log_format = config.get( "log_format", "%(name)s %(levelname)s %(asctime)s %(message)s" )
    # FIX: logging._levelNames is a private, Python-2-only mapping; the
    # public logging.getLevelName() resolves a level name to its number.
    level = logging.getLevelName( config.get( "log_level", "DEBUG" ) )
    destination = config.get( "log_destination", "stdout" )
    log.info( "Logging at '%s' level to '%s'" % ( level, destination ) )
    # Get root logger
    root = logging.getLogger()
    # Set level
    root.setLevel( level )
    # Turn down paste httpserver logging
    if level <= logging.DEBUG:
        logging.getLogger( "paste.httpserver.ThreadPool" ).setLevel( logging.WARN )
    # Remove old handlers
    for h in root.handlers[:]:
        root.removeHandler( h )
    # Create handler
    if destination == "stdout":
        handler = logging.StreamHandler( sys.stdout )
    else:
        handler = logging.FileHandler( destination )
    # Create formatter
    formatter = logging.Formatter( log_format )
    # Hook everything up
    handler.setFormatter( formatter )
    root.addHandler( handler )
    # If sentry is configured, also log to it
    if config.sentry_dsn:
        eggs.require( "raven" )
        from raven.handlers.logging import SentryHandler
        sentry_handler = SentryHandler( config.sentry_dsn )
        sentry_handler.setLevel( logging.WARN )
        root.addHandler( sentry_handler )
def initialize(fmt, level_console, level_file, file_path, level_syslog, syslog_host, syslog_port, level_sentry, sentry_DSN):
    """Configure the root logger with console, rotating-file, syslog and
    Sentry handlers, each enabled only when its level is not NOTSET.

    Level arguments are normalised through Log.__level (defined elsewhere
    in this class).  `fmt` is a logging format string shared by all
    handlers.
    """
    level_console = Log.__level(level_console)
    level_file = Log.__level(level_file)
    level_syslog = Log.__level(level_syslog)
    level_sentry = Log.__level(level_sentry)
    formatter = logging.Formatter(fmt)
    root = logging.getLogger('')
    # Root level is ERROR; individual handlers below set their own levels.
    root.setLevel(logging.ERROR)
    if level_console != logging.NOTSET:
        console = logging.StreamHandler()
        console.setLevel(level_console)
        console.setFormatter(formatter)
        # The handler object is kept on the class even when attaching is
        # disabled, presumably so it can be enabled later.
        if not Log.__logging_handler_console_disabled:
            root.addHandler(console)
        Log.__logging_handler_console = console
    if level_file != logging.NOTSET:
        file_path_keys = {
            # values to use for substition within the file_path
            'program': Log.getServerName().lower(),
            'version': Log.getVersionName(),
        }
        # NOTE(review): the `file_path` parameter and `file_path_keys`
        # computed above are never used -- the log path is hard-coded to
        # 'a.log'. This looks like a leftover debug value; confirm the
        # intended substitution (e.g. file_path % file_path_keys).
        Log.__file_path = 'a.log'
        Log.__file_level = level_file
        file_output = _LogRotateFileHandler(Log.__file_path)
        file_output.setLevel(Log.__file_level)
        file_output.setFormatter(formatter)
        root.addHandler(file_output)
    if level_syslog != logging.NOTSET:
        syslog = SysLogHandler((syslog_host, syslog_port))
        syslog.setLevel(level_syslog)
        syslog.setFormatter(formatter)
        root.addHandler(syslog)
    if level_sentry != logging.NOTSET:
        from raven.handlers.logging import SentryHandler
        from raven.conf import setup_logging
        sentry = SentryHandler(sentry_DSN)
        sentry.setLevel(level_sentry)
        setup_logging(sentry)
def register_logger_signal(client, logger=None, loglevel=logging.ERROR):
    """Hook Celery's after_setup_logger signal so each configured logger
    gets a CeleryFilter-equipped SentryHandler for `client`.

    The `logger` argument is accepted for API compatibility but is not
    used; the signal supplies the logger to decorate.
    """
    celery_filter = CeleryFilter()
    sentry_handler = SentryHandler(client)
    sentry_handler.setLevel(loglevel)
    sentry_handler.addFilter(celery_filter)

    def process_logger_event(sender, logger, loglevel, logfile, format,
                             colorize, **kw):
        # Attempt to find an existing SentryHandler, and if it exists ensure
        # that the CeleryFilter is installed.
        # If one is found, we do not attempt to install another one.
        for existing in logger.handlers:
            if type(existing) is SentryHandler:
                existing.addFilter(celery_filter)
                return False
        logger.addHandler(sentry_handler)

    after_setup_logger.connect(process_logger_event, weak=False)
def enable_sentry_handler(sentry_dsn, logger_name=None, logger_level=logging.WARN, auto_log_stacks=True, capture_locals=True, tags=None):
    """Create a raven client for `sentry_dsn` and attach a SentryHandler
    to the logger named `logger_name` (root when None).

    Passing None for `logger_level` / `auto_log_stacks` falls back to the
    values in conf.SENTRY_CONF.  Returns the installed handler.
    """
    from raven.handlers.logging import SentryHandler
    from raven import Client

    sentry_conf = conf.SENTRY_CONF
    if logger_level is None:
        logger_level = sentry_conf['logger_level']
    if auto_log_stacks is None:
        auto_log_stacks = sentry_conf['auto_log_stacks']

    raven_client = Client(sentry_dsn,
                          auto_log_stacks=auto_log_stacks,
                          capture_locals=capture_locals)
    handler = SentryHandler(raven_client, tags=tags)
    handler.setLevel(logger_level)
    logging.getLogger(logger_name).addHandler(handler)
    return handler
def init_logging(level='INFO'):
    """Initialise root logging: an optional Sentry handler (when a DSN is
    configured) plus a stdout handler at the named `level`."""
    root = logging.getLogger()
    root.setLevel(0)  # handlers do the filtering
    patch_gevent_hub_print_exception()

    if State.config.get('sentry'):
        sentry_handler = SentryHandler(raven.Client(
            State.config['sentry'], transport=GeventedHTTPTransport))
        sentry_handler.setLevel(logging.ERROR)
        root.addHandler(sentry_handler)

    console = logging.StreamHandler(sys.stdout)
    console.setLevel(getattr(logging, level))
    console.setFormatter(ServerLogFormatter())
    root.addHandler(console)

    # Stamp the start of this logging session.
    root.info(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
    root.info('==============================================')
def init_server(level, sentry_dsn, release, logfile, with_gr_name=True):
    """Configure server-side root logging: stdout (always), Sentry for
    ERROR records, and an optional watched log file."""
    patch_gevent_hub_print_exception()
    root = logging.getLogger()
    root.setLevel(level)

    formatter = ServerLogFormatter(with_gr_name=with_gr_name)
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    stdout_handler.setFormatter(formatter)
    root.addHandler(stdout_handler)

    sentry_handler = SentryHandler(raven.Client(
        sentry_dsn, transport=GeventedHTTPTransport, release=release))
    sentry_handler.setLevel(logging.ERROR)
    root.addHandler(sentry_handler)

    # Silence raven's internal error logger (level 1000 > CRITICAL).
    logging.getLogger('sentry.errors').setLevel(1000)

    if logfile:
        from logging.handlers import WatchedFileHandler
        file_handler = WatchedFileHandler(logfile)
        file_handler.setFormatter(formatter)
        root.addHandler(file_handler)
def setup_logging(log_name, debug=False, filepath=None, sentry_dsn=None,
                  file_lvl="INFO", sentry_lvl="WARN"):
    """Setup logging according to the specified options.

    Return the Logger object.

    With debug=True both stream and file handlers log at DEBUG;
    otherwise the stream handler only shows CRITICAL and the file
    handler uses `file_lvl`.  A file handler is added when `filepath`
    is given, and a Sentry handler when a DSN is given and raven is
    available.
    """
    formatter = logging.Formatter(
        '%(asctime)s [%(process)d] %(levelname)s %(message)s'
    )

    if debug:
        stream_level = logging.DEBUG
        file_level = logging.DEBUG
    else:
        stream_level = logging.CRITICAL
        file_level = getattr(logging, file_lvl)

    logger = logging.getLogger(log_name)
    logger.setLevel(file_level)

    console = logging.StreamHandler()
    console.setLevel(stream_level)
    console.setFormatter(formatter)
    logger.addHandler(console)

    if filepath:
        file_handler = logging.FileHandler(filepath)
        file_handler.setLevel(file_level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    if sentry_dsn and _HAS_RAVEN:
        sentry_handler = SentryHandler(sentry_dsn)
        sentry_handler.setLevel(getattr(logging, sentry_lvl))
        logger.addHandler(sentry_handler)

    return logger
def register_logger_signal(client, logger=None):
    """Hook Celery's after_setup_logger signal so each configured logger
    gets an ERROR-level, CeleryFilter-equipped SentryHandler for `client`.
    When `logger` is None the root logger is used as the default."""
    celery_filter = CeleryFilter()
    if logger is None:
        logger = logging.getLogger()
    sentry_handler = SentryHandler(client)
    sentry_handler.setLevel(logging.ERROR)
    sentry_handler.addFilter(celery_filter)

    def process_logger_event(sender, logger, loglevel, logfile, format,
                             colorize, **kw):
        # Attempt to find an existing SentryHandler, and if it exists ensure
        # that the CeleryFilter is installed.
        # If one is found, we do not attempt to install another one.
        for existing in logger.handlers:
            if type(existing) is SentryHandler:
                already_filtered = any(
                    type(f) is CeleryFilter for f in existing.filters)
                if not already_filtered:
                    existing.addFilter(celery_filter)
                return False
        logger.addHandler(sentry_handler)

    after_setup_logger.connect(process_logger_event, weak=False)
def _install_sentry():
    """Connect a raven client for the AVALON_SENTRY DSN and route
    WARNING-and-above log records to it.  No-op when the DSN is absent;
    logs a warning when raven is not installed."""
    if "AVALON_SENTRY" not in Session:
        return

    try:
        from raven import Client
        from raven.handlers.logging import SentryHandler
        from raven.conf import setup_logging
    except ImportError:
        # Note: There was a Sentry address in this Session
        return log.warning("Sentry disabled, raven not installed")

    client = Client(Session["AVALON_SENTRY"])

    # Transmit log messages to Sentry
    handler = SentryHandler(client)
    handler.setLevel(logging.WARNING)

    setup_logging(handler)

    # NOTE(review): `self` is not a parameter of this function --
    # presumably the module aliases itself as `self` (e.g.
    # `self = sys.modules[__name__]`) at import time; confirm that alias
    # exists, otherwise these two lines raise NameError.
    self._sentry_client = client
    self._sentry_logging_handler = handler
    log.info("Connected to Sentry @ %s" % Session["AVALON_SENTRY"])
def filter(self, record): record.hostname = ContextFilter.hostname return True stream_handler = logging.StreamHandler() stream_handler.setLevel(setting['logging']['term_level']) stream_handler.addFilter(ContextFilter()) stream_handler.setFormatter( logging.Formatter('%(asctime)s - %(levelname)-10s - %(hostname)s - [in %(pathname)s:%(lineno)d]: - %(message)s')) logger.addHandler(stream_handler) sentry_url = setting.get('sentry_url') if sentry_url: handler = SentryHandler(sentry_url) handler.setLevel(setting['logging']['sentry_level']) setup_logging(handler) # MONGO ########### mongo_host, mongo_port = setting['mongodb']['host'], setting['mongodb']['port'] mongo_lock = threading.Lock() self_params = setting.get('self') if not self_params: raise Exception('Setting error. Self params not found') class MongoException(Exception): pass
def setup_handlers():
    """
    sets up the sentry handler

    Builds a raven client from the minion's 'sentry_handler' config
    (either a DSN or project/servers/key settings plus optional tags
    taken from grains) and returns a level-filtered SentryHandler, or
    False/None when Sentry is not configured.
    """
    if not __opts__.get("sentry_handler"):
        log.debug("'sentry_handler' config is empty or not defined")
        return False
    # Regenerating dunders can be expensive, so only do it if the user enables
    # `sentry_handler` as checked above
    __grains__ = salt.loader.grains(__opts__)
    __salt__ = salt.loader.minion_mods(__opts__)
    options = {}
    dsn = get_config_value("dsn")
    if dsn is not None:
        try:
            # support raven ver 5.5.0
            from raven.transport import TransportRegistry, default_transports
            from raven.utils.urlparse import urlparse

            transport_registry = TransportRegistry(default_transports)
            url = urlparse(dsn)
            if not transport_registry.supported_scheme(url.scheme):
                raise ValueError("Unsupported Sentry DSN scheme: {}".format(
                    url.scheme))
        except ValueError as exc:
            # An unparsable DSN is logged but not fatal; raven will fail
            # later if it truly cannot be used.
            log.info(
                "Raven failed to parse the configuration provided DSN: %s",
                exc)
    if not dsn:
        # No DSN: fall back to the individual connection settings, all of
        # which are required.
        for key in ("project", "servers", "public_key", "secret_key"):
            config_value = get_config_value(key)
            if config_value is None and key not in options:
                log.debug(
                    "The required 'sentry_handler' configuration key, "
                    "'%s', is not properly configured. Not configuring "
                    "the sentry logging handler.",
                    key,
                )
                return
            elif config_value is None:
                continue
            options[key] = config_value
    # site: An optional, arbitrary string to identify this client installation.
    options.update({
        # site: An optional, arbitrary string to identify this client
        # installation
        "site": get_config_value("site"),
        # name: This will override the server_name value for this installation.
        # Defaults to socket.gethostname()
        "name": get_config_value("name"),
        # exclude_paths: Extending this allow you to ignore module prefixes
        # when sentry attempts to discover which function an error comes from
        "exclude_paths": get_config_value("exclude_paths", ()),
        # include_paths: For example, in Django this defaults to your list of
        # INSTALLED_APPS, and is used for drilling down where an exception is
        # located
        "include_paths": get_config_value("include_paths", ()),
        # list_max_length: The maximum number of items a list-like container
        # should store.
        "list_max_length": get_config_value("list_max_length"),
        # string_max_length: The maximum characters of a string that should be
        # stored.
        "string_max_length": get_config_value("string_max_length"),
        # auto_log_stacks: Should Raven automatically log frame stacks
        # (including locals) all calls as it would for exceptions.
        "auto_log_stacks": get_config_value("auto_log_stacks"),
        # timeout: If supported, the timeout value for sending messages to
        # remote.
        "timeout": get_config_value("timeout", 1),
        # processors: A list of processors to apply to events before sending
        # them to the Sentry server. Useful for sending additional global state
        # data or sanitizing data that you want to keep off of the server.
        "processors": get_config_value("processors"),
        # dsn: Ensure the DSN is passed into the client
        "dsn": dsn,
    })

    client = raven.Client(**options)
    context = get_config_value("context")
    context_dict = {}
    if context is not None:
        # Tag the Sentry context with the configured grains, skipping any
        # that are missing or falsy.
        for tag in context:
            try:
                tag_value = __grains__[tag]
            except KeyError:
                log.debug("Sentry tag '%s' not found in grains.", tag)
                continue
            if tag_value:
                context_dict[tag] = tag_value
        if context_dict:
            client.context.merge({"tags": context_dict})
    try:
        handler = SentryHandler(client)

        exclude_patterns = get_config_value("exclude_patterns", None)
        if exclude_patterns:
            filter_regexes = [
                re.compile(pattern) for pattern in exclude_patterns
            ]

            class FilterExcludedMessages:
                @staticmethod
                def filter(record):
                    # Drop any record whose message matches one of the
                    # configured exclude patterns.
                    m = record.getMessage()
                    return not any(regex.search(m) for regex in filter_regexes)

            handler.addFilter(FilterExcludedMessages())

        handler.setLevel(LOG_LEVELS[get_config_value("log_level", "error")])
        return handler
    except ValueError as exc:
        log.debug("Failed to setup the sentry logging handler", exc_info=True)
from raven import Client

# Test DSN pointing at a local Sentry instance.
DSN = 'http://*****:*****@192.168.0.127:9000/2'
# client = Client(DSN)

import logging
from raven.handlers.logging import SentryHandler
from raven.conf import setup_logging

# Attach a Sentry handler (INFO and above) to the root logger so that any
# logger in the process forwards records to Sentry via propagation.
handler = SentryHandler(DSN)
handler.setLevel(logging.INFO)
setup_logging(handler)

if __name__ == '__main__':
    # BUGFIX: the original rebound ``logger`` to ``logging.Logger(__file__)``.
    # A Logger constructed directly is not part of the logging hierarchy
    # (its parent is None), so its records never propagate to the root
    # handlers installed by setup_logging() above and nothing reached Sentry.
    # Use the registered module logger instead. A duplicate
    # ``import logging`` was also removed.
    logger = logging.getLogger(__name__)
    logger.info('info message', extra={'stack': True})

    # Imported so raven's breadcrumb instrumentation can hook requests;
    # presumably intentional in the original demo -- TODO confirm.
    import requests

    logger.error('error message', extra={'stack': True})
    logger.critical('critical message', extra={'stack': True})
## Resolve the Sentry DSN: the environment variable takes precedence over
## the 'sentry.dsn' option from the CKAN config file.
sentry_dsn = os.environ['SENTRY_DSN'] if 'SENTRY_DSN' in os.environ else config.get('sentry.dsn')

## Wrap the WSGI application so unhandled exceptions are reported to Sentry.
client = Client(sentry_dsn)
application = Sentry(application, client=client)

## This is to make sure 404 are redirected to a page that looks nice,
## normally done by setting full_stack = True
from pylons.middleware import StatusCodeRedirect
application = StatusCodeRedirect(application, [400, 404, 500])

## Configure logging
import logging
from raven.handlers.logging import SentryHandler

## The handler itself accepts every record (NOTSET); filtering happens at the
## logger level below, where each logger is capped at WARN.
handler = SentryHandler(sentry_dsn)
handler.setLevel(logging.NOTSET)

loggers = ['', 'ckan', 'ckanext', 'sentry.errors']
for name in loggers:
    logger = logging.getLogger(name)
    logger.setLevel(logging.WARN)
    logger.addHandler(handler)

## Just for debugging purposes..
ckan_logger = logging.getLogger('ckan')
ckan_logger.info("Sentry configured for this ckan instance (INFO)")
ckan_logger.warning("Sentry configured for this ckan instance (WARN)")
def setupLogger():
    """Initialise the global 'InaSAFE' logger with file, console, QGIS and
    (optionally) Sentry handlers.

    Run once when the module is loaded (see safe/__init__.py)::

        from safe_qgis import utilities
        utilities.setupLogger()

    Afterwards any module obtains the shared logger with::

        import logging
        LOGGER = logging.getLogger('InaSAFE')
        LOGGER.debug('Some debug message')

    .. note:: The file logs are written to the inasafe user tmp dir e.g.:
       /tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log

    Sentry reporting is opt-in: it is attempted only when the INASAFE_SENTRY
    environment variable is present (value irrelevant) and the raven package
    is importable.
    """
    logger = logging.getLogger('InaSAFE')
    logger.setLevel(logging.DEBUG)

    # One formatter shared by every handler.
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # File handler: captures everything (DEBUG and up) in the user's
    # temporary working directory - per Ole and Tim's preference over
    # /var/log/syslog.
    log_path = os.path.join(temp_dir('logs'), 'inasafe.log')
    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(logging.DEBUG)

    # Console handler: only ERROR and above reach the terminal.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.ERROR)

    # Handler that relays records into the QGIS log viewer.
    qgis_handler = QgsLogHandler()

    # TODO: User opt in before we enable email based logging.
    # (An SMTPHandler for errors used to be sketched here.)

    # Sentry handler - optional, hence the localised import: a missing raven
    # package merely disables remote reporting. Exceptions go to
    # http://sentry.linfiniti.com.
    if 'INASAFE_SENTRY' in os.environ:
        try:
            #pylint: disable=F0401
            from raven.handlers.logging import SentryHandler
            from raven import Client
            #pylint: enable=F0401
            sentry_client = Client(
                'http://*****:*****@sentry.linfiniti.com/4')
            sentry_handler = SentryHandler(sentry_client)
            sentry_handler.setFormatter(formatter)
            sentry_handler.setLevel(logging.ERROR)
            if addLoggingHanderOnce(logger, sentry_handler):
                logger.debug('Sentry logging enabled')
        except ImportError:
            logger.debug('Sentry logging disabled')

    # Attach the shared formatter, then register each handler exactly once.
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)
    qgis_handler.setFormatter(formatter)

    addLoggingHanderOnce(logger, file_handler)
    addLoggingHanderOnce(logger, console_handler)
    addLoggingHanderOnce(logger, qgis_handler)
def start(self):
    """Initialise this logger's handlers: Sentry (ERROR and above), an
    optional console handler, and optional file handlers."""
    # remove all handlers left from a previous start() so nothing double-logs
    self.handlers = []

    # sentry log handler - forwards ERROR-and-above records to the hosted
    # Sentry project, tagged with the current platform
    sentry_client = raven.Client(
        'https://*****:*****@sentry.sickrage.ca/4?verify_ssl=0',
        release=sickrage.version(),
        repos={'sickrage': {
            'name': 'sickrage/sickrage'
        }})
    sentry_handler = SentryHandler(client=sentry_client,
                                   tags={'platform': platform.platform()})
    sentry_handler.setLevel(self.logLevels['ERROR'])
    sentry_handler.set_name('sentry')
    self.addHandler(sentry_handler)

    # console log handler - DEBUG when debug logging is on, INFO otherwise
    if self.consoleLogging:
        console_handler = logging.StreamHandler()
        formatter = logging.Formatter(
            '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
            '%H:%M:%S')
        console_handler.setFormatter(formatter)
        console_handler.setLevel(self.logLevels['INFO'] if not self.
                                 debugLogging else self.logLevels['DEBUG'])
        self.addHandler(console_handler)

    # file log handlers
    if self.logFile:
        # make logs folder if it doesn't exist
        if not os.path.exists(os.path.dirname(self.logFile)):
            if not makeDir(os.path.dirname(self.logFile)):
                return

        # developers get a plain non-rotating file; normal installs rotate
        if sickrage.app.developer:
            rfh = FileHandler(filename=self.logFile, )
        else:
            rfh = RotatingFileHandler(filename=self.logFile,
                                      maxBytes=self.logSize,
                                      backupCount=self.logNr)

        # NOTE(review): the flattened source makes the indentation ambiguous;
        # rfh_errors is assumed to sit OUTSIDE the if/else above since it is
        # needed unconditionally below - confirm against the original file.
        rfh_errors = RotatingFileHandler(
            filename=self.logFile.replace('.log', '.error.log'),
            maxBytes=self.logSize,
            backupCount=self.logNr)

        formatter = logging.Formatter(
            '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
            '%Y-%m-%d %H:%M:%S')
        rfh.setFormatter(formatter)
        rfh.setLevel(self.logLevels['INFO'] if not self.debugLogging else
                     self.logLevels['DEBUG'])
        self.addHandler(rfh)

        # a second file capturing only ERROR-and-above into *.error.log
        rfh_errors.setFormatter(formatter)
        rfh_errors.setLevel(self.logLevels['ERROR'])
        self.addHandler(rfh_errors)
def setup_logger(sentry_url=None):
    """Run once when the module is loaded and enable logging.

    :param sentry_url: Optional url to sentry api for remote logging.
        Defaults to http://c64a83978732474ea751d432ab943a6b:
        [email protected]/5 which is the sentry project
        for InaSAFE desktop.
    :type sentry_url: str

    Borrowed heavily from this:
    http://docs.python.org/howto/logging-cookbook.html

    Use this to first initialise the logger (see safe/__init__.py)::

       from safe_qgis import utilities
       utilities.setupLogger()

    You would typically only need to do the above once ever as the safe model
    is initialised early and will set up the logger globally so it is
    available to all packages / subpackages as shown below.

    In a module that wants to do logging then use this example as a guide to
    get the initialised logger instance::

       # The LOGGER is initialised in utilities.py by init
       import logging
       LOGGER = logging.getLogger('InaSAFE')

    Now to log a message do::

       LOGGER.debug('Some debug message')

    .. note:: The file logs are written to the inasafe user tmp dir e.g.:
       /tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log
    """
    logger = logging.getLogger('InaSAFE')

    # create formatter that will be added to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Handler that relays records into the QGIS message log.
    qgis_handler = QgsLogHandler()
    qgis_handler.setFormatter(formatter)
    add_logging_handler_once(logger, qgis_handler)

    # Sentry handler - this is optional hence the localised import
    # It will only log if pip install raven. If raven is available
    # logging messages will be sent to http://sentry.linfiniti.com
    # We will log exceptions only there. You need to either:
    # * Set env var 'INASAFE_SENTRY=1' present (value can be anything)
    # * Enable the 'help improve InaSAFE by submitting errors to a remote
    #   server' option in InaSAFE options dialog
    # before this will be enabled.
    settings = QtCore.QSettings()
    flag = settings.value('inasafe/useSentry', False)
    if 'INASAFE_SENTRY' in os.environ or flag:
        if sentry_url is None:
            client = Client(
                'http://c64a83978732474ea751d432ab943a6b'
                ':[email protected]/5')
        else:
            client = Client(sentry_url)
        sentry_handler = SentryHandler(client)
        sentry_handler.setFormatter(formatter)
        sentry_handler.setLevel(logging.ERROR)
        if add_logging_handler_once(logger, sentry_handler):
            logger.debug('Sentry logging enabled in safe_qgis')
        elif 'INASAFE_SENTRY' in os.environ:
            # NOTE(review): the flattened source makes this elif's attachment
            # ambiguous; it is assumed to pair with add_logging_handler_once
            # above (pairing it with the outer if would make it unreachable).
            # Confirm against the original file.
            logger.debug('Sentry logging already enabled in safe')
    else:
        logger.debug('Sentry logging disabled in safe_qgis')
def run(base_dir, start_gunicorn_app=True):
    """Bootstrap and start a Zato server rooted at ``base_dir``.

    :param base_dir: server directory containing ``config/repo``
    :param start_gunicorn_app: when True, blocks running gunicorn; when
        False, returns the WSGI application instead (e.g. for embedding).
    """
    # Filter out warnings we are not interested in
    warnings.filterwarnings('ignore', 'Mean of empty slice.')
    warnings.filterwarnings('ignore', 'invalid value encountered in double_scalars')

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Start initializing the server now
    os.chdir(base_dir)

    # Use PyMySQL as a drop-in MySQLdb replacement when it is installed.
    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not suprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocess
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    # NOTE(review): logging.addLevelName expects (level, levelName) - the
    # arguments here look reversed; confirm intent before changing.
    logging.addLevelName('TRACE1', TRACE1)
    with open(os.path.join(repo_location, 'logging.conf')) as f:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the config file is presumably trusted here.
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(config.newrelic.config,
                                  config.newrelic.environment or None,
                                  config.newrelic.ignore_errors or None,
                                  config.newrelic.log_file or None,
                                  config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale,
                    value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location,
                                                config.main, config.crypto)

    # Wire the parallel server with everything it needs before it starts.
    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.tls_dir = os.path.join(parallel_server.base_dir, 'config',
                                           'repo', 'tls')
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')
    dsn = sentry_config.pop('dsn', None)
    if dsn:
        # Localised imports: raven is only needed when a DSN is configured.
        # NOTE(review): setup_logging is imported but never called here.
        from raven import Client
        from raven.conf import setup_logging
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)
        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        # Attach the Sentry handler to the root logger and every zato* logger.
        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # Run the app at last we execute from command line
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
def start(self):
    """Initialise this logger's handlers: Sentry (ERROR and above), an
    optional console handler, and optional rotating file handlers."""
    # remove all handlers left from a previous start() so nothing double-logs
    self.handlers.clear()

    # Exception class names that should never be reported to Sentry.
    sentry_ignore_exceptions = [
        'KeyboardInterrupt',
        'PermissionError',
        'FileNotFoundError',
        'EpisodeNotFoundException'
    ]

    # sentry log handler
    sentry_client = raven.Client(
        'https://[email protected]/2?verify_ssl=0',
        release=sickrage.version(),
        repos={'sickrage': {'name': 'sickrage/sickrage'}},
        ignore_exceptions=sentry_ignore_exceptions
    )

    # Tags attached to every Sentry event to aid triage.
    sentry_tags = {
        'platform': platform.platform(),
        'locale': locale.getdefaultlocale(),
        'python': platform.python_version()
    }

    # Attach installation identifiers when the app config is already loaded.
    if sickrage.app.config and sickrage.app.config.sub_id:
        sentry_tags.update({'sub_id': sickrage.app.config.sub_id})
    if sickrage.app.config and sickrage.app.config.server_id:
        sentry_tags.update({'server_id': sickrage.app.config.server_id})

    sentry_handler = SentryHandler(client=sentry_client, ignore_exceptions=sentry_ignore_exceptions, tags=sentry_tags)
    sentry_handler.setLevel(self.logLevels['ERROR'])
    sentry_handler.set_name('sentry')
    self.addHandler(sentry_handler)

    # console log handler - DEBUG when debug logging is on, INFO otherwise
    if self.consoleLogging:
        console_handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%H:%M:%S')
        console_handler.setFormatter(formatter)
        console_handler.setLevel(self.logLevels['INFO'] if not self.debugLogging else self.logLevels['DEBUG'])
        self.addHandler(console_handler)

    # file log handlers
    if self.logFile:
        # make logs folder if it doesn't exist
        if not os.path.exists(os.path.dirname(self.logFile)):
            if not make_dir(os.path.dirname(self.logFile)):
                return

        # developers get a plain non-rotating file; normal installs rotate
        if sickrage.app.developer:
            rfh = FileHandler(
                filename=self.logFile,
            )
        else:
            rfh = RotatingFileHandler(
                filename=self.logFile,
                maxBytes=self.logSize,
                backupCount=self.logNr
            )

        # NOTE(review): the flattened source makes the indentation ambiguous;
        # rfh_errors is assumed to sit OUTSIDE the if/else above since it is
        # needed unconditionally below - confirm against the original file.
        rfh_errors = RotatingFileHandler(
            filename=self.logFile.replace('.log', '.error.log'),
            maxBytes=self.logSize,
            backupCount=self.logNr
        )

        formatter = logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%Y-%m-%d %H:%M:%S')
        rfh.setFormatter(formatter)
        rfh.setLevel(self.logLevels['INFO'] if not self.debugLogging else self.logLevels['DEBUG'])
        self.addHandler(rfh)

        # a second file capturing only ERROR-and-above into *.error.log
        rfh_errors.setFormatter(formatter)
        rfh_errors.setLevel(self.logLevels['ERROR'])
        self.addHandler(rfh_errors)
config = ConfigParser.RawConfigParser() config.read('posmon.ini') keys = [(config.getint(section, 'keyID'), config.get(section, 'vCode')) for section in config.sections() if section.startswith('key:')] cache_path = config.get('posmon', 'cache') try: sentry_uri = config.get('posmon', 'sentry.uri') except ConfigParser.NoOptionError: sentry_uri = None # Set up logging logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG) if sentry_uri: from raven.handlers.logging import SentryHandler sentry_handler = SentryHandler(sentry_uri) sentry_handler.setLevel(logging.WARNING) logging.getLogger().addHandler(sentry_handler) # Run! cache=ShelveCache(cache_path) fmt = sys.argv[1] if len(sys.argv) > 1 else 'text' try: for key_id, vcode in keys: api_key = API(api_key=(key_id, vcode), cache=cache) try: process(api_key, format=fmt) except Exception as e: if fmt == 'text': print "error processing key: %s" % (str(e),) else: print json.dumps({'error': str(e)})
def init_logger(self):
    """
    Initialize the logger. Call exactly once.

    Configures the ROOT logger (so all child loggers inherit the setup)
    with, depending on instance flags: a GUI dialog-box handler OR a console
    handler, a rotating file handler, an error-callback handler and a Sentry
    handler. Every handler created is recorded in ``self.handlers`` keyed by
    ``HandlerType``.
    """
    self.handlers = {}
    # we init the root logger so all child loggers inherit this functionality
    self.root_log = logging.getLogger()

    # set the root log level
    self.root_log.setLevel(logging.DEBUG if self.verbose else logging.INFO)

    if self.root_log.hasHandlers():
        self.root_log.info('Logger already initialized.')

    if self.gui:
        # GUI will only pop up a dialog box - it's important that GUI not try
        # to output to stdout or stderr since that would likely cause a
        # permissions error.
        dialog_box_handler = DialogBoxHandler()
        dialog_box_handler.setLevel(logging.WARNING if self.verbose else logging.ERROR)
        self.root_log.addHandler(dialog_box_handler)
        self.handlers[HandlerType.DialogBox] = dialog_box_handler
    else:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(self.log_formatter)
        console_handler.setLevel(logging.INFO if self.verbose else logging.WARNING)
        self.root_log.addHandler(console_handler)
        # BUGFIX: was `self.handlers[HandlerType.DialogBox.Console]` -
        # accessing one enum member through another. That resolves to the
        # same Console member on old Pythons but the member-from-member
        # access has been deprecated/removed in modern Python, and it was
        # presumably a typo for the Console member itself.
        self.handlers[HandlerType.Console] = console_handler

    # create file handler
    if self.log_directory is None:
        self.log_directory = appdirs.user_log_dir(self.name, self.author)
    if self.log_directory is not None:
        if self.delete_existing_log_files:
            shutil.rmtree(self.log_directory, ignore_errors=True)
        os.makedirs(self.log_directory, exist_ok=True)
        self.log_path = os.path.join(self.log_directory, '%s.log' % self.name)
        file_handler = logging.handlers.RotatingFileHandler(
            self.log_path, maxBytes=self.max_bytes, backupCount=self.backup_count)
        file_handler.setFormatter(self.log_formatter)
        file_handler.setLevel(logging.DEBUG if self.verbose else logging.INFO)
        self.root_log.addHandler(file_handler)
        self.handlers[HandlerType.File] = file_handler
        self.root_log.info('log file path : "%s" ("%s")' %
                           (self.log_path, os.path.abspath(self.log_path)))

    # error handler for callback on error or above
    if self.error_callback is not None:
        error_callback_handler = BalsaNullHandler(self.error_callback)
        error_callback_handler.setLevel(logging.ERROR)
        self.root_log.addHandler(error_callback_handler)
        self.handlers[HandlerType.Callback] = error_callback_handler

    # setting up Sentry error handling
    # For the Client to work you need a SENTRY_DSN environmental variable
    # set, or one must be provided.
    if self.sentry:
        dsn = self.sentry_dsn if self.sentry_dsn else os.environ['SENTRY_DSN']
        client = raven.Client(
            dsn=dsn,
            # during testing, never actually deliver events to Sentry
            sample_rate=0.0 if self.sentry_testing else 1.0,
        )
        sentry_handler = SentryHandler(client)
        sentry_handler.setLevel(logging.ERROR)
        self.root_log.addHandler(sentry_handler)
def setup_sentry():
    """Start `Sentry` and attach a `SentryHandler` (ERROR level) to `Dobby`'s package logger."""
    sentry_handler = SentryHandler(Client(release=__version__))
    sentry_handler.setLevel(logging.ERROR)
    package_logger = logging.getLogger(__package__)
    package_logger.addHandler(sentry_handler)
# Console handler: DEBUG when config["debug"] is truthy, otherwise WARNING.
ch.setLevel(logging.WARNING if "debug" not in config or not config["debug"] else logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)

# File handler always captures everything at DEBUG.
fh = logging.FileHandler('debug.log')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)

# Sentry is only wired up for non-debug runs that configure a DSN; passwords
# are scrubbed from events before sending.
if "sentry_url" in config and ("debug" not in config or not config["debug"]):
    client = Client(
        dsn=config["sentry_url"],
        processors=('raven.processors.SanitizePasswordsProcessor', ))
    handler = SentryHandler(client)
    handler.setLevel(logging.WARNING)
    setup_logging(handler)

# NOTE(review): FileNotFoundError cannot actually be caught here - open()
# runs outside the try block, so a missing sites.yaml raises before
# yaml.safe_load is reached.
with open("sites.yaml", 'r') as stream:
    try:
        sites = yaml.safe_load(stream)
    except (yaml.YAMLError, FileNotFoundError) as exception:
        logger.error(exception)
        sites = None
        exit(1)

async def fetch(session, url):
    # GET `url` via the aiohttp-style session and return the body as text.
    # NOTE(review): the matching except/finally of this try lies beyond the
    # visible chunk of the file.
    try:
        async with session.get(url) as response:
            return await response.text()
def run(base_dir, start_gunicorn_app=True, options=None):
    """Bootstrap and start a Zato server rooted at ``base_dir``.

    :param base_dir: server directory containing ``config/repo``
    :param start_gunicorn_app: when True, blocks running gunicorn; when
        False, returns the WSGI application instead (e.g. for embedding).
    :param options: dict with at least 'secret_key' and 'sync_internal';
        'fg' is read optionally.
    """
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    # Use PyMySQL as a drop-in MySQLdb replacement when it is installed.
    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not suprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocess
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure
    # no certificate validation. We don't want for urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    # NOTE(review): logging.addLevelName expects (level, levelName) - the
    # arguments here look reversed; confirm intent before changing.
    logging.addLevelName('TRACE1', TRACE1)
    logging_conf_path = os.path.join(repo_location, 'logging.conf')

    with open(logging_conf_path) as f:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the config file is presumably trusted here.
        logging_config = yaml.load(f)
        dictConfig(logging_config)

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    # Crypto manager needs the secret key handed in via command options and,
    # possibly, extra data read from stdin.
    crypto_manager = ServerCryptoManager(repo_location,
                                         secret_key=options['secret_key'],
                                         stdin_data=read_stdin_data())
    secrets_config = ConfigObj(os.path.join(repo_location, 'secrets.conf'),
                               use_zato=False)
    server_config = get_config(repo_location, 'server.conf',
                               crypto_manager=crypto_manager,
                               secrets_conf=secrets_config)
    pickup_config = get_config(repo_location, 'pickup.conf')
    sio_config = get_config(repo_location, 'simple-io.conf',
                            needs_user_config=False)
    sso_config = get_config(repo_location, 'sso.conf',
                            needs_user_config=False)
    normalize_sso_config(sso_config)

    # Now that we have access to server.conf, greenify libraries required to
    # be made greenlet-friendly, assuming that there are any - otherwise do
    # not do anything.
    to_greenify = []
    for key, value in server_config.get('greenify', {}).items():
        if asbool(value):
            if not os.path.exists(key):
                raise ValueError('No such path `{}`'.format(key))
            else:
                to_greenify.append(key)

    # Go ahead only if we actually have anything to greenify
    if to_greenify:
        import greenify
        greenify.greenify()
        for name in to_greenify:
            result = greenify.patch_lib(name)
            if not result:
                raise ValueError(
                    'Library `{}` could not be greenified'.format(name))
            else:
                logger.info('Greenified library `%s`', name)

    server_config.main.token = server_config.main.token.encode('utf8')

    # Do not proceed unless we can be certain our own preferred address or IP
    # can be obtained.
    preferred_address = server_config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(server_config.main.gunicorn_bind,
                                             server_config.preferred_address)

    if not preferred_address and not server_config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # Create the startup callable tool as soon as practical
    startup_callable_tool = StartupCallableTool(server_config)

    # Run the hook before there is any server object created
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.FS_CONFIG_ONLY, kwargs={
        'server_config': server_config,
        'pickup_config': pickup_config,
        'sio_config': sio_config,
        'sso_config': sso_config,
    })

    # New in 2.0 - Start monitoring as soon as possible
    if server_config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(server_config.newrelic.config,
                                  server_config.newrelic.environment or None,
                                  server_config.newrelic.ignore_errors or None,
                                  server_config.newrelic.log_file or None,
                                  server_config.newrelic.log_level or None)

    # Override the zunicorn-set Server HTTP header
    zunicorn.SERVER_SOFTWARE = server_config.misc.get('http_server_header',
                                                      'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(server_config.kvdb)
    kvdb_logger.info('Main process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = server_config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale,
                    value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Makes queries against Postgres asynchronous
    if asbool(server_config.odb.use_async_driver
              ) and server_config.odb.engine == 'postgresql':
        make_psycopg_green()

    if server_config.misc.http_proxy:
        os.environ['http_proxy'] = server_config.misc.http_proxy

    # Basic components needed for the server to boot up
    kvdb = KVDB()
    odb_manager = ODBManager(well_known_data=ZATO_CRYPTO_WELL_KNOWN_DATA)
    sql_pool_store = PoolStore()

    service_store = ServiceStore()
    service_store.odb = odb_manager
    service_store.services = {}

    server = ParallelServer()
    server.odb = odb_manager
    server.service_store = service_store
    server.service_store.server = server
    server.sql_pool_store = sql_pool_store
    server.service_modules = []
    server.kvdb = kvdb

    # Assigned here because it is a circular dependency
    odb_manager.parallel_server = server

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                server_config.main,
                                                server_config.crypto)

    # Wire the server with everything it needs before it starts.
    server.has_fg = options.get('fg')
    server.crypto_manager = crypto_manager
    server.odb_data = server_config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location,
                                             'user-conf')
    server.base_dir = base_dir
    server.logs_dir = os.path.join(server.base_dir, 'logs')
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.static_dir = os.path.join(server.base_dir, 'config', 'repo',
                                     'static')
    server.json_schema_dir = os.path.join(server.base_dir, 'config', 'repo',
                                          'schema', 'json')
    server.fs_server_config = server_config
    server.fs_sql_config = get_config(repo_location, 'sql.conf',
                                      needs_user_config=False)
    server.pickup_config = pickup_config
    server.logging_config = logging_config
    server.logging_conf_path = logging_conf_path
    server.sio_config = sio_config
    server.sso_config = sso_config
    server.user_config.update(server_config.user_config_items)
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')
    server.startup_callable_tool = startup_callable_tool
    server.is_sso_enabled = server.fs_server_config.component_enabled.sso
    if server.is_sso_enabled:
        server.sso_api = SSOAPI(server, sso_config, None,
                                crypto_manager.encrypt,
                                crypto_manager.decrypt,
                                crypto_manager.hash_secret,
                                crypto_manager.verify_hash, new_user_id)

    # Remove all locks possibly left over by previous server instances
    kvdb.component = 'master-proc'
    clear_locks(kvdb, server_config.main.token, server_config.kvdb,
                crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        server_config.misc.get('return_tracebacks', True))
    server.default_error_message = server_config.misc.get(
        'default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = server_config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = server_config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:
        # Localised imports: raven is only needed when a DSN is configured.
        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)
        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        # Attach the Sentry handler to the root logger and every zato* logger.
        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, server_config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      server_config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, server_config.profiler.cachegrind_filename),
            discard_first_request=server_config.profiler.discard_first_request,
            flush_at_shutdown=server_config.profiler.flush_at_shutdown,
            path=server_config.profiler.url_path,
            unwind=server_config.profiler.unwind)

    # New in 2.0 - set environmet variables for servers to inherit
    os_environ = server_config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the hook right before the Gunicorn-level server actually starts
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IMPL_BEFORE_RUN, kwargs={
        'zato_gunicorn_app': zato_gunicorn_app,
    })

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
def create_app(load_admin=True):
    """Application factory: build and wire the redash Flask app.

    :param load_admin: when True, also initialise the admin interface.
    :returns: a fully configured :class:`flask.Flask` instance.
    """
    # Why these imports are inside the function (circular-import avoidance):
    # 1. redash's __init__ module holds common helpers/variables.
    # 2. The modules imported below use values exported from that module.
    # 3. Those modules in turn import things back into here.
    # 4. This creates a mutual dependency.
    # 5/6. The fix is to defer such imports until after initialisation, e.g.
    #    `from redash import statsd_client` (used in the request module) must
    #    come before `from redash.metrics.request import provision_app`.
    # 7. Alternatively, move the import inside a function.
    # 8. This guarantees the module first entering sys.modules already
    #    contains the initialised common variables: an import statement whose
    #    module contains this module's dependencies must come after those
    #    dependencies are declared.
    from redash import extensions, handlers
    from redash.handlers.webpack import configure_webpack
    from redash.handlers import chrome_logger
    from redash.admin import init_admin
    from redash.models import db
    from redash.authentication import setup_authentication
    from redash.metrics.request import provision_app

    # https: // www.v2ex.com / t / 289972
    # When creating the Flask object a module (usually __name__) is passed in;
    # it is used as the root for locating templates. Blueprints can also
    # specify their own.
    app = Flask(__name__,
                # static assets directory
                # fix_assets_path(os.environ.get("REDASH_STATIC_ASSETS_PATH", "../client/dist/"))
                template_folder=settings.STATIC_ASSETS_PATH,
                # https://blog.csdn.net/qq_40952927/article/details/81157204
                static_folder=settings.STATIC_ASSETS_PATH,
                static_path='/static')

    # https://www.kancloud.cn/wizardforcel/explore-flask/140842
    # http://python.jobbole.com/84003/
    # Behind Nginx, ProxyFix recovers the real client request IP.
    app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT)
    # Custom URL converter.
    app.url_map.converters['org_slug'] = SlugConverter

    # Driven by the settings file.
    if settings.ENFORCE_HTTPS:
        # https: // www.helplib.com / GitHub / article_82448
        # Redirect all http traffic to https.
        # When should this be used? (left open by the original author)
        SSLify(app, skips=['ping'])

    # Exception alerting and notification handling via Sentry.
    if settings.SENTRY_DSN:
        from raven import Client
        from raven.contrib.flask import Sentry
        from raven.handlers.logging import SentryHandler
        client = Client(settings.SENTRY_DSN, release=__version__,
                        install_logging_hook=False)
        sentry = Sentry(app, client=client)
        sentry.client.release = __version__

        sentry_handler = SentryHandler(client=client)
        sentry_handler.setLevel(logging.ERROR)
        logging.getLogger().addHandler(sentry_handler)

    # Database configuration.
    app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
    # Built-in default configuration.
    app.config.update(settings.all_settings())

    # Initialise plugins and custom extensions.
    if load_admin:
        init_admin(app)

    # Database.
    # db = SQLAlchemy(app)
    db.init_app(app)
    # Database migrations.
    migrate.init_app(app, db)
    # Mail.
    mail.init_app(app)
    # Request rate limiting.
    limiter.init_app(app)
    # Logger.
    chrome_logger.init_app(app)
    extensions.init_extensions(app)
    # Before/after-request hooks, e.g. for performance metrics.
    provision_app(app)
    # Entry point for all the controllers!
    handlers.init_app(app)
    # API authentication registration!
    setup_authentication(app)
    # Webpack!
    configure_webpack(app)
    return app
def setup_handlers():
    '''
    Set up and return the Sentry logging handler.

    Reads the ``sentry_handler`` section of the Salt configuration, builds a
    ``raven.Client`` from either a DSN or the individual connection keys, tags
    events with selected grains, and returns a level-filtered
    ``SentryHandler`` (or ``False``/``None`` when not configured).
    '''
    __grains__ = salt.loader.grains(__opts__)
    __salt__ = salt.loader.minion_mods(__opts__)

    if 'sentry_handler' not in __opts__:
        log.debug('No \'sentry_handler\' key was found in the configuration')
        return False

    options = {}
    dsn = get_config_value('dsn')
    if dsn is not None:
        try:
            # support raven ver 5.5.0
            # Validate only that the DSN scheme is one raven can transport;
            # the DSN string itself is handed to the Client unchanged below.
            from raven.transport import TransportRegistry, default_transports
            from raven.utils.urlparse import urlparse
            transport_registry = TransportRegistry(default_transports)
            url = urlparse(dsn)
            if not transport_registry.supported_scheme(url.scheme):
                raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(
                    url.scheme))
        except ValueError as exc:
            # NOTE: a bad DSN is only logged here; execution continues and the
            # Client construction below is what would ultimately fail.
            log.info(
                'Raven failed to parse the configuration provided DSN: %s',
                exc)

    if not dsn:
        # Without a DSN, every individual connection key is mandatory.
        for key in ('project', 'servers', 'public_key', 'secret_key'):
            config_value = get_config_value(key)
            if config_value is None and key not in options:
                log.debug(
                    'The required \'sentry_handler\' configuration key, '
                    '\'%s\', is not properly configured. Not configuring '
                    'the sentry logging handler.', key)
                return
            elif config_value is None:
                continue
            options[key] = config_value

    # site: An optional, arbitrary string to identify this client installation.
    options.update({
        # site: An optional, arbitrary string to identify this client
        # installation
        'site': get_config_value('site'),

        # name: This will override the server_name value for this installation.
        # Defaults to socket.gethostname()
        'name': get_config_value('name'),

        # exclude_paths: Extending this allow you to ignore module prefixes
        # when sentry attempts to discover which function an error comes from
        'exclude_paths': get_config_value('exclude_paths', ()),

        # include_paths: For example, in Django this defaults to your list of
        # INSTALLED_APPS, and is used for drilling down where an exception is
        # located
        'include_paths': get_config_value('include_paths', ()),

        # list_max_length: The maximum number of items a list-like container
        # should store.
        'list_max_length': get_config_value('list_max_length'),

        # string_max_length: The maximum characters of a string that should be
        # stored.
        'string_max_length': get_config_value('string_max_length'),

        # auto_log_stacks: Should Raven automatically log frame stacks
        # (including locals) all calls as it would for exceptions.
        'auto_log_stacks': get_config_value('auto_log_stacks'),

        # timeout: If supported, the timeout value for sending messages to
        # remote.
        'timeout': get_config_value('timeout', 1),

        # processors: A list of processors to apply to events before sending
        # them to the Sentry server. Useful for sending additional global state
        # data or sanitizing data that you want to keep off of the server.
        'processors': get_config_value('processors'),

        # dsn: Ensure the DSN is passed into the client
        'dsn': dsn
    })

    client = raven.Client(**options)
    context = get_config_value('context')
    context_dict = {}
    if context is not None:
        # Tag Sentry events with the configured grains (e.g. 'id', 'os').
        for tag in context:
            try:
                tag_value = __grains__[tag]
            except KeyError:
                log.debug('Sentry tag \'%s\' not found in grains.', tag)
                continue
            # assumes grain values are sized (str/list); an int grain would
            # raise TypeError here — TODO confirm against used grains
            if len(tag_value) > 0:
                context_dict[tag] = tag_value
        if len(context_dict) > 0:
            client.context.merge({'tags': context_dict})

    try:
        handler = SentryHandler(client)

        exclude_patterns = get_config_value('exclude_patterns', None)
        if exclude_patterns:
            filter_regexes = [
                re.compile(pattern) for pattern in exclude_patterns
            ]

            class FilterExcludedMessages(object):
                # Drop any record whose rendered message matches one of the
                # configured exclude regexes.
                @staticmethod
                def filter(record):
                    m = record.getMessage()
                    return not any(regex.search(m) for regex in filter_regexes)

            handler.addFilter(FilterExcludedMessages())

        handler.setLevel(LOG_LEVELS[get_config_value('log_level', 'error')])
        return handler
    except ValueError as exc:
        log.debug('Failed to setup the sentry logging handler', exc_info=True)
def logger_init(self): """ Initialize the logger globally. :returns: True """ # Let's attempt to make the log directory if it doesn't exist os.makedirs(self.log_dir, exist_ok=True) # Instantiate a logger self.log = logging.getLogger("stoq") # Set the default logging level self.log.setLevel(self.log_level.upper()) # Define the log filename and path log_file = "stoq.log" self.log_path = os.path.abspath(os.path.join(self.log_dir, log_file)) # Setup our logfile file_handler = RotatingFileHandler(filename=self.log_path, mode='a', maxBytes=int(self.log_maxbytes), backupCount=int( self.log_backup_count)) # Setup our STDERR output stderr_handler = logging.StreamHandler() if self.log_syntax == "json": formatter = jsonlogger.JsonFormatter else: formatter = logging.Formatter # Define the format of the log file log_format = formatter( "%(asctime)s %(levelname)s %(name)s:%(filename)s:%(funcName)s:%(lineno)s: " "%(message)s", datefmt='%Y-%m-%d %H:%M:%S') stderr_logformat = formatter( "[%(asctime)s %(levelname)s] %(name)s: %(message)s") file_handler.setFormatter(log_format) stderr_handler.setFormatter(stderr_logformat) # Attach the handler to the logger self.log.addHandler(file_handler) self.log.addHandler(stderr_handler) # If logging to sentry.io, setup the logger if raven_imported and self.sentry_url: try: sentry_handler = SentryHandler( self.sentry_url, ignore_exceptions=self.sentry_ignore_list) sentry_handler.setFormatter( "[%(asctime)s][%(levelname)s] %(name)s " "%(filename)s:%(funcName)s:%(lineno)d | %(message)s") sentry_handler.setLevel(logging.WARN) self.log.addHandler(sentry_handler) except: self.log.error("Unable to initiate logging to Sentry")
PURPOSE_DOMAIN, CONSTRUCTION_DOMAIN, DAM_CONDITION_DOMAIN, BARRIER_SEVERITY_DOMAIN, ) app = Flask(__name__) CORS(app, resources={r"/api/*": {"origins": "*"}}) log = app.logger SENTRY_DSN = os.getenv("SENTRY_DSN", None) sentry = Sentry(app, dsn=SENTRY_DSN) if SENTRY_DSN is not None: print("Configuring Sentry logging...") handler = SentryHandler(SENTRY_DSN) handler.setLevel(logging.ERROR) setup_logging(handler) else: print("Sentry not configured") TYPES = ("dams", "barriers") LAYERS = ("HUC6", "HUC8", "HUC12", "State", "County", "ECO3", "ECO4") FORMATS = ("csv",) # TODO: "shp" DAM_FILTER_FIELDS = [ "Feasibility", "HeightClass", "Condition", "Construction",
import logging from raven.conf import setup_logging from raven.handlers.logging import SentryHandler from app.config import SENTRY_DSN # Sentry logging setup (optional) if SENTRY_DSN: sentry_handler = SentryHandler(SENTRY_DSN, tags={'component': "flask_app"}) sentry_handler.setLevel(logging.ERROR) setup_logging(sentry_handler)
def init_logger(self):
    """
    Initialize the logger. Call exactly once.

    Attaches, in order: a rotating file handler, either a GUI dialog-box
    handler or a console handler, an in-memory string-list handler, an
    optional Sentry handler, and an optional error-callback handler.
    """
    assert (self.name is not None)
    assert (self.author is not None)
    self.handlers = {}
    if self.is_root:
        self.log = logging.getLogger()
    else:
        self.log = logging.getLogger(self.name)
    if not self.propagate:
        self.log.propagate = False

    # set the root log level
    if self.verbose:
        self.log.setLevel(logging.DEBUG)
    else:
        self.log.setLevel(logging.INFO)

    if self.log.hasHandlers():
        # NOTE(review): this only logs a warning-style message — handlers are
        # still added again below if init is called twice; confirm intended.
        self.log.info("Logger already initialized.")

    # create file handler
    if self.log_directory is None:
        self.log_directory = appdirs.user_log_dir(self.name, self.author)
    if self.log_directory is not None:
        if self.delete_existing_log_files:
            # Best-effort removal of prior log files; ignore files in use.
            for file_path in glob(
                    os.path.join(self.log_directory,
                                 "*%s" % self.log_extension)):
                try:
                    os.remove(file_path)
                except OSError:
                    pass
        os.makedirs(self.log_directory, exist_ok=True)
        self.log_path = os.path.join(
            self.log_directory, "%s%s" % (self.name, self.log_extension))
        file_handler = logging.handlers.RotatingFileHandler(
            self.log_path,
            maxBytes=self.max_bytes,
            backupCount=self.backup_count)
        file_handler.setFormatter(self.log_formatter)
        if self.verbose:
            file_handler.setLevel(logging.DEBUG)
        else:
            file_handler.setLevel(logging.INFO)
        self.log.addHandler(file_handler)
        self.handlers[HandlerType.File] = file_handler
        self.log.info('log file path : "%s" ("%s")' %
                      (self.log_path, os.path.abspath(self.log_path)))

    if self.gui:
        # GUI will only pop up a dialog box - it's important that GUI not try
        # to output to stdout or stderr since that would likely cause a
        # permissions error.
        dialog_box_handler = DialogBoxHandler(self.rate_limits)
        if self.verbose:
            dialog_box_handler.setLevel(logging.WARNING)
        else:
            dialog_box_handler.setLevel(logging.ERROR)
        self.log.addHandler(dialog_box_handler)
        self.handlers[HandlerType.DialogBox] = dialog_box_handler
    else:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(self.log_formatter)
        if self.verbose:
            console_handler.setLevel(logging.INFO)
        else:
            console_handler.setLevel(logging.WARNING)
        self.log.addHandler(console_handler)
        self.handlers[HandlerType.Console] = console_handler

    # Keep the last N records in memory for later inspection/tests.
    string_list_handler = BalsaStringListHandler(
        self.max_string_list_entries)
    string_list_handler.setFormatter(self.log_formatter)
    string_list_handler.setLevel(logging.INFO)
    self.log.addHandler(string_list_handler)
    self.handlers[HandlerType.StringList] = string_list_handler

    # setting up Sentry error handling
    # For the Client to work you need a SENTRY_DSN environmental variable
    # set, or one must be provided.
    if self.use_sentry:
        # inhibit_cloud_services drops the sample rate to 0 so nothing is
        # actually transmitted while the handler wiring stays identical.
        sample_rate = 0.0 if self.inhibit_cloud_services else 1.0
        if self.sentry_dsn is None:
            self.sentry_client = raven.Client(sample_rate=sample_rate)
        else:
            self.sentry_client = raven.Client(dsn=self.sentry_dsn,
                                              sample_rate=sample_rate)
        sentry_handler = SentryHandler(self.sentry_client)
        sentry_handler.setLevel(logging.ERROR)
        self.handlers[HandlerType.Sentry] = sentry_handler
        self.log.addHandler(sentry_handler)

    # error handler for callback on error or above
    # (this is last since the user may do a sys.exit() in the error callback)
    if self.error_callback is not None:
        error_callback_handler = BalsaNullHandler(self.error_callback)
        error_callback_handler.setLevel(logging.ERROR)
        self.log.addHandler(error_callback_handler)
        self.handlers[HandlerType.Callback] = error_callback_handler
def setup_logger(logger_name, log_file=None, sentry_url=None):
    """Run once when the module is loaded and enable logging.

    :param logger_name: The logger name that we want to set up.
    :type logger_name: str

    :param log_file: Optional full path to a file to write logs to.
    :type log_file: str

    :param sentry_url: Optional url to sentry api for remote logging.
        Defaults to http://c64a83978732474ea751d432ab943a6b:
        [email protected]/5 which is the sentry project for InaSAFE
        desktop.
    :type sentry_url: str

    Borrowed heavily from this:
    http://docs.python.org/howto/logging-cookbook.html

    Now to log a message do::

       LOGGER.debug('Some debug message')

    .. note:: The file logs are written to the inasafe user tmp dir e.g.:
       /tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    default_handler_level = logging.DEBUG
    # create formatter that will be added to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # create syslog handler which logs even debug messages
    # (ariel): Make this log to /var/log/safe.log instead of
    # /var/log/syslog
    # (Tim) Ole and I discussed this - we prefer to log into the
    # user's temporary working directory.
    inasafe_log_path = log_file_path()
    if log_file is None:
        file_handler = logging.FileHandler(inasafe_log_path)
    else:
        file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(default_handler_level)
    # create console handler with a higher log level
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    # create a QGIS handler
    qgis_handler = QgsLogHandler()

    # Set formatters
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)
    qgis_handler.setFormatter(formatter)

    # add the handlers to the logger
    add_logging_handler_once(logger, file_handler)
    add_logging_handler_once(logger, console_handler)
    add_logging_handler_once(logger, qgis_handler)

    # Sentry handler - this is optional hence the localised import
    # It will only log if pip install raven. If raven is available
    # logging messages will be sent to http://sentry.linfiniti.com
    # We will log exceptions only there. You need to either:
    # * Set env var 'INASAFE_SENTRY=1' present (value can be anything)
    # before this will be enabled or sentry is enabled in QSettings
    settings = QSettings()
    flag = settings.value('inasafe/useSentry', False, type=bool)
    env_inasafe_sentry = 'INASAFE_SENTRY' in os.environ
    if env_inasafe_sentry or flag:
        if sentry_url is None:
            client = Client(
                'http://11b7c9cb73874f97807ebc1934575e92'
                ':[email protected]/5')
        else:
            client = Client(sentry_url)
        sentry_handler = SentryHandler(client)
        sentry_handler.setFormatter(formatter)
        sentry_handler.setLevel(logging.ERROR)
        if add_logging_handler_once(logger, sentry_handler):
            logger.debug('Sentry logging enabled in safe')
    else:
        logger.debug('Sentry logging disabled in safe')
def run(base_dir, start_gunicorn_app=True, options=None):
    """Bootstrap and run a Zato server out of ``base_dir``.

    Configures logging, databases, locale, optional New Relic / Sentry /
    profiler integrations, populates the server object from config files,
    then either runs the gunicorn app or returns its WSGI application.

    :param base_dir: the server's base directory (holds config/repo).
    :param start_gunicorn_app: when True, block and run gunicorn; when
        False, return the WSGI app instead (used by tests/embedding).
    :param options: dict of extra options; 'sync_internal' is required here.
    """
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocess
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure
    # no certificate validation. We don't want for urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')
    pickup_config = get_config(repo_location, 'pickup.conf')

    # Do not proceed unless we can be certain our own preferred address
    # or IP can be obtained.
    preferred_address = config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(config.main.gunicorn_bind,
                                             config.preferred_address)

    if not preferred_address and not config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None,
            config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None,
            config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale,
                    value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(
            config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    server = app_context.get_object('server')
    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                config.main, config.crypto)

    # Populate the server object from configuration before anything uses it.
    server.crypto_manager = crypto_manager
    server.odb_data = config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location,
                                             'user-conf')
    server.base_dir = base_dir
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.fs_server_config = config
    server.pickup_config = pickup_config
    server.user_config.update(config.user_config_items)
    server.app_context = app_context
    server.preferred_address = preferred_address
    # NOTE: 'sync_internal' is a required key in `options` — a KeyError here
    # means the caller did not supply it.
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        config.misc.get('return_tracebacks', True))
    server.default_error_message = config.misc.get('default_error_message',
                                                   'An error has occurred')

    # Turn the repo dir into an actual repository and commit any
    # new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:
        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)
        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        # Attach to the root logger and to every zato* logger so nothing
        # escapes Sentry regardless of propagation settings.
        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
if 'CONFIG' in os.environ: import runpy ctx = runpy.run_path(os.environ['CONFIG']) update_globals(ctx) # Local settings if os.path.exists(jroot('settings', 'local.py')): from .local import * # Check required settings invalid_opts = [k for k, v in globals().items() if v == 'UNDEFINED'] if invalid_opts: raise RuntimeError('Following options must be defined: {}'.format(sorted(invalid_opts))) # Configure logging if LOGGING: import logging.config logging.config.dictConfig(LOGGING) else: logging.basicConfig(level=LOGGING_LEVEL) # Configure Sentry if SENTRY: from raven import Client from raven.handlers.logging import SentryHandler sentry_client = Client(SENTRY) sentry_handler = SentryHandler(sentry_client) sentry_handler.setLevel('ERROR') logging.getLogger().addHandler(sentry_handler)
class Sentry:
    """Sentry Debugging

    Red-DiscordBot cog that forwards log records from the "red" logger to a
    Sentry instance via raven, with owner-only commands to manage the DSN,
    tags, log level, SSL verification and ignored loggers.
    """
    __author__ = "Kowlin"
    __version__ = "S-V1.1"

    def __init__(self, bot):
        self.bot = bot
        self.settings = dataIO.load_json('data/sentry/settings.json')
        if self.settings['dsn'] is None:
            log.warning('Sentry: DSN key is not set. Not sending logs!')
        else:
            if self.settings['ssl'] is False:
                # Disable certificate verification through the DSN query arg.
                self.raven = Client(self.settings['dsn'] + '?verify_ssl=0')
            else:
                self.raven = Client(self.settings['dsn'])
            self.handler = SentryHandler(self.raven)
            self.handler.setLevel(self.settings['level'])
            # BUG FIX: addHandler() returns None, so the old one-liner stored
            # None in self.logger; keep a real logger reference instead.
            self.logger = logging.getLogger("red")
            self.logger.addHandler(self.handler)
            setup_logging(self.handler)
            # --- Raven settings
            self.raven.tags = self.settings['tags']
            if self.settings['name'] is not None:
                self.raven.name = self.settings['name']
            if self.settings['environment'] is not None:
                self.raven.environment = self.settings['environment']
            if self.settings.get('ignore'):
                self.raven.ignore = self.settings['ignore']

    def __unload(self):
        # BUG FIX: self.handler only exists when a DSN was configured;
        # guard so unloading an unconfigured cog doesn't raise.
        if hasattr(self, 'handler'):
            logging.getLogger("red").removeHandler(self.handler)

    @commands.group(pass_context=True)
    @checks.is_owner()
    async def sentry(self, ctx):
        """Manage Sentry logging"""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)

    @sentry.command(pass_context=True)
    async def dsn(self, ctx, dsn: str):
        """Set your DSN, Full private required. Recommended to do in DM"""
        if re.match('(https\:\/\/|http\:\/\/).*\:.*\@.*\/*[0-9]', dsn) is None:
            await self.bot.say('DSN key is not valid. Make sure its a full private key!')
        else:
            self.settings['dsn'] = dsn
            try:
                # Delete the user's message so the private DSN isn't left
                # visible in the channel; falls back to asking them to do it.
                await self.bot.delete_message(ctx.message)
                await self.bot.say('DSN key set, removed your message for safety.\nReload the cog for the changes to have effect.\nTry the key with ``{}sentry test``'.format(ctx.prefix))
            except:
                await self.bot.say('DSN key set. Please remove your message for the safety of your logging operation.\nReload the cog for the changes to have effect.\nTry the key with ``{}sentry test``'.format(ctx.prefix))
            self.save_json()

    @sentry.command(pass_context=True)
    async def test(self, ctx, *, message="A test message to Sentry"):
        """Send a test message to the Sentry host."""
        try:
            self.raven.captureMessage(message)
            await self.bot.say('Test message should be send. Please check your Sentry instance.')
        except:
            await self.bot.say('Sentry client isn\'t setup. Please set a key and reload the cog.')

    @sentry.command(pass_context=True)
    async def name(self, ctx, name):
        """Set the "server_name" that appears in Sentry"""
        self.settings['name'] = name
        await self.bot.say('Name set,\nReload the cog for the changes to have effect.')
        self.save_json()

    @sentry.command(pass_context=True)
    async def environment(self, ctx, environment):
        """Set the environment that appears in Sentry"""
        self.settings['environment'] = environment
        await self.bot.say('Environment set,\nReload the cog for the changes to have effect.')
        # CONSISTENCY FIX: every other setter persists its change; this one
        # previously forgot to, so the environment was lost on restart.
        self.save_json()

    @sentry.command(pass_context=True)
    async def level(self, ctx, level):
        """Set the logging level for Sentry

        The level can only be one of the following:
        critical, debug, error, fatal, notset, warn, warning

        Recommended: error"""
        log_list = ['CRITICAL', 'DEBUG', 'ERROR', 'FATAL', 'NOTSET', 'WARN', 'WARNING']
        if level.upper() in log_list:
            self.settings['level'] = level.upper()
            await self.bot.say('Log level now set to {}\nReload the cog for the changes to have effect.'.format(level))
            self.save_json()
        else:
            await self.bot.say('Invalid log level, please use one of the following:\ncritical, debug, error, fatal, notset, warn, warning')

    @sentry.command(pass_context=True)
    async def ssl(self, ctx):
        """Enable or disable SSL verification to the Sentry server."""
        if self.settings['ssl'] is True:
            self.settings['ssl'] = False
            await self.bot.say('SSL verification is disabled.\nReload the cog for the changes to have effect.')
        else:
            self.settings['ssl'] = True
            await self.bot.say('SSL verification is enabled\nReload the cog for the changes to have effect.')
        self.save_json()

    @sentry.group(pass_context=True)
    async def tags(self, ctx):
        """Manage tags for Sentry"""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)

    @tags.command(pass_context=True)
    async def add(self, ctx, tag, *, value):
        """Add/edit a tag with the set value."""
        self.settings['tags'][tag] = value
        await self.bot.say('Tag ``{}`` with value ``{}`` added.\nReload the cog for the changes to have effect.'.format(tag, value))
        self.save_json()

    @tags.command(pass_context=True)
    async def remove(self, ctx, tag):
        """Remove a tag"""
        if tag in self.settings['tags']:
            del self.settings['tags'][tag]
            await self.bot.say('Tag ``{}`` removed.\nReload the cog for the changes to have effect.'.format(tag))
            self.save_json()
        else:
            await self.bot.say('This tag doesn\'t exist')

    @tags.command(pass_context=True)
    async def list(self, ctx):
        """List all tags"""
        tag_list = ''
        for tag, value in self.settings['tags'].items():
            tag_list += '{}: {}\n'.format(tag, value)
        await self.bot.say("```\n{}\n```".format(tag_list))

    def save_json(self):
        # Persist the in-memory settings to disk.
        dataIO.save_json('data/sentry/settings.json', self.settings)

    @sentry.group(pass_context=True)
    async def ignore(self, ctx):
        """Manage the ignored loggers for Sentry"""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)

    @ignore.command(pass_context=True, name='add')
    async def add_ignore(self, ctx, logger):
        """Add a logger to the ignore list"""
        if 'ignore' not in self.settings:
            self.settings['ignore'] = []
        if logger not in self.settings['ignore']:
            self.settings['ignore'].append(logger)
            self.save_json()
            await self.bot.say('``{}`` added to the ignore list.\nReload the cog for the changes to have effect.'.format(logger))
        else:
            await self.bot.say('``{}`` already in the ignore list.'.format(logger))

    @ignore.command(pass_context=True, name='remove')
    async def remove_ignore(self, ctx, logger):
        """Remove a logger from the ignore list"""
        # BUG FIX: the ignore store is a *list* (see add_ignore), so the old
        # `del self.settings['ignore'][logger]` raised TypeError (list indices
        # must be integers); use list.remove(). Also tolerate a missing
        # 'ignore' key instead of raising KeyError.
        if logger in self.settings.get('ignore', []):
            self.settings['ignore'].remove(logger)
            self.save_json()
            await self.bot.say('``{}`` deleted from the ignore list.\nReload the cog for the changes to have effect.'.format(logger))
        else:
            await self.bot.say('``{}`` is not in the ignore list.'.format(logger))
def setup_handlers():
    """Set up and return the Sentry logging handler.

    Builds ``raven.Client`` options from a configured DSN (parsed with
    ``raven.load``) and/or the individual connection keys, then returns a
    level-filtered ``SentryHandler`` (or ``False``/``None`` when the
    configuration is absent or incomplete).
    """
    if 'sentry_handler' not in __opts__:
        log.debug('No \'sentry_handler\' key was found in the configuration')
        return False

    options = {}

    dsn = get_config_value('dsn')
    if dsn is not None:
        try:
            # raven.load() splits a DSN into its project/server/key parts.
            dsn_config = raven.load(dsn)
            options.update({
                'project': dsn_config['SENTRY_PROJECT'],
                'servers': dsn_config['SENTRY_SERVERS'],
                'public_key': dsn_config['SENTRY_PUBLIC_KEY'],
                'private_key': dsn_config['SENTRY_SECRET_KEY']
            })
        except ValueError as exc:
            log.info('Raven failed to parse the configuration provided '
                     'DSN: {0}'.format(exc))

    # Allow options to be overridden if previously parsed, or define them
    for key in ('project', 'servers', 'public_key', 'private_key'):
        config_value = get_config_value(key)
        if config_value is None and key not in options:
            log.debug('The required \'sentry_handler\' configuration key, '
                      '{0!r}, is not properly configured. Not configuring '
                      'the sentry logging handler.'.format(key))
            return
        elif config_value is None:
            continue
        options[key] = config_value

    # site: An optional, arbitrary string to identify this client installation.
    options.update({
        # site: An optional, arbitrary string to identify this client
        # installation
        'site': get_config_value('site'),

        # name: This will override the server_name value for this installation.
        # Defaults to socket.gethostname()
        'name': get_config_value('name'),

        # exclude_paths: Extending this allow you to ignore module prefixes
        # when sentry attempts to discover which function an error comes from
        'exclude_paths': get_config_value('exclude_paths', ()),

        # include_paths: For example, in Django this defaults to your list of
        # INSTALLED_APPS, and is used for drilling down where an exception is
        # located
        'include_paths': get_config_value('include_paths', ()),

        # list_max_length: The maximum number of items a list-like container
        # should store.
        'list_max_length': get_config_value('list_max_length'),

        # string_max_length: The maximum characters of a string that should be
        # stored.
        'string_max_length': get_config_value('string_max_length'),

        # auto_log_stacks: Should Raven automatically log frame stacks
        # (including locals) all calls as it would for exceptions.
        'auto_log_stacks': get_config_value('auto_log_stacks'),

        # timeout: If supported, the timeout value for sending messages to
        # remote.
        'timeout': get_config_value('timeout', 1),

        # processors: A list of processors to apply to events before sending
        # them to the Sentry server. Useful for sending additional global state
        # data or sanitizing data that you want to keep off of the server.
        'processors': get_config_value('processors'),

        # dsn: Ensure the DSN is passed into the client
        'dsn': dsn
    })

    client = raven.Client(**options)

    try:
        handler = SentryHandler(client)
        handler.setLevel(LOG_LEVELS[get_config_value('log_level', 'error')])
        return handler
    except ValueError as exc:
        log.debug(
            'Failed to setup the sentry logging handler: {0}'.format(exc),
            exc_info=exc)
def setup_logger(logger_name, log_file=None, sentry_url=None):
    """Run once when the module is loaded and enable logging.

    :param logger_name: The logger name that we want to set up.
    :type logger_name: str

    :param log_file: Optional full path to a file to write logs to.
    :type log_file: str

    :param sentry_url: Optional url to sentry api for remote logging.
        Defaults to URL defined in safe.definitions.sentry.py
        which is the sentry project for InaSAFE desktop.
    :type sentry_url: str

    Borrowed heavily from this:
    http://docs.python.org/howto/logging-cookbook.html

    Now to log a message do::

       LOGGER.debug('Some debug message')

    .. note:: The file logs are written to the inasafe user tmp dir e.g.:
       /tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log
    """
    logger = logging.getLogger(logger_name)
    # Log level is overridable from the environment; defaults to DEBUG.
    logging_level = int(os.environ.get('INASAFE_LOGGING_LEVEL', logging.DEBUG))
    logger.setLevel(logging_level)
    default_handler_level = logging_level
    # create formatter that will be added to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # create syslog handler which logs even debug messages
    # (ariel): Make this log to /var/log/safe.log instead of
    # /var/log/syslog
    # (Tim) Ole and I discussed this - we prefer to log into the
    # user's temporary working directory.
    inasafe_log_path = log_file_path()
    if log_file is None:
        file_handler = logging.FileHandler(inasafe_log_path)
    else:
        file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(default_handler_level)
    file_handler.setFormatter(formatter)
    add_logging_handler_once(logger, file_handler)

    if 'MUTE_LOGS' not in os.environ:
        # create console handler with a higher log level
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(formatter)
        add_logging_handler_once(logger, console_handler)

    # create a QGIS handler
    qgis_handler = QgsLogHandler()
    qgis_handler.setFormatter(formatter)
    add_logging_handler_once(logger, qgis_handler)

    # Sentry handler - this is optional hence the localised import
    # If raven is available logging messages will be sent to
    # http://sentry.kartoza.com
    # We will log exceptions only there. You need to either:
    # * Set env var 'INASAFE_SENTRY=1' present (value can be anything)
    # before this will be enabled or sentry is enabled in QSettings
    qsettings_flag = QSettings().value('inasafe/useSentry', False, type=bool)
    environment_flag = 'INASAFE_SENTRY' in os.environ
    if environment_flag or qsettings_flag:
        if sentry_url is None:
            sentry_url = PRODUCTION_SERVER
        # Tag events with the GDAL / OS / QGIS / Qt versions for triage.
        tags = dict()
        tags[provenance_gdal_version['provenance_key']] = gdal.__version__
        tags[provenance_os['provenance_key']] = readable_os_version()
        qgis_short_version = provenance_qgis_version['provenance_key']
        qgis_full_version = qgis_short_version + '_full'
        versions = [str(v) for v in qgis_version_detailed()]
        tags[qgis_short_version] = '.'.join(versions[0:2])
        tags[qgis_full_version] = '.'.join(versions[0:3])
        tags[provenance_qt_version['provenance_key']] = QT_VERSION_STR
        hostname = os.environ.get('HOSTNAME_SENTRY', socket.gethostname())
        sentry_handler = SentryHandler(
            dsn=sentry_url,
            name=hostname,
            release=get_version(),
            tags=tags,
        )
        sentry_handler.setFormatter(formatter)
        sentry_handler.setLevel(logging.ERROR)
        if add_logging_handler_once(logger, sentry_handler):
            logger.debug('Sentry logging enabled in safe')
    else:
        logger.debug('Sentry logging disabled in safe')
def setup_logger(log_file=None, sentry_url=None):
    """Run once when the module is loaded and enable logging.

    :param log_file: Optional full path to a file to write logs to.
    :type log_file: str

    :param sentry_url: Optional url to sentry api for remote logging.
        Defaults to http://c64a83978732474ea751d432ab943a6b:
        [email protected]/5 which is the sentry project
        for InaSAFE desktop.
    :type sentry_url: str

    Borrowed heavily from this:
    http://docs.python.org/howto/logging-cookbook.html

    Use this to first initialise the logger (see safe/__init__.py)::

       from safe_qgis import utilities
       utilities.setupLogger()

    You would typically only need to do the above once ever as the
    safe model is initialised early and will set up the logger
    globally so it is available to all packages / subpackages as
    shown below.

    In a module that wants to do logging then use this example as
    a guide to get the initialised logger instance::

       # The LOGGER is initialised in utilities.py by init
       import logging
       LOGGER = logging.getLogger('InaSAFE')

    Now to log a message do::

       LOGGER.debug('Some debug message')

    .. note:: The file logs are written to the inasafe user tmp dir e.g.:
       /tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log
    """
    myLogger = logging.getLogger('InaSAFE')
    myLogger.setLevel(logging.DEBUG)
    # Level applied to the file handler below (fixed typo: was
    # 'myDefaultHanderLevel').
    myDefaultHandlerLevel = logging.DEBUG
    # create formatter that will be added to the handlers
    myFormatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # create syslog handler which logs even debug messages
    # (ariel): Make this log to /var/log/safe.log instead of
    #               /var/log/syslog
    # (Tim) Ole and I discussed this - we prefer to log into the
    # user's temporary working directory.
    myTempDir = temp_dir('logs')
    myFilename = os.path.join(myTempDir, 'inasafe.log')
    if log_file is None:
        myFileHandler = logging.FileHandler(myFilename)
    else:
        myFileHandler = logging.FileHandler(log_file)
    myFileHandler.setLevel(myDefaultHandlerLevel)
    # create console handler with a higher log level
    myConsoleHandler = logging.StreamHandler()
    myConsoleHandler.setLevel(logging.INFO)
    myQGISHandler = QgsLogHandler()

    # Sentry handler - this is optional hence the localised import
    # It will only log if pip install raven. If raven is available
    # logging messages will be sent to http://sentry.linfiniti.com
    # We will log exceptions only there. You need to either:
    # * Set env var 'INASAFE_SENTRY=1' present (value can be anything)
    # * Enable the 'help improve InaSAFE by submitting errors to a remote
    #   server' option in InaSAFE options dialog
    # before this will be enabled.
    mySettings = QtCore.QSettings()
    # type=bool is required: without it QSettings may hand the stored
    # value back as the *string* 'false', which is truthy and would
    # silently enable Sentry. Matches the newer setup_logger variant
    # in this file which already passes type=bool.
    myFlag = mySettings.value('inasafe/useSentry', False, type=bool)
    if 'INASAFE_SENTRY' in os.environ or myFlag:
        if sentry_url is None:
            myClient = Client(
                'http://c64a83978732474ea751d432ab943a6b'
                ':[email protected]/5')
        else:
            myClient = Client(sentry_url)
        mySentryHandler = SentryHandler(myClient)
        mySentryHandler.setFormatter(myFormatter)
        # Only ship ERROR and above to the remote server.
        mySentryHandler.setLevel(logging.ERROR)
        if add_logging_handler_once(myLogger, mySentryHandler):
            myLogger.debug('Sentry logging enabled')
        else:
            myLogger.debug('Sentry logging disabled')

    # Set formatters
    myFileHandler.setFormatter(myFormatter)
    myConsoleHandler.setFormatter(myFormatter)
    myQGISHandler.setFormatter(myFormatter)

    # add the handlers to the logger
    add_logging_handler_once(myLogger, myFileHandler)
    add_logging_handler_once(myLogger, myConsoleHandler)
    add_logging_handler_once(myLogger, myQGISHandler)
def start(self):
    """Wire up this logger's handlers: Sentry, console and file logs.

    Drops any previously attached handlers first, then attaches:
    a Sentry handler for ERROR-level records, an optional console
    handler, and (when ``self.logFile`` is set) a main log file plus
    a separate ``.error.log`` file for errors only.
    """
    # Start from a clean slate — discard whatever handlers were
    # attached by a previous start().
    self.handlers = []

    # Effective level for the console / main file handlers.
    verbose_level = (self.logLevels['DEBUG'] if self.debugLogging
                     else self.logLevels['INFO'])

    # --- Sentry: ship ERROR records to the remote collector ---
    client = raven.Client(
        'https://*****:*****@sentry.sickrage.ca/5?verify_ssl=0',
        release=sickrage.version(),
        repos={'sickrage': {
            'name': 'sickrage/sickrage'
        }})

    tags = {
        'platform': platform.platform(),
        'locale': sys.getdefaultencoding(),
        'python': sys.version_info
    }
    app_config = sickrage.app.config
    if app_config and app_config.sub_id:
        tags['sub_id'] = app_config.sub_id
    if app_config and app_config.app_id:
        tags['app_id'] = app_config.app_id

    remote_handler = SentryHandler(client=client, tags=tags)
    remote_handler.setLevel(self.logLevels['ERROR'])
    remote_handler.set_name('sentry')
    self.addHandler(remote_handler)

    # --- console log handler ---
    if self.consoleLogging:
        console = logging.StreamHandler()
        console.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
            '%H:%M:%S'))
        console.setLevel(verbose_level)
        self.addHandler(console)

    # --- file log handlers ---
    if self.logFile:
        log_dir = os.path.dirname(self.logFile)
        # Bail out of file logging entirely if the folder is missing
        # and cannot be created.
        if not os.path.exists(log_dir) and not make_dir(log_dir):
            return

        # Developers get a plain (non-rotating) file for easier tailing.
        if sickrage.app.developer:
            main_file = FileHandler(filename=self.logFile, )
        else:
            main_file = RotatingFileHandler(filename=self.logFile,
                                            maxBytes=self.logSize,
                                            backupCount=self.logNr)

        # Errors are duplicated into a dedicated .error.log file.
        error_file = RotatingFileHandler(
            filename=self.logFile.replace('.log', '.error.log'),
            maxBytes=self.logSize,
            backupCount=self.logNr)

        file_formatter = logging.Formatter(
            '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
            '%Y-%m-%d %H:%M:%S')

        main_file.setFormatter(file_formatter)
        main_file.setLevel(verbose_level)
        self.addHandler(main_file)

        error_file.setFormatter(file_formatter)
        error_file.setLevel(self.logLevels['ERROR'])
        self.addHandler(error_file)