def register_sentry_handler(): handler = SentryHandler() handler.client = app.extensions['sentry'].client setup_logging(handler) return handler
def set_logger(cls, filename, to_sentry=False): logger = logging.getLogger() logger.setLevel(getattr(logging, config['logging']['level'].upper())) logger.handlers = [] format_ = logging.Formatter( '[%(levelname)s %(asctime)s.%(msecs)d %(module)s:%(lineno)d]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S' ) if config['logging']['path']: channel = logging.handlers.RotatingFileHandler( filename=os.path.join(config['logging']['path'], filename), maxBytes=config['logging']['max_size'], backupCount=config['logging']['num_backups'] ) channel.setFormatter(format_) logger.addHandler(channel) else:# send to console instead of file channel = logging.StreamHandler() channel.setFormatter(format_) logger.addHandler(channel) if to_sentry and config['sentry_dsn']: handler = SentryHandler( config['sentry_dsn'] ) handler.setLevel('ERROR') logger.addHandler(handler)
def sentry_logger(sender, logger, loglevel, logfile, format, colorize, **kw): filter_ = CeleryFilter() handler = SentryHandler(client) handler.setLevel(loglevel) handler.addFilter(filter_) logger.addHandler(handler)
def server_log_handler(client): """ Adds sentry log handler to the logger :return: the sentry handler """ from raven.handlers.logging import SentryHandler sh = SentryHandler(client=client, level=logging.ERROR) # Don't send Sentry events for command-line usage errors old_emit = sh.emit def emit(self, record): if record.message.startswith("Command-line usage error:"): return return old_emit(record) sh.emit = lambda x: emit(sh, x) fmt = ("[%(asctime)s][%(levelname)s] %(filename)s: %(lineno)d | " "%(message)s") formatter = logging.Formatter(fmt=fmt, datefmt="%H:%M:%S") formatter.converter = time.gmtime sh.setFormatter(formatter) logger.addHandler(sh) return sh
def on_inst_register_clicked(self, *args, **kwargs): '''send the registration data as sentry info log message ''' # create the handler first from raven import Client from raven.handlers.logging import SentryHandler sentry_client = Client('https://*****:*****@' 'app.getsentry.com/45704') handler = SentryHandler(sentry_client) handler.setLevel(logging.INFO) # the registration logger gets the above handler registrations = logging.getLogger('bauble.registrations') registrations.setLevel(logging.INFO) registrations.addHandler(handler) # produce the log record registrations.info([(key, getattr(self.model, key)) for key in self.widget_to_field_map.values()]) # remove the handler after usage registrations.removeHandler(handler) # disable button, so user will not send registration twice self.view.widget_set_sensitive('inst_register', False)
def init_logging(app): location_log_config = app.config['LOGGING_CONFIG_LOCATION'] if os.path.isfile(location_log_config): logging.config.fileConfig(location_log_config, disable_existing_loggers=True) logger.info('Loaded logging configuration file "%s"', location_log_config) else: logger.warning('Error loading configuration file "%s"', location_log_config) if app.config['SENTRY_DSN']: # This could not be done in the default .ini because the # handler has to be passed to `raven.setup_logging`. # the following adds itself to app.extensions['sentry'] sentry = Sentry() sentry.init_app(app, dsn=app.config['SENTRY_DSN']) handler = SentryHandler(app.extensions['sentry'].client) handler.level = logging.NOTSET setup_logging(handler) logger.debug("Sentry DSN: {}".format(app.config['SENTRY_DSN'])) else: logger.debug("No sentry DSN specified")
def SentryHandler(dns=None, level=logging.WARNING): from raven.handlers.logging import SentryHandler dns = dns or os.environ.get("SENTRY") sentry_handler = SentryHandler(dns) sentry_handler.level = level return sentry_handler
def test_tags_merge(self): handler = SentryHandler(self.client, tags={'foo': 'bar', 'biz': 'baz'}) record = self.make_record('Message', extra={'tags': {'foo': 'faz'}}) handler.emit(record) self.assertEqual(len(self.client.events), 1) event = self.client.events.pop(0) assert event['tags'] == {'foo': 'faz', 'biz': 'baz'}
def start(self): # remove all handlers self.handlers = [] # sentry log handler sentry_client = raven.Client('https://*****:*****@sentry.sickrage.ca/4?verify_ssl=0', release=sickrage.version(), repos={'sickrage': {'name': 'sickrage/sickrage'}}) sentry_handler = SentryHandler(client=sentry_client, tags={'platform': platform.platform()}) sentry_handler.setLevel(self.logLevels['ERROR']) sentry_handler.set_name('sentry') self.addHandler(sentry_handler) # console log handler if self.consoleLogging: console_handler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%H:%M:%S') console_handler.setFormatter(formatter) console_handler.setLevel(self.logLevels['INFO'] if not self.debugLogging else self.logLevels['DEBUG']) self.addHandler(console_handler) # file log handlers if self.logFile: # make logs folder if it doesn't exist if not os.path.exists(os.path.dirname(self.logFile)): if not makeDir(os.path.dirname(self.logFile)): return if sickrage.app.developer: rfh = FileHandler( filename=self.logFile, ) else: rfh = RotatingFileHandler( filename=self.logFile, maxBytes=self.logSize, backupCount=self.logNr ) rfh_errors = RotatingFileHandler( filename=self.logFile.replace('.log', '.error.log'), maxBytes=self.logSize, backupCount=self.logNr ) formatter = logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%Y-%m-%d %H:%M:%S') rfh.setFormatter(formatter) rfh.setLevel(self.logLevels['INFO'] if not self.debugLogging else self.logLevels['DEBUG']) self.addHandler(rfh) rfh_errors.setFormatter(formatter) rfh_errors.setLevel(self.logLevels['ERROR']) self.addHandler(rfh_errors)
def process_logger_event(sender, logger, loglevel, logfile, format, colorize, **kw): import logging logger = logging.getLogger() handler = SentryHandler(client) if handler.__class__ in list(map(type, logger.handlers)): return False handler.setLevel(logging.ERROR) handler.addFilter(CeleryFilter()) logger.addHandler(handler)
def _sentry_handler(sentry_key=None, obci_peer=None): try: client = OBCISentryClient(sentry_key, obci_peer=obci_peer, auto_log_stacks=True) except ValueError as e: print('logging setup: initializing sentry failed - ', e.args) return None handler = SentryHandler(client) handler.set_name('sentry_handler') setup_logging(handler) return handler
def create_app(load_admin=True): from redash import extensions, handlers from redash.handlers.webpack import configure_webpack from redash.handlers import chrome_logger from redash.admin import init_admin from redash.models import db from redash.authentication import setup_authentication from redash.metrics.request import provision_app app = Flask(__name__, template_folder=settings.STATIC_ASSETS_PATH, static_folder=settings.STATIC_ASSETS_PATH, static_path='/static') # Make sure we get the right referral address even behind proxies like nginx. app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT) #app.wsgi_app = ReverseProxied(app.wsgi_app) app.url_map.converters['org_slug'] = SlugConverter if settings.ENFORCE_HTTPS: SSLify(app, skips=['ping']) if settings.SENTRY_DSN: from raven import Client from raven.contrib.flask import Sentry from raven.handlers.logging import SentryHandler client = Client(settings.SENTRY_DSN, release=__version__, install_logging_hook=False) sentry = Sentry(app, client=client) sentry.client.release = __version__ sentry_handler = SentryHandler(client=client) sentry_handler.setLevel(logging.ERROR) logging.getLogger().addHandler(sentry_handler) # configure our database app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI app.config.update(settings.all_settings()) provision_app(app) db.init_app(app) migrate.init_app(app, db) if load_admin: init_admin(app) mail.init_app(app) setup_authentication(app) limiter.init_app(app) handlers.init_app(app) configure_webpack(app) extensions.init_extensions(app) chrome_logger.init_app(app) return app
def add_sentry_handler_to_celery_task_logger(client, sentry_handler_log_level): handler = SentryHandler(client) handler.setLevel(sentry_handler_log_level) def process_task_logger_event(sender, logger, loglevel, logfile, format, colorize, **kw): for h in logger.handlers: if type(h) == SentryHandler: return False logger.addHandler(handler) after_setup_task_logger.connect(process_task_logger_event, weak=False)
def setup_raven(): '''we setup sentry to get all stuff from our logs''' pcfg = AppBuilder.get_pcfg() from raven.handlers.logging import SentryHandler from raven import Client from raven.conf import setup_logging client = Client(pcfg['raven_dsn']) handler = SentryHandler(client) # TODO VERIFY THIS -> This is the way to do it if you have a paid account, each log call is an event so this isn't going to work for free accounts... handler.setLevel(pcfg["raven_loglevel"]) setup_logging(handler) return client
def setup_logger(): """Set up our logger with sentry support. Args: None Returns: None Raises: None """ myLogger = logging.getLogger('osm-reporter') myLogger.setLevel(logging.DEBUG) myDefaultHanderLevel = logging.DEBUG # create formatter that will be added to the handlers myFormatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') myTempDir = '/tmp' # so e.g. jenkins can override log dir. if 'OSM_REPORTER_LOGFILE' in os.environ: myFilename = os.environ['OSM_REPORTER_LOGFILE'] else: myFilename = os.path.join(myTempDir, 'reporter.log') myFileHandler = logging.FileHandler(myFilename) myFileHandler.setLevel(myDefaultHanderLevel) # create console handler with a higher log level myConsoleHandler = logging.StreamHandler() myConsoleHandler.setLevel(logging.ERROR) try: #pylint: disable=F0401 from raven.handlers.logging import SentryHandler # noinspection PyUnresolvedReferences from raven import Client #pylint: enable=F0401 myClient = Client( 'http://*****:*****@sentry.linfiniti.com/6') mySentryHandler = SentryHandler(myClient) mySentryHandler.setFormatter(myFormatter) mySentryHandler.setLevel(logging.ERROR) add_handler_once(myLogger, mySentryHandler) myLogger.debug('Sentry logging enabled') except ImportError: myLogger.debug('Sentry logging disabled. Try pip install raven') #Set formatters myFileHandler.setFormatter(myFormatter) myConsoleHandler.setFormatter(myFormatter) # add the handlers to the logger add_handler_once(myLogger, myFileHandler) add_handler_once(myLogger, myConsoleHandler)
def init_raven_client(dsn): global _sentry _sentry = raven.Client( dsn=dsn, transport=raven.transport.threaded_requests.ThreadedRequestsHTTPTransport, ignore_exceptions={'KeyboardInterrupt'}, logging=True, ) sentry_errors_logger = logging.getLogger("sentry.errors") sentry_errors_logger.addHandler(logging.StreamHandler()) handler = SentryHandler(_sentry) handler.setLevel(logging.ERROR) setup_logging(handler)
def init_sentry(app): sentry = IndicoSentry(wrap_wsgi=False, register_signal=True, logging=False) sentry.init_app(app) # setup logging manually and exclude uncaught indico exceptions. # these are logged manually in the flask error handler logic so # we get the X-Sentry-ID header which is not populated in the # logging handlers handler = SentryHandler(sentry.client, level=getattr(logging, config.SENTRY_LOGGING_LEVEL)) handler.addFilter(BlacklistFilter({'indico.flask', 'celery.redirected'})) setup_logging(handler) # connect to the celery logger register_logger_signal(sentry.client) register_signal(sentry.client)
def setup(dsn, level, propagate_sentry_errors=False): client = make_client(dsn, False) from raven.handlers.logging import SentryHandler global handler handler = SentryHandler(client) handler.setLevel(level) handler.dsn = dsn from raven.conf import setup_logging kwargs = {} if propagate_sentry_errors: kwargs["exclude"] = [] setup_logging(handler, **kwargs)
def check_args(self, args): # check allow_other and allow_root if args.allow_other: args.allow_root = False else: args.allow_root = True # check log_level if args.debug: args.log_level = 'debug' # setup logging if args.log != "syslog": if args.log in ('-', '/dev/stdout'): handler = StreamHandler(sys.stdout) else: handler = TimedRotatingFileHandler(args.log, when="midnight") handler.setFormatter(Formatter(fmt='%(asctime)s %(threadName)s: ' '%(message)s', datefmt='%B-%d-%Y %H:%M:%S')) else: if sys.platform == 'darwin': handler = SysLogHandler(address="/var/run/syslog") else: handler = SysLogHandler(address="/dev/log") logger_fmt = 'GitFS on {mount_point} [%(process)d]: %(threadName)s: '\ '%(message)s'.format(mount_point=args.mount_point) handler.setFormatter(Formatter(fmt=logger_fmt)) if args.sentry_dsn != '': from raven.conf import setup_logging from raven.handlers.logging import SentryHandler sentry_handler = SentryHandler(args.sentry_dsn) sentry_handler.setLevel("ERROR") setup_logging(sentry_handler) log.addHandler(sentry_handler) handler.setLevel(args.log_level.upper()) log.setLevel(args.log_level.upper()) log.addHandler(handler) # set cache size lru_cache.maxsize = args.cache_size # return absolute repository's path args.repo_path = os.path.abspath(args.repo_path) return args
def setup_logging(): logger = app.logger file_handler = logging.FileHandler(CONF.get("logging", "file")) file_handler.setFormatter(logging.Formatter( '%(asctime)s %(levelname)s %(message)s')) log_level = getattr(logging, CONF.get("logging", "level")) logger.setLevel(log_level) logger.addHandler(file_handler) raven_dsn = CONF.get("logging", "sentry") if raven_dsn: raven_log_level = getattr(logging, CONF.get("logging", "sentry_level")) sentry_handler = SentryHandler(raven_dsn) sentry_handler.setLevel(raven_log_level) logger.addHandler(sentry_handler)
def setupLogger(): """Set up our logger. Args: None Returns: None Raises: None """ myLogger = logging.getLogger('osm-reporter') myLogger.setLevel(logging.DEBUG) myDefaultHanderLevel = logging.DEBUG # create formatter that will be added to the handlers myFormatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') myTempDir = ('/tmp') myFilename = os.path.join(myTempDir, 'reporter.log') myFileHandler = logging.FileHandler(myFilename) myFileHandler.setLevel(myDefaultHanderLevel) # create console handler with a higher log level myConsoleHandler = logging.StreamHandler() myConsoleHandler.setLevel(logging.ERROR) try: #pylint: disable=F0401 from raven.handlers.logging import SentryHandler from raven import Client #pylint: enable=F0401 myClient = Client('http://*****:*****@sentry.linfiniti.com/6') mySentryHandler = SentryHandler(myClient) mySentryHandler.setFormatter(myFormatter) mySentryHandler.setLevel(logging.ERROR) addLoggingHanderOnce(myLogger, mySentryHandler) myLogger.debug('Sentry logging enabled') except: myLogger.debug('Sentry logging disabled. Try pip install raven') #Set formatters myFileHandler.setFormatter(myFormatter) myConsoleHandler.setFormatter(myFormatter) # add the handlers to the logger addLoggingHanderOnce(myLogger, myFileHandler) addLoggingHanderOnce(myLogger, myConsoleHandler)
def _child(self, control, data): '''Main function for child process.''' # Close supervisor log, open child log baselog = logging.getLogger() baselog.removeHandler(self._logfile_handler) del self._logfile_handler now = datetime.now().strftime(SEARCH_LOG_DATE_FORMAT) logname = SEARCH_LOG_FORMAT % (now, os.getpid()) logpath = os.path.join(self.config.logdir, logname) handler = logging.FileHandler(logpath) handler.setFormatter(_TimestampedLogFormatter()) baselog.addHandler(handler) if self.config.sentry_dsn: sentry_handler = SentryHandler(self.config.sentry_dsn) sentry_handler.setLevel(logging.ERROR) setup_logging(sentry_handler) # Okay, now we have logging search = None try: try: # Close listening socket and half-open connections self._listener.shutdown() # Log startup of child _log.info('Starting search %s, pid %d', opendiamond.__version__, os.getpid()) _log.info('Peer: %s', control.getpeername()[0]) _log.info('Worker threads: %d', self.config.threads) # Set up connection wrappers and search object control = RPCConnection(control) search = Search(self.config, RPCConnection(data)) # Dispatch RPCs on the control connection until we die while True: control.dispatch(search) finally: # Ensure that further signals (particularly SIGUSR1 from # worker threads) don't interfere with the shutdown process. self._ignore_signals = True except ConnectionFailure: # Client closed connection _log.info('Client closed connection') except _Signalled, s: # Worker threads raise SIGUSR1 when they've encountered a # fatal error if s.signal != signal.SIGUSR1: _log.info('Search exiting on %s', s.signame)
def setup_logger(): """Set up our logger with sentry support. """ logger = logging.getLogger('user_map') logger.setLevel(logging.DEBUG) handler_level = logging.DEBUG # create formatter that will be added to the handlers formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') temp_dir = '/tmp' # so e.g. jenkins can override log dir. if 'USER_MAP_LOGFILE' in os.environ: file_name = os.environ['USER_MAP_LOGFILE'] else: file_name = os.path.join(temp_dir, 'user-map.log') file_handler = logging.FileHandler(file_name) file_handler.setLevel(handler_level) # create console handler with a higher log level console_handler = logging.StreamHandler() console_handler.setLevel(logging.ERROR) try: #pylint: disable=F0401 from raven.handlers.logging import SentryHandler # noinspection PyUnresolvedReferences from raven import Client #pylint: enable=F0401 #client = Client( # 'http://*****:*****@sentry.linfiniti.com/6') sentry_handler = SentryHandler(client) sentry_handler.setFormatter(formatter) sentry_handler.setLevel(logging.ERROR) add_handler_once(logger, sentry_handler) logger.debug('Sentry logging enabled') except ImportError: logger.debug('Sentry logging disabled. Try pip install raven') #Set formatters file_handler.setFormatter(formatter) console_handler.setFormatter(formatter) # add the handlers to the logger add_handler_once(logger, file_handler) add_handler_once(logger, console_handler)
def init_logging(): root = logging.getLogger() root.setLevel(0) if State.config['sentry']: patch_gevent_hub_print_exception() hdlr = SentryHandler(raven.Client(State.config['sentry'], transport=GeventedHTTPTransport)) hdlr.setLevel(logging.ERROR) root.addHandler(hdlr) hdlr = logging.StreamHandler(sys.stdout) hdlr.setLevel(getattr(logging, logging.DEBUG)) root.addHandler(hdlr) root.info(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) root.info('==============================================')
def init(level, sentry_dsn, colored=False): root = logging.getLogger() root.setLevel(0) hdlr = logging.FileHandler('client_log.txt', encoding='utf-8') hdlr.setLevel(logging.INFO) root.addHandler(hdlr) hdlr = SentryHandler(raven.Client(sentry_dsn, transport=GeventedHTTPTransport)) hdlr.setLevel(logging.ERROR) root.addHandler(hdlr) hdlr = logging.StreamHandler(sys.stdout) hdlr.setLevel(getattr(logging, level)) if colored: from colorlog import ColoredFormatter formatter = ColoredFormatter( "%(log_color)s%(message)s%(reset)s", log_colors={ 'CRITICAL': 'bold_red', 'ERROR': 'red', 'WARNING': 'yellow', 'INFO': 'green', 'DEBUG': 'blue', } ) hdlr.setFormatter(formatter) root.addHandler(hdlr) root.info(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) root.info('==============================================')
def create_app(): from redash import handlers from redash.admin import init_admin from redash.models import db from redash.authentication import setup_authentication from redash.metrics.request import provision_app app = Flask( __name__, template_folder=settings.STATIC_ASSETS_PATHS[-1], static_folder=settings.STATIC_ASSETS_PATHS[-1], static_path="/static", ) # Make sure we get the right referral address even behind proxies like nginx. app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT) app.url_map.converters["org_slug"] = SlugConverter if settings.ENFORCE_HTTPS: SSLify(app, skips=["ping"]) if settings.SENTRY_DSN: from raven.contrib.flask import Sentry from raven.handlers.logging import SentryHandler sentry = Sentry(app, dsn=settings.SENTRY_DSN) sentry.client.release = __version__ sentry_handler = SentryHandler(settings.SENTRY_DSN) sentry_handler.setLevel(logging.ERROR) logging.getLogger().addHandler(sentry_handler) # configure our database settings.DATABASE_CONFIG.update({"threadlocals": True}) app.config["DATABASE"] = settings.DATABASE_CONFIG app.config.update(settings.all_settings()) provision_app(app) init_admin(app) db.init_app(app) mail.init_app(app) setup_authentication(app) handlers.init_app(app) return app
def _configure_logging(self, config): """Configure loggers here, to hook sentry handler""" import logging from raven.handlers.logging import SentryHandler handler = SentryHandler(config.get('sentry.dsn')) handler.setLevel(logging.NOTSET) loggers = ['', 'ckan', 'ckanext', 'sentry.errors'] for name in loggers: logger = logging.getLogger(name) logger.addHandler(handler) logger.setLevel(logging.INFO) ckan_logger = logging.getLogger('ckan') ckan_logger.info("Sentry configured for this ckan instance (INFO)") ckan_logger.warning("Sentry configured for this ckan instance (WARN)")
def configure_logging(logger="respa_exchange", level=logging.INFO, handler=None): logger = logging.getLogger(logger) logger.setLevel(level) if not handler: handler = logging.StreamHandler(stream=sys.stdout) handler.setFormatter(logging.Formatter( fmt="%(asctime)s - %(name)s - %(levelname)s: %(message)s", datefmt=logging.Formatter.default_time_format )) logger.addHandler(handler) if hasattr(settings, 'RAVEN_CONFIG') and 'dsn' in settings.RAVEN_CONFIG: from raven.handlers.logging import SentryHandler from raven.conf import setup_logging sentry_handler = SentryHandler(settings.RAVEN_CONFIG['dsn']) sentry_handler.setLevel(logging.ERROR) logger.addHandler(sentry_handler) setup_logging(sentry_handler)
def _configure_logging(self, config): """ Configure the Sentry log handler to the specified level Based on @rshk work on https://github.com/opendatatrentino/ckanext-sentry """ handler = SentryHandler(config.get("sentry.dsn")) handler.setLevel(logging.NOTSET) loggers = ["", "ckan", "ckanext", "sentry.errors"] sentry_log_level = config.get("sentry.log_level", logging.INFO) for name in loggers: logger = logging.getLogger(name) logger.addHandler(handler) logger.setLevel(sentry_log_level) log.debug("Setting up Sentry logger with level {0}".format(sentry_log_level))
def configure_logging( config ): """ Allow some basic logging configuration to be read from the cherrpy config. """ # PasteScript will have already configured the logger if the appropriate # sections were found in the config file, so we do nothing if the # config has a loggers section, otherwise we do some simple setup # using the 'log_*' values from the config. if config.global_conf_parser.has_section( "loggers" ): return format = config.get( "log_format", "%(name)s %(levelname)s %(asctime)s %(message)s" ) level = logging._levelNames[ config.get( "log_level", "DEBUG" ) ] destination = config.get( "log_destination", "stdout" ) log.info( "Logging at '%s' level to '%s'" % ( level, destination ) ) # Get root logger root = logging.getLogger() # Set level root.setLevel( level ) # Turn down paste httpserver logging if level <= logging.DEBUG: logging.getLogger( "paste.httpserver.ThreadPool" ).setLevel( logging.WARN ) # Remove old handlers for h in root.handlers[:]: root.removeHandler(h) # Create handler if destination == "stdout": handler = logging.StreamHandler( sys.stdout ) else: handler = logging.FileHandler( destination ) # Create formatter formatter = logging.Formatter( format ) # Hook everything up handler.setFormatter( formatter ) root.addHandler( handler ) # If sentry is configured, also log to it if config.sentry_dsn: eggs.require( "raven" ) from raven.handlers.logging import SentryHandler sentry_handler = SentryHandler( config.sentry_dsn ) sentry_handler.setLevel( logging.WARN ) root.addHandler( sentry_handler )
def create_app(_read_config=True, **config): app = Flask( __name__, static_folder=os.path.join(ROOT, 'static'), template_folder=os.path.join(ROOT, 'templates'), ) # support for kubernetes # https://kubernetes.io/docs/concepts/services-networking/service/ if os.environ.get('GET_HOSTS_FROM') == 'env': REDIS_URL = 'redis://{}:{}/0'.format( os.environ['REDIS_MASTER_SERVICE_HOST'], os.environ['REDIS_MASTER_SERVICE_PORT'], ) # Cloud SQL # https://cloud.google.com/sql/docs/postgres/connect-container-engine SQLALCHEMY_URI = 'postgresql+psycopg2://{}:{}@127.0.0.1:5432/zeus'.format( os.environ['DB_USER'], os.environ['DB_PASSWORD'], ) if 'GCS_BUCKET' in os.environ: app.config['FILE_STORAGE'] = { 'backend': 'zeus.storage.gcs.GoogleCloudStorage', 'options': { 'bucket': os.environ['GCS_BUCKET'], 'project': os.environ.get('GC_PROJECT'), }, } else: REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost/0') SQLALCHEMY_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql+psycopg2:///zeus') app.config['FILE_STORAGE'] = { 'backend': 'zeus.storage.base.FileStorage', 'options': {}, } if os.environ.get('SERVER_NAME'): app.config['SERVER_NAME'] = os.environ['SERVER_NAME'] app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY') app.config['LOG_LEVEL'] = os.environ.get('LOG_LEVEL') or 'INFO' app.config['SSL'] = os.environ.get('SSL') in ('1', 'true', 'on') # limit sessions to one day so permissions are revalidated automatically app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(days=1) app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['REDIS_URL'] = REDIS_URL app.config['SENTRY_DSN'] = os.environ.get('SENTRY_DSN') or None app.config['SENTRY_DSN_FRONTEND'] = os.environ.get( 'SENTRY_DSN_FRONTEND') or None app.config['SENTRY_INCLUDE_PATHS'] = [ 'zeus', ] try: app.config['SENTRY_RELEASE'] = raven.fetch_git_sha(ROOT) except Exception: app.logger.warn('unable to bind sentry.release context', exc_info=True) app.config['SENTRY_ENVIRONMENT'] = os.environ.get('NODE_ENV', 'development') app.config['GITHUB_CLIENT_ID'] = os.environ.get('GITHUB_CLIENT_ID') or None app.config['GITHUB_CLIENT_SECRET'] = os.environ.get( 'GITHUB_CLIENT_SECRET') or None app.config['CELERY_ACCEPT_CONTENT'] = ['zeus_json', 'json'] app.config['CELERY_ACKS_LATE'] = True app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL'] app.config['CELERY_DEFAULT_QUEUE'] = 'default' app.config['CELERY_DEFAULT_EXCHANGE'] = 'default' app.config['CELERY_DEFAULT_EXCHANGE_TYPE'] = 'direct' app.config['CELERY_DEFAULT_ROUTING_KEY'] = 'default' app.config['CELERY_DISABLE_RATE_LIMITS'] = True app.config['CELERY_EVENT_SERIALIZER'] = 'zeus_json' app.config['CELERY_IGNORE_RESULT'] = True app.config['CELERY_IMPORTS'] = ('zeus.tasks', ) app.config['CELERY_RESULT_BACKEND'] = None app.config['CELERY_RESULT_SERIALIZER'] = 'zeus_json' app.config['CELERY_SEND_EVENTS'] = False app.config['CELERY_TASK_RESULT_EXPIRES'] = 1 app.config['CELERY_TASK_SERIALIZER'] = 'zeus_json' app.config['CELERYD_PREFETCH_MULTIPLIER'] = 1 app.config['CELERYD_MAX_TASKS_PER_CHILD'] = 10000 app.config['REPO_ROOT'] = os.environ.get('REPO_ROOT', '/usr/local/cache/zeus-repos') if _read_config: if os.environ.get('ZEUS_CONF'): # ZEUS_CONF=/etc/zeus.conf.py app.config.from_envvar('ZEUS_CONF') else: # Look for ~/.zeus/zeus.conf.py path = os.path.normpath( os.path.expanduser('~/.zeus/zeus.config.py')) app.config.from_pyfile(path, silent=True) app.config.update(config) req_vars = ('GITHUB_CLIENT_ID', 'GITHUB_CLIENT_SECRET', 'REDIS_URL', 
'SECRET_KEY', 'SQLALCHEMY_DATABASE_URI') for varname in req_vars: if not app.config.get(varname): raise SystemExit( 'Required configuration not present for {}'.format(varname)) if app.config.get('SSL'): app.wgsi_app = force_ssl(app) app.config['PREFERRED_URL_SCHEME'] = 'https' app.config['SESSION_COOKIE_SECURE'] = True from zeus.testutils.client import ZeusTestClient app.test_client_class = ZeusTestClient if app.config.get('LOG_LEVEL'): app.logger.setLevel(getattr(logging, app.config['LOG_LEVEL'].upper())) # init sentry first sentry.init_app(app) # XXX(dcramer): Sentry + Flask + Logging integration is broken # https://github.com/getsentry/raven-python/issues/1030 from raven.handlers.logging import SentryHandler app.logger.addHandler( SentryHandler(client=sentry.client, level=logging.WARN)) configure_db(app) redis.init_app(app) celery.init_app(app, sentry) configure_api(app) configure_web(app) from . import models # NOQA return app
def run(base_dir, start_gunicorn_app=True): # Store a pidfile before doing anything else store_pidfile(base_dir) # For dumping stacktraces register_diag_handlers() # Start initializing the server now os.chdir(base_dir) try: import pymysql pymysql.install_as_MySQLdb() except ImportError: pass # We're doing it here even if someone doesn't use PostgreSQL at all # so we're not suprised when someone suddenly starts using PG. # TODO: Make sure it's registered for each of the subprocess psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY) repo_location = os.path.join(base_dir, 'config', 'repo') # Configure the logging first, before configuring the actual server. logging.addLevelName('TRACE1', TRACE1) with open(os.path.join(repo_location, 'logging.conf')) as f: dictConfig(yaml.load(f)) logger = logging.getLogger(__name__) kvdb_logger = logging.getLogger('zato_kvdb') config = get_config(repo_location, 'server.conf') # New in 2.0 - Start monitoring as soon as possible if config.get('newrelic', {}).get('config'): import newrelic.agent newrelic.agent.initialize(config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None, config.newrelic.log_file or None, config.newrelic.log_level or None) # New in 2.0 - override gunicorn-set Server HTTP header gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato') # Store KVDB config in logs, possibly replacing its password if told to kvdb_config = get_kvdb_config_for_log(config.kvdb) kvdb_logger.info('Master process config `%s`', kvdb_config) # New in 2.0 hence optional user_locale = config.misc.get('locale', None) if user_locale: locale.setlocale(locale.LC_ALL, user_locale) value = 12345 logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value, locale.currency(value, grouping=True).decode('utf-8')) # Spring Python app_context = get_app_context(config) # Makes queries against Postgres asynchronous if asbool( config.odb.use_async_driver) and config.odb.engine == 'postgresql': make_psycopg_green() # New in 2.0 - Put HTTP_PROXY in os.environ. http_proxy = config.misc.get('http_proxy', False) if http_proxy: os.environ['http_proxy'] = http_proxy crypto_manager = get_crypto_manager(repo_location, app_context, config) parallel_server = app_context.get_object('parallel_server') zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto) parallel_server.crypto_manager = crypto_manager parallel_server.odb_data = config.odb parallel_server.host = zato_gunicorn_app.zato_host parallel_server.port = zato_gunicorn_app.zato_port parallel_server.repo_location = repo_location parallel_server.base_dir = base_dir parallel_server.tls_dir = os.path.join(parallel_server.base_dir, 'config', 'repo', 'tls') parallel_server.fs_server_config = config parallel_server.user_config.update(config.user_config_items) parallel_server.startup_jobs = app_context.get_object('startup_jobs') parallel_server.app_context = app_context # Remove all locks possibly left over by previous server instances kvdb = app_context.get_object('kvdb') kvdb.component = 'master-proc' clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt) # Turn the repo dir into an actual repository and commit any new/modified files RepoManager(repo_location).ensure_repo_consistency() # New in 2.0 so it's optional. profiler_enabled = config.get('profiler', {}).get('enabled', False) # New in 2.0 so it's optional. 
sentry_config = config.get('sentry') dsn = sentry_config.pop('dsn', None) if dsn: from raven import Client from raven.handlers.logging import SentryHandler handler_level = sentry_config.pop('level') client = Client(dsn, **sentry_config) handler = SentryHandler(client=client) handler.setLevel(getattr(logging, handler_level)) logger = logging.getLogger('') logger.addHandler(handler) for name in logging.Logger.manager.loggerDict: if name.startswith('zato'): logger = logging.getLogger(name) logger.addHandler(handler) if asbool(profiler_enabled): profiler_dir = os.path.abspath( os.path.join(base_dir, config.profiler.profiler_dir)) parallel_server.on_wsgi_request = ProfileMiddleware( parallel_server.on_wsgi_request, log_filename=os.path.join(profiler_dir, config.profiler.log_filename), cachegrind_filename=os.path.join( profiler_dir, config.profiler.cachegrind_filename), discard_first_request=config.profiler.discard_first_request, flush_at_shutdown=config.profiler.flush_at_shutdown, path=config.profiler.url_path, unwind=config.profiler.unwind) # New in 2.0 - set environmet variables for servers to inherit os_environ = config.get('os_environ') for key, value in os_environ.items(): os.environ[key] = value # Run the app at last if start_gunicorn_app: zato_gunicorn_app.run() else: return zato_gunicorn_app.zato_wsgi_app
def test_capture_plus_logging(self): client, raven, app = self.make_client_and_raven(debug=False) app.logger.addHandler(SentryHandler(raven)) client.get('/an-error/') assert len(raven.events) == 1
import os import logging from raven import Client import raven_aiohttp from raven.handlers.logging import SentryHandler from raven.conf import setup_logging if 'CENTRY_URL' in os.environ: client = Client( dsn=os.environ['CENTRY_URL'], transport=raven_aiohttp.AioHttpTransport, ) handler = SentryHandler(client) handler.setLevel(logging.ERROR) setup_logging(handler)
def filter(self, record): record.hostname = ContextFilter.hostname return True stream_handler = logging.StreamHandler() stream_handler.setLevel(setting['logging']['term_level']) stream_handler.addFilter(ContextFilter()) stream_handler.setFormatter( logging.Formatter('%(asctime)s - %(levelname)-10s - %(hostname)s - [in %(pathname)s:%(lineno)d]: - %(message)s')) logger.addHandler(stream_handler) sentry_url = setting.get('sentry_url') if sentry_url: handler = SentryHandler(sentry_url) handler.setLevel(setting['logging']['sentry_level']) setup_logging(handler) # MONGO ########### mongo_host, mongo_port = setting['mongodb']['host'], setting['mongodb']['port'] mongo_lock = threading.Lock() self_params = setting.get('self') if not self_params: raise Exception('Setting error. Self params not found') class MongoException(Exception): pass
"Will pass buildman hostname %s to builders for websocket connection", manager_hostname ) logger.debug('Starting build manager with lifecycle "%s"', build_manager_config[0]) server = BuilderServer( app.config["SERVER_HOSTNAME"], manager_hostname, dockerfile_build_queue, build_logs, user_files, manager_klass, build_manager_config[1], instance_keys, ) server.run("0.0.0.0", controller_port) if __name__ == "__main__": logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False) logging.getLogger("peewee").setLevel(logging.WARN) logging.getLogger("boto").setLevel(logging.WARN) if app.config.get("EXCEPTION_LOG_TYPE", "FakeSentry") == "Sentry": buildman_name = "%s:buildman" % socket.gethostname() setup_logging( SentryHandler(app.config.get("SENTRY_DSN", ""), name=buildman_name, level=logging.ERROR) ) run_build_manager()
from flask_restful import Resource import requests from flask import request from BeautifulSoup import BeautifulSoup from actions import sendNotification,sendNotificationToUser,access_token_validation from raven.handlers.logging import SentryHandler from raven import Client from raven.conf import setup_logging import logging client = Client('https://*****:*****@app.getsentry.com/23855') handler = SentryHandler(client) setup_logging(handler) logger = logging.getLogger(__name__) class GCMClient(Resource): def post(self): request_params = request.get_json() try: access_token = request_params['access_token'] gcm_id= request_params['gcm_id'] message= request_params['message'] except KeyError as ke: logger.error(ke) except Exception as e: logger.error(e) if access_token ==None or gcm_id == None or message == None:
def test_first_arg_as_dsn(self): handler = SentryHandler('http://*****:*****@example.com/1') self.assertTrue(isinstance(handler.client, Client))
def test_client_kwarg(self): client = TempStoreClient(include_paths=['tests']) handler = SentryHandler(client=client) self.assertEqual(handler.client, client)
def setup_logger(logger_name, log_file=None, sentry_url=None): """Run once when the module is loaded and enable logging. :param logger_name: The logger name that we want to set up. :type logger_name: str :param log_file: Optional full path to a file to write logs to. :type log_file: str :param sentry_url: Optional url to sentry api for remote logging. Defaults to http://c64a83978732474ea751d432ab943a6b: [email protected]/5 which is the sentry project for InaSAFE desktop. :type sentry_url: str Borrowed heavily from this: http://docs.python.org/howto/logging-cookbook.html Now to log a message do:: LOGGER.debug('Some debug message') .. note:: The file logs are written to the inasafe user tmp dir e.g.: /tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log """ logger = logging.getLogger(logger_name) logger.setLevel(logging.DEBUG) default_handler_level = logging.DEBUG # create formatter that will be added to the handlers formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') # create syslog handler which logs even debug messages # (ariel): Make this log to /var/log/safe.log instead of # /var/log/syslog # (Tim) Ole and I discussed this - we prefer to log into the # user's temporary working directory. inasafe_log_path = log_file_path() if log_file is None: file_handler = logging.FileHandler(inasafe_log_path) else: file_handler = logging.FileHandler(log_file) file_handler.setLevel(default_handler_level) # create console handler with a higher log level console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) # Sentry handler - this is optional hence the localised import # It will only log if pip install raven. If raven is available # logging messages will be sent to http://sentry.linfiniti.com # We will log exceptions only there. You need to either: # * Set env var 'INASAFE_SENTRY=1' present (value can be anything) # before this will be enabled. if 'INASAFE_SENTRY' in os.environ: if sentry_url is None: client = Client( 'http://c64a83978732474ea751d432ab943a6b' ':[email protected]/5') else: client = Client(sentry_url) sentry_handler = SentryHandler(client) sentry_handler.setFormatter(formatter) sentry_handler.setLevel(logging.ERROR) if add_logging_handler_once(logger, sentry_handler): logger.debug('Sentry logging enabled in safe') else: logger.debug('Sentry logging disabled in safe') # Set formatters file_handler.setFormatter(formatter) console_handler.setFormatter(formatter) # add the handlers to the logger add_logging_handler_once(logger, file_handler) add_logging_handler_once(logger, console_handler)
def test_custom_client_class(self): handler = SentryHandler('http://*****:*****@example.com/1', client_cls=TempStoreClient) self.assertTrue(type(handler.client), TempStoreClient)
def start(self): # remove all handlers self.handlers = [] # sentry log handler sentry_client = raven.Client( 'https://*****:*****@sentry.sickrage.ca/5?verify_ssl=0', release=sickrage.version(), repos={'sickrage': { 'name': 'sickrage/sickrage' }}) sentry_tags = { 'platform': platform.platform(), 'locale': sys.getdefaultencoding() } if sickrage.app.config and sickrage.app.config.sub_id: sentry_tags.update({'sub_id': sickrage.app.config.sub_id}) if sickrage.app.config and sickrage.app.config.app_id: sentry_tags.update({'app_id': sickrage.app.config.app_id}) sentry_handler = SentryHandler(client=sentry_client, tags=sentry_tags) sentry_handler.setLevel(self.logLevels['ERROR']) sentry_handler.set_name('sentry') self.addHandler(sentry_handler) # console log handler if self.consoleLogging: console_handler = logging.StreamHandler() formatter = logging.Formatter( '%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%H:%M:%S') console_handler.setFormatter(formatter) console_handler.setLevel(self.logLevels['INFO'] if not self. debugLogging else self.logLevels['DEBUG']) self.addHandler(console_handler) # file log handlers if self.logFile: # make logs folder if it doesn't exist if not os.path.exists(os.path.dirname(self.logFile)): if not make_dir(os.path.dirname(self.logFile)): return if sickrage.app.developer: rfh = FileHandler(filename=self.logFile, ) else: rfh = RotatingFileHandler(filename=self.logFile, maxBytes=self.logSize, backupCount=self.logNr) rfh_errors = RotatingFileHandler(filename=self.logFile.replace( '.log', '.error.log'), maxBytes=self.logSize, backupCount=self.logNr) formatter = logging.Formatter( '%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%Y-%m-%d %H:%M:%S') rfh.setFormatter(formatter) rfh.setLevel(self.logLevels['INFO'] if not self.debugLogging else self.logLevels['DEBUG']) self.addHandler(rfh) rfh_errors.setFormatter(formatter) rfh_errors.setLevel(self.logLevels['ERROR']) self.addHandler(rfh_errors)
# load the config helper config = get_default_config() server_mode = config.get('SERVER_MODE').lower() if server_mode not in [SERVER_MODE_DEV, SERVER_MODE_PROD]: logger.error( u"Unknown server mode '{}', set a mode in the `config/app.config` file" .format(server_mode)) sys.exit(1) else: logger.info(u"Started server in %s mode", server_mode) # setup optional sentry logging service try: handler = SentryHandler(config.get('SENTRY_DSN')) handler.setLevel(logging.ERROR) setup_logging(handler) except ConfigException as e: logger.info("no sentry logging") # Connect to MediaCloud TOOL_API_KEY = config.get('MEDIA_CLOUD_API_KEY') mc = mediacloud.api.AdminMediaCloud(TOOL_API_KEY) logger.info(u"Connected to mediacloud") # Connect to CLIFF if the settings are there cliff = None try: cliff = Cliff(config.get('CLIFF_URL'))
def setup_bot(backend_name, logger, config, restore=None): # from here the environment is supposed to be set (daemon / non daemon, # config.py in the python path ) from .utils import PLUGINS_SUBDIR from .errBot import bot_config_defaults bot_config_defaults(config) if config.BOT_LOG_FILE: hdlr = logging.FileHandler(config.BOT_LOG_FILE) hdlr.setFormatter( logging.Formatter( "%(asctime)s %(levelname)-8s %(name)-25s %(message)s")) logger.addHandler(hdlr) if config.BOT_LOG_SENTRY: try: from raven.handlers.logging import SentryHandler except ImportError: log.exception( "You have BOT_LOG_SENTRY enabled, but I couldn't import modules " "needed for Sentry integration. Did you install raven? " "(See http://raven.readthedocs.org/en/latest/install/index.html " "for installation instructions)") exit(-1) sentryhandler = SentryHandler(config.SENTRY_DSN, level=config.SENTRY_LOGLEVEL) logger.addHandler(sentryhandler) logger.setLevel(config.BOT_LOG_LEVEL) # make the plugins subdir to store the plugin shelves d = path.join(config.BOT_DATA_DIR, PLUGINS_SUBDIR) if not path.exists(d): makedirs(d, mode=0o755) # instanciate the bot bpm = BackendManager(config) plug = bpm.get_candidate(backend_name) log.info("Found Backend plugin: '%s'\n\t\t\t\t\t\tDescription: %s" % (plug.name, plug.description)) try: bot = bpm.get_backend_by_name(backend_name) except Exception: log.exception( "Unable to configure the backend, please check if your config.py is correct." ) exit(-1) # restore the bot from the restore script if restore: # Prepare the context for the restore script if 'repos' in bot: log.fatal('You cannot restore onto a non empty bot.') from errbot.plugin_manager import get_plugin_by_name # noqa log.info('**** RESTORING the bot from %s' % restore) with open(restore) as f: exec(f.read()) bot.close_storage() print('Restore complete restore the bot normally') sys.exit(0) errors = bot.update_dynamic_plugins() if errors: log.error('Some plugins failed to load:\n' + '\n'.join(errors)) return bot
config = ConfigParser.RawConfigParser() config.read('posmon.ini') keys = [(config.getint(section, 'keyID'), config.get(section, 'vCode')) for section in config.sections() if section.startswith('key:')] cache_path = config.get('posmon', 'cache') sde_db_uri = config.get('posmon', 'sde_db_uri') try: sentry_uri = config.get('posmon', 'sentry.uri') except ConfigParser.NoOptionError: sentry_uri = None # Set up logging logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG) if sentry_uri: from raven.handlers.logging import SentryHandler sentry_handler = SentryHandler(sentry_uri) sentry_handler.setLevel(logging.WARNING) logging.getLogger().addHandler(sentry_handler) sde.initialize(sde_db_uri) # Run! cache=SqliteCache(cache_path) fmt = sys.argv[1] if len(sys.argv) > 1 else 'text' for key_id, vcode in keys: api_key = API(api_key=(key_id, vcode), cache=cache) try: process(api_key, format=fmt, config=config) except Exception as e: if fmt == 'text': print "error processing key: %s" % (str(e),)
def setup_handlers(): if 'sentry_handler' not in __opts__: log.debug('No \'sentry_handler\' key was found in the configuration') return False options = {} dsn = get_config_value('dsn') if dsn is not None: try: # support raven ver 5.5.0 from raven.transport import TransportRegistry, default_transports from raven.utils.urlparse import urlparse transport_registry = TransportRegistry(default_transports) url = urlparse(dsn) if not transport_registry.supported_scheme(url.scheme): raise ValueError('Unsupported Sentry DSN scheme: {0}'.format( url.scheme)) dsn_config = {} conf_extras = transport_registry.compute_scope(url, dsn_config) dsn_config.update(conf_extras) options.update({ 'project': dsn_config['SENTRY_PROJECT'], 'servers': dsn_config['SENTRY_SERVERS'], 'public_key': dsn_config['SENTRY_PUBLIC_KEY'], 'secret_key': dsn_config['SENTRY_SECRET_KEY'] }) except ValueError as exc: log.info('Raven failed to parse the configuration provided ' 'DSN: {0}'.format(exc)) # Allow options to be overridden if previously parsed, or define them for key in ('project', 'servers', 'public_key', 'secret_key'): config_value = get_config_value(key) if config_value is None and key not in options: log.debug('The required \'sentry_handler\' configuration key, ' '{0!r}, is not properly configured. Not configuring ' 'the sentry logging handler.'.format(key)) return elif config_value is None: continue options[key] = config_value # site: An optional, arbitrary string to identify this client installation. options.update({ # site: An optional, arbitrary string to identify this client # installation 'site': get_config_value('site'), # name: This will override the server_name value for this installation. # Defaults to socket.gethostname() 'name': get_config_value('name'), # exclude_paths: Extending this allow you to ignore module prefixes # when sentry attempts to discover which function an error comes from 'exclude_paths': get_config_value('exclude_paths', ()), # include_paths: For example, in Django this defaults to your list of # INSTALLED_APPS, and is used for drilling down where an exception is # located 'include_paths': get_config_value('include_paths', ()), # list_max_length: The maximum number of items a list-like container # should store. 'list_max_length': get_config_value('list_max_length'), # string_max_length: The maximum characters of a string that should be # stored. 'string_max_length': get_config_value('string_max_length'), # auto_log_stacks: Should Raven automatically log frame stacks # (including locals) all calls as it would for exceptions. 'auto_log_stacks': get_config_value('auto_log_stacks'), # timeout: If supported, the timeout value for sending messages to # remote. 'timeout': get_config_value('timeout', 1), # processors: A list of processors to apply to events before sending # them to the Sentry server. Useful for sending additional global state # data or sanitizing data that you want to keep off of the server. 
'processors': get_config_value('processors'), # dsn: Ensure the DSN is passed into the client 'dsn': dsn }) client = raven.Client(**options) context = get_config_value('context') context_dict = {} if context is not None: for tag in context: tag_value = __salt__['grains.get'](tag) if len(tag_value) > 0: context_dict[tag] = tag_value if len(context_dict) > 0: client.context.merge({'tags': context_dict}) try: handler = SentryHandler(client) handler.setLevel(LOG_LEVELS[get_config_value('log_level', 'error')]) return handler except ValueError as exc: log.debug( 'Failed to setup the sentry logging handler: {0}'.format(exc), exc_info=exc)
from superdesk.validator import SuperdeskValidator try: from eve_docs import eve_docs from flask.ext.bootstrap import Bootstrap has_docs = True except ImportError: has_docs = False from .entities.twt_oauths import init as citizendesk_oauths_init_app from .entities.reports import init as citizendesk_reports_init_app from .entities.coverages import init as citizendesk_coverages_init_app from .settings import settings as default_settings from .blueprints.proxy import blueprint as proxy_blueprint handler = SentryHandler('http://*****:*****@sentry.sourcefabric.org/8') #logger = logging.getLogger('citizendesk') #logger.addHandler(handler) def register_blueprints(app): app.register_blueprint(proxy_blueprint, url_prefix='/proxy') citizendesk_oauths_init_app(app) citizendesk_reports_init_app(app) citizendesk_coverages_init_app(app) superdesk_users.init_app(app) superdesk_auth.init_app(app) superdesk_auth_db.init_app(app) if has_docs: Bootstrap(app) # required by eve docs app.register_blueprint(eve_docs, url_prefix='/docs')
def test_logging_level_set(self): handler = SentryHandler('http://*****:*****@example.com/1', level="ERROR") # XXX: some version of python 2.6 seem to pass the string on instead of coercing it self.assertTrue(handler.level in (logging.ERROR, 'ERROR'))
from utils.paths import ensure_trailing_slash from utils.db import MongoClient from utils.create_dag import create_dag, resources_for_workflow logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) SENTRY_ENABLED = config('SENTRY_ENABLED', cast=bool, default=True) if SENTRY_ENABLED: from raven.handlers.logging import SentryHandler from raven.conf import setup_logging SENTRY_DSN = config('SENTRY_DSN') handler = SentryHandler(SENTRY_DSN) handler.setLevel(logging.ERROR) setup_logging(handler) else: logger.warn("Not attaching sentry to ftp dags because sentry is disabled") S3_BUCKET = os.getenv('AWS_S3_TEMP_BUCKET') aws_key = os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret = quote_plus(os.getenv('AWS_SECRET_ACCESS_KEY', '')) os.environ[CONN_ENV_PREFIX + 'S3_CONNECTION'] = 's3://{aws_key}:{aws_secret}@S3'.format( aws_key=aws_key, aws_secret=aws_secret) logger.info('Querying for ftpConfigs') client = MongoClient() ftp_configs = client.ftp_configs()
def logger_init(self): """ Initialize the logger globally. :returns: True """ # Let's attempt to make the log directory if it doesn't exist os.makedirs(self.log_dir, exist_ok=True) # Instantiate a logger self.log = logging.getLogger("stoq") # Set the default logging level self.log.setLevel(self.log_level.upper()) # Define the log filename and path log_file = "stoq.log" self.log_path = os.path.abspath(os.path.join(self.log_dir, log_file)) # Setup our logfile file_handler = logging.handlers.RotatingFileHandler( filename=self.log_path, mode='a', maxBytes=int(self.log_maxbytes), backupCount=int(self.log_backup_count)) # Setup our STDERR output stderr_handler = logging.StreamHandler() if self.log_syntax == "json": formatter = jsonlogger.JsonFormatter else: formatter = logging.Formatter # Define the format of the log file log_format = formatter( "%(asctime)s %(levelname)s %(name)s:%(filename)s:%(funcName)s:%(lineno)s: " "%(message)s", datefmt='%Y-%m-%d %H:%M:%S') stderr_logformat = formatter( "[%(asctime)s %(levelname)s] %(name)s: %(message)s") file_handler.setFormatter(log_format) stderr_handler.setFormatter(stderr_logformat) # Attach the handler to the logger self.log.addHandler(file_handler) self.log.addHandler(stderr_handler) # If logging to sentry.io, setup the logger if raven_imported and self.sentry_url: try: sentry_handler = SentryHandler( self.sentry_url, ignore_exceptions=self.sentry_ignore_list) sentry_handler.setFormatter( "[%(asctime)s][%(levelname)s] %(name)s " "%(filename)s:%(funcName)s:%(lineno)d | %(message)s") sentry_handler.setLevel(logging.WARN) self.log.addHandler(sentry_handler) except: self.log.error("Unable to initiate logging to Sentry")
def add_sentry_to_logging(): if not os.environ.get('HTTP_HOST').startswith('localhost:'): client = Client(GAESENTRY_SENTRY_DSN) handler = SentryHandler(client=client, level=logging.WARNING) setup_logging(handler)
def setup_sentry_logging(): """Set up sentry logging of exceptions""" if _sentry_client: setup_logging(SentryHandler(_sentry_client))
def test_logging_level_set(self): handler = SentryHandler('http://*****:*****@example.com/1', level="ERROR") self.assertEquals(handler.level, logging.ERROR)
import xml.sax.handler import sys import os import errno import logging ##### START CONFIGURATION HEADER ##### # Set to true if you want to use Sentry SENTRY = False # Sentry logging if SENTRY: from raven.handlers.logging import SentryHandler from raven import Client from raven.conf import setup_logging sentry_client = Client('INSERT_SENTRY_DSN_HERE') handler = SentryHandler(sentry_client) setup_logging(handler) # What releases would you like to track. 'other' is mandatory RELEASES = ['other', '6'] # What severity levels do we want to include SEVERITY = ['Critical', 'Important'] # Who is this from? UPDATE_FROM = "you@your_domain.com" # Directory prefix to build the files under. BUILD_PREFIX = "/state/partition1/site-roll/rocks/src/roll/security-updates/current" ##### END CONFIGURATION HEADER #####
def setUp(self): self.client = TempStoreClient(include_paths=['tests', 'raven']) self.handler = SentryHandler(self.client) self.logger = logging.getLogger(__name__) self.logger.handlers = [] self.logger.addHandler(self.handler)
class LoggingIntegrationTest(TestCase):
    def setUp(self):
        self.client = TempStoreClient(include_paths=['tests', 'raven'])
        self.handler = SentryHandler(self.client)

    def make_record(self, msg, args=(), level=logging.INFO, extra=None,
                    exc_info=None, name='root', pathname=__file__):
        record = logging.LogRecord(name, level, pathname, 27, msg, args,
                                   exc_info, 'make_record')
        if extra:
            for key, value in six.iteritems(extra):
                record.__dict__[key] = value
        return record

    def test_logger_basic(self):
        record = self.make_record('This is a test error')
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertEqual(event['logger'], 'root')
        self.assertEqual(event['level'], logging.INFO)
        self.assertEqual(event['message'], 'This is a test error')
        assert 'exception' not in event
        self.assertTrue('sentry.interfaces.Message' in event)
        msg = event['sentry.interfaces.Message']
        self.assertEqual(msg['message'], 'This is a test error')
        self.assertEqual(msg['params'], ())

    def test_can_record(self):
        tests = [
            ("raven", False),
            ("raven.foo", False),
            ("sentry.errors", False),
            ("sentry.errors.foo", False),
            ("raven_utils", True),
        ]
        for test in tests:
            record = self.make_record("Test", name=test[0])
            self.assertEqual(self.handler.can_record(record), test[1])

    @mock.patch('raven.transport.http.HTTPTransport.send')
    @mock.patch('raven.base.ClientState.should_try')
    def test_exception_on_emit(self, should_try, _send_remote):
        should_try.return_value = True

        # Test for the default behaviour in which an exception is handled
        # by the client or handler.
        client = Client(dsn='sync+http://public:[email protected]/1')
        handler = SentryHandler(client)
        _send_remote.side_effect = Exception()
        record = self.make_record('This is a test error')
        handler.emit(record)
        self.assertEqual(handler.client.state.status,
                         handler.client.state.ERROR)

        # Test for the case in which a send error is raised to the
        # calling frame.
        client = Client(
            dsn='sync+http://public:[email protected]/1',
            raise_send_errors=True,
        )
        handler = SentryHandler(client)
        _send_remote.side_effect = Exception()
        with self.assertRaises(Exception):
            record = self.make_record('This is a test error')
            handler.emit(record)

    def test_logger_extra_data(self):
        record = self.make_record(
            'This is a test error',
            extra={'data': {
                'url': 'http://example.com',
            }})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        if six.PY3:
            expected = "'http://example.com'"
        else:
            expected = "u'http://example.com'"
        self.assertEqual(event['extra']['url'], expected)

    def test_logger_exc_info(self):
        try:
            raise ValueError('This is a test ValueError')
        except ValueError:
            record = self.make_record('This is a test info with an exception',
                                      exc_info=sys.exc_info())
        else:
            self.fail('Should have raised an exception')

        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)

        self.assertEqual(event['message'],
                         'This is a test info with an exception')
        assert 'exception' in event
        exc = event['exception']['values'][0]
        self.assertEqual(exc['type'], 'ValueError')
        self.assertEqual(exc['value'], 'This is a test ValueError')
        self.assertTrue('sentry.interfaces.Message' in event)
        msg = event['sentry.interfaces.Message']
        self.assertEqual(msg['message'],
                         'This is a test info with an exception')
        self.assertEqual(msg['params'], ())

    def test_message_params(self):
        record = self.make_record('This is a test of %s', args=('args',))
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertEqual(event['message'], 'This is a test of args')
        msg = event['sentry.interfaces.Message']
        self.assertEqual(msg['message'], 'This is a test of %s')
        expected = ("'args'",) if six.PY3 else ("u'args'",)
        self.assertEqual(msg['params'], expected)

    def test_record_stack(self):
        record = self.make_record('This is a test of stacks',
                                  extra={'stack': True})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertTrue('stacktrace' in event)
        frames = event['stacktrace']['frames']
        self.assertNotEqual(len(frames), 1)
        frame = frames[0]
        self.assertEqual(frame['module'], 'raven.handlers.logging')
        assert 'exception' not in event
        self.assertTrue('sentry.interfaces.Message' in event)
        self.assertEqual(event['culprit'], 'root in make_record')
        self.assertEqual(event['message'], 'This is a test of stacks')

    def test_no_record_stack(self):
        record = self.make_record('This is a test with no stacks',
                                  extra={'stack': False})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertEqual(event['message'], 'This is a test with no stacks')
        self.assertFalse('sentry.interfaces.Stacktrace' in event)

    def test_explicit_stack(self):
        record = self.make_record('This is a test of stacks',
                                  extra={'stack': iter_stack_frames()})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        assert 'stacktrace' in event
        assert 'culprit' in event
        assert event['culprit'] == 'root in make_record'
        self.assertTrue('message' in event, event)
        self.assertEqual(event['message'], 'This is a test of stacks')
        assert 'exception' not in event
        self.assertTrue('sentry.interfaces.Message' in event)
        msg = event['sentry.interfaces.Message']
        self.assertEqual(msg['message'], 'This is a test of stacks')
        self.assertEqual(msg['params'], ())

    def test_extra_culprit(self):
        record = self.make_record('This is a test of stacks',
                                  extra={'culprit': 'foo in bar'})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        self.assertEqual(event['culprit'], 'foo in bar')

    def test_extra_data_as_string(self):
        record = self.make_record('Message', extra={'data': 'foo'})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        expected = "'foo'" if six.PY3 else "u'foo'"
        self.assertEqual(event['extra']['data'], expected)

    def test_tags(self):
        record = self.make_record('Message', extra={'tags': {'foo': 'bar'}})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        assert event['tags'] == {'foo': 'bar'}

    def test_tags_on_error(self):
        try:
            raise ValueError('This is a test ValueError')
        except ValueError:
            record = self.make_record('Message',
                                      extra={'tags': {'foo': 'bar'}},
                                      exc_info=sys.exc_info())
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        assert event['tags'] == {'foo': 'bar'}

    def test_fingerprint_on_event(self):
        record = self.make_record('Message', extra={'fingerprint': ['foo']})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        assert event['fingerprint'] == ['foo']

    def test_culprit_on_event(self):
        record = self.make_record('Message', extra={'culprit': 'foo'})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        assert event['culprit'] == 'foo'

    def test_server_name_on_event(self):
        record = self.make_record('Message', extra={'server_name': 'foo'})
        self.handler.emit(record)
        self.assertEqual(len(self.client.events), 1)
        event = self.client.events.pop(0)
        assert event['server_name'] == 'foo'
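For reference, here is a minimal sketch of the pattern the tests above exercise: attaching a raven SentryHandler to a standard-library logger so that matching records are forwarded to Sentry. The DSN below is a placeholder, not a real project.

# Minimal usage sketch of the handler exercised by the tests above.
# The DSN is a placeholder; substitute your own Sentry project DSN.
import logging

from raven import Client
from raven.handlers.logging import SentryHandler

client = Client('http://public:[email protected]/1')
handler = SentryHandler(client)
handler.setLevel(logging.ERROR)  # only forward ERROR and above

logger = logging.getLogger(__name__)
logger.addHandler(handler)

try:
    1 / 0
except ZeroDivisionError:
    # exc_info=True attaches the traceback, so the event carries an exception
    logger.error('Something went wrong', exc_info=True)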
def setUp(self):
    self.client = TempStoreClient(include_paths=['tests', 'raven'])
    self.handler = SentryHandler(self.client)
def setup_logger(sentry_url, log_file=None):
    """Run once when the module is loaded and enable logging.

    :param sentry_url: Mandatory url to sentry api for remote logging.
        Consult your sentry instance for the client instance url.
    :type sentry_url: str

    :param log_file: Optional full path to a file to write logs to.
    :type log_file: str

    Borrowed heavily from this:
    http://docs.python.org/howto/logging-cookbook.html

    Use this to first initialise the logger in your __init__.py::

       import custom_logging
       custom_logging.setup_logger('http://path to sentry')

    You would typically only need to do the above once ever as the safe
    model is initialised early and will set up the logger globally so it
    is available to all packages / subpackages as shown below.

    In a module that wants to do logging then use this example as a guide
    to get the initialised logger instance::

       # The LOGGER is initialised in sg_utilities.py by init
       import logging
       LOGGER = logging.getLogger('QGIS')

    Now to log a message do::

       LOGGER.debug('Some debug message')

    .. note:: The file logs are written to the user tmp dir e.g.:
       /tmp/23-08-2012/timlinux/logs/qgis.log
    """
    logger = logging.getLogger('QGIS')
    logger.setLevel(logging.DEBUG)
    default_handler_level = logging.DEBUG

    # create formatter that will be added to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # create file handler which logs even debug messages
    log_temp_dir = temp_dir('logs')
    path = log_file_path()
    if log_file is None:
        file_handler = logging.FileHandler(path)
    else:
        file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(default_handler_level)

    # create console handler with a higher log level
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)

    qgis_handler = QgsLogHandler()

    # Sentry handler - this is optional hence the localised import.
    # It will only log if raven is installed (pip install raven). If raven
    # is available, logging messages will be sent to the sentry host.
    # Only exceptions are logged there. You need to either:
    # * set the env var 'SENTRY' (the value can be anything), or
    # * enable the 'plugins/use_sentry' QSettings option
    # before this will be enabled.
    client = Client(sentry_url)
    sentry_handler = SentryHandler(client)
    sentry_handler.setFormatter(formatter)
    sentry_handler.setLevel(logging.ERROR)
    if add_logging_handler_once(logger, sentry_handler):
        logger.debug('Sentry logging enabled')

    # Set formatters
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)
    qgis_handler.setFormatter(formatter)

    # add the handlers to the logger
    add_logging_handler_once(logger, file_handler)
    add_logging_handler_once(logger, console_handler)
    add_logging_handler_once(logger, qgis_handler)
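A brief usage sketch for the QGIS setup_logger() above. The module name custom_logging follows the docstring's own example; the DSN is a placeholder and would need to be replaced with a real Sentry project URL.

# Hypothetical usage of setup_logger() above.
# 'custom_logging' and the DSN are placeholders for illustration only.
import logging

import custom_logging

custom_logging.setup_logger('http://public:[email protected]/2')
LOGGER = logging.getLogger('QGIS')
LOGGER.debug('Written to the log file and the console')
LOGGER.error('Also forwarded to Sentry, since the handler level is ERROR')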
def test_logging_level_not_set(self):
    handler = SentryHandler('http://*****:*****@example.com/1')
    self.assertEqual(handler.level, logging.NOTSET)
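As this test verifies, SentryHandler also accepts a DSN string directly and then defaults to logging.NOTSET, so callers normally set a level explicitly. A small sketch, with a placeholder DSN:

# Sketch: a handler built from a DSN string defaults to NOTSET,
# so set a level explicitly if only errors should be forwarded.
# The DSN is a placeholder.
import logging

from raven.handlers.logging import SentryHandler

handler = SentryHandler('http://public:[email protected]/1')
handler.setLevel(logging.ERROR)
logging.getLogger().addHandler(handler)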
def setup_logger(sentry_url=None):
    """Run once when the module is loaded and enable logging.

    :param sentry_url: Optional url to sentry api for remote logging.
        Defaults to http://c64a83978732474ea751d432ab943a6b:
        [email protected]/5 which is the
        sentry project for InaSAFE desktop.
    :type sentry_url: str

    Borrowed heavily from this:
    http://docs.python.org/howto/logging-cookbook.html

    Use this to first initialise the logger (see safe/__init__.py)::

       from safe_qgis import utilities
       utilities.setup_logger()

    You would typically only need to do the above once ever as the safe
    model is initialised early and will set up the logger globally so it
    is available to all packages / subpackages as shown below.

    In a module that wants to do logging then use this example as a guide
    to get the initialised logger instance::

       # The LOGGER is initialised in utilities.py by init
       import logging
       LOGGER = logging.getLogger('InaSAFE')

    Now to log a message do::

       LOGGER.debug('Some debug message')

    .. note:: The file logs are written to the inasafe user tmp dir e.g.:
       /tmp/inasafe/23-08-2012/timlinux/logs/inasafe.log
    """
    logger = logging.getLogger('InaSAFE')

    # create formatter that will be added to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    qgis_handler = QgsLogHandler()
    qgis_handler.setFormatter(formatter)
    add_logging_handler_once(logger, qgis_handler)

    # Sentry handler - this is optional hence the localised import.
    # It will only log if raven is installed (pip install raven). If raven
    # is available, logging messages will be sent to
    # http://sentry.linfiniti.com and only exceptions are logged there.
    # You need to either:
    # * set the env var 'INASAFE_SENTRY=1' (the value can be anything), or
    # * enable the 'help improve InaSAFE by submitting errors to a remote
    #   server' option in the InaSAFE options dialog
    # before this will be enabled.
    settings = QtCore.QSettings()
    flag = settings.value('inasafe/useSentry', False)
    if 'INASAFE_SENTRY' in os.environ or flag:
        if sentry_url is None:
            client = Client(
                'http://c64a83978732474ea751d432ab943a6b'
                ':[email protected]/5')
        else:
            client = Client(sentry_url)
        sentry_handler = SentryHandler(client)
        sentry_handler.setFormatter(formatter)
        sentry_handler.setLevel(logging.ERROR)
        if add_logging_handler_once(logger, sentry_handler):
            logger.debug('Sentry logging enabled in safe_qgis')
        elif 'INASAFE_SENTRY' in os.environ:
            logger.debug('Sentry logging already enabled in safe')
    else:
        logger.debug('Sentry logging disabled in safe_qgis')
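A usage sketch for the InaSAFE variant above, following its own docstring. It assumes the safe_qgis package is importable and opts in to Sentry via the environment variable that the function checks; nothing is sent unless that opt-in (or the QSettings flag) is present.

# Usage sketch for the InaSAFE setup_logger() above, per its docstring.
import logging
import os

os.environ['INASAFE_SENTRY'] = '1'  # opt in to remote error reporting

from safe_qgis import utilities
utilities.setup_logger()  # uses the default InaSAFE Sentry project

LOGGER = logging.getLogger('InaSAFE')
LOGGER.error('Forwarded to Sentry because the handler level is ERROR')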