def test_explicit_stack(self):
    self.logger.info('This is a test of stacks',
                     extra={'stack': iter_stack_frames()})
    self.assertEquals(len(self.client.events), 1)
    event = self.client.events.pop(0)
    self.assertTrue('culprit' in event, event)
    self.assertEquals(event['culprit'],
                      'tests.handlers.logging.logging_tests.test_explicit_stack')
    self.assertTrue('message' in event, event)
    self.assertEquals(event['message'], 'This is a test of stacks')
    self.assertFalse('exception' in event)
    self.assertTrue('param_message' in event)
    msg = event['param_message']
    self.assertEquals(msg['message'], 'This is a test of stacks')
    self.assertEquals(msg['params'], ())
    self.assertTrue('stacktrace' in event)
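# A minimal usage sketch of the explicit-stack feature exercised by the test
# above: application code can hand the logging handler a pre-captured stack
# through the 'stack' key of `extra`. The import path for iter_stack_frames
# and the presence of an already-attached Opbeat handler are assumptions.
import logging

from opbeat.utils.stacks import iter_stack_frames

logger = logging.getLogger('myapp')  # hypothetical application logger
logger.info('Something happened', extra={'stack': iter_stack_frames()})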
def _emit(self, record, **kwargs):
    data = {}

    for k, v in six.iteritems(record.__dict__):
        if '.' not in k and k not in ('culprit',):
            continue
        data[k] = v

    stack = getattr(record, 'stack', None)
    if stack is True:
        stack = iter_stack_frames()

    if stack:
        frames = []
        started = False
        last_mod = ''
        for item in stack:
            if isinstance(item, (list, tuple)):
                frame, lineno = item
            else:
                frame, lineno = item, item.f_lineno

            if not started:
                f_globals = getattr(frame, 'f_globals', {})
                module_name = f_globals.get('__name__', '')
                if last_mod.startswith('logging') \
                        and not module_name.startswith('logging'):
                    started = True
                else:
                    last_mod = module_name
                    continue
            frames.append((frame, lineno))
        stack = frames

    extra = getattr(record, 'data', {})
    # Add in all of the data from the record that we aren't already capturing
    for k in record.__dict__.keys():
        if k in ('stack', 'name', 'args', 'msg', 'levelno', 'exc_text',
                 'exc_info', 'data', 'created', 'levelname', 'msecs',
                 'relativeCreated'):
            continue
        if k.startswith('_'):
            continue
        extra[k] = record.__dict__[k]

    date = datetime.datetime.utcfromtimestamp(record.created)

    # If there's no exception being processed,
    # exc_info may be a 3-tuple of None
    # http://docs.python.org/library/sys.html#sys.exc_info
    if record.exc_info and all(record.exc_info):
        handler = self.client.get_handler('opbeat.events.Exception')
        data.update(handler.capture(exc_info=record.exc_info))
        # data['checksum'] = handler.get_hash(data)

    data['level'] = record.levelno
    data['logger'] = record.name

    return self.client.capture(
        'Message',
        param_message={'message': record.msg, 'params': record.args},
        stack=stack,
        data=data,
        extra=extra,
        date=date,
        **kwargs)
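# Hedged sketch of wiring the handler that defines _emit() above into the
# stdlib logging module. The OpbeatHandler class name, its constructor
# signature and the Client arguments are assumptions drawn from the
# surrounding code, not definitions taken from this file.
import logging

from opbeat import Client
from opbeat.handlers.logging import OpbeatHandler

client = Client(organization_id='<org id>', app_id='<app id>',
                secret_token='<token>')
handler = OpbeatHandler(client)  # assumed to accept a Client instance
handler.setLevel(logging.ERROR)  # forward only errors and above
logging.getLogger('myapp').addHandler(handler)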
def build_msg_for_logging(self, event_type, data=None, date=None,
                          extra=None, stack=None, **kwargs):
    """
    Captures, processes and serializes an event into a dict object
    """
    # create ID client-side so that it can be passed to application
    event_id = uuid.uuid4().hex

    if data is None:
        data = {}
    if extra is None:
        extra = {}
    if not date:
        date = datetime.datetime.utcnow()
    if stack is None:
        stack = self.auto_log_stacks

    self.build_msg(data=data)

    # if '.' not in event_type:
    # Assume it's a builtin
    event_type = 'opbeat.events.%s' % event_type

    handler = self.get_handler(event_type)

    result = handler.capture(**kwargs)

    # data (explicit) culprit takes over auto event detection
    culprit = result.pop('culprit', None)
    if data.get('culprit'):
        culprit = data['culprit']

    for k, v in six.iteritems(result):
        if k not in data:
            data[k] = v

    if stack and 'stacktrace' not in data:
        if stack is True:
            frames = iter_stack_frames()
        else:
            frames = stack
        data.update({
            'stacktrace': {
                'frames': varmap(
                    lambda k, v: shorten(
                        v,
                        string_length=self.string_max_length,
                        list_length=self.list_max_length),
                    get_stack_info(frames))
            },
        })

    if 'stacktrace' in data and not culprit:
        culprit = get_culprit(data['stacktrace']['frames'],
                              self.include_paths, self.exclude_paths)

    if not data.get('level'):
        data['level'] = 'error'

    if isinstance(data['level'], six.integer_types):
        data['level'] = logging.getLevelName(data['level']).lower()

    data.setdefault('extra', {})

    # Shorten lists/strings
    for k, v in six.iteritems(extra):
        data['extra'][k] = shorten(v,
                                   string_length=self.string_max_length,
                                   list_length=self.list_max_length)

    if culprit:
        data['culprit'] = culprit

    # Run the data through processors
    for processor in self.get_processors():
        data.update(processor.process(data))

    # Make sure all data is coerced
    data = transform(data)

    if 'message' not in data:
        data['message'] = handler.to_string(data)

    # Make sure certain values are not too long
    for v in defaults.MAX_LENGTH_VALUES:
        if v in data:
            data[v] = shorten(data[v],
                              string_length=defaults.MAX_LENGTH_VALUES[v])

    data.update({
        'timestamp': date,
        # 'time_spent': time_spent,
        'client_supplied_id': event_id,
    })

    return data
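# Rough sketch of how the dict built by build_msg_for_logging() is typically
# consumed: a capture() entry point builds the message and hands it to a
# transport method. The capture() signature and the send() call here are
# assumptions about the rest of the Client class, not taken from this file.
def capture(self, event_type, data=None, date=None, extra=None, stack=None,
            **kwargs):
    data = self.build_msg_for_logging(event_type, data=data, date=date,
                                      extra=extra, stack=stack, **kwargs)
    self.send(**data)  # hypothetical transport entry point
    return data.get('client_supplied_id')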
def __init__(self, organization_id=None, app_id=None, secret_token=None,
             transport_class=None, include_paths=None, exclude_paths=None,
             timeout=None, hostname=None, auto_log_stacks=None, key=None,
             string_max_length=None, list_max_length=None, processors=None,
             filter_exception_types=None, servers=None, api_path=None,
             asynk=None, async_mode=None, traces_send_freq_secs=None,
             transactions_ignore_patterns=None, framework_version='',
             **kwargs):
    # configure loggers first
    cls = self.__class__
    self.logger = logging.getLogger('%s.%s' % (cls.__module__,
                                               cls.__name__))
    self.error_logger = logging.getLogger('opbeat.errors')

    self.state = ClientState()

    if organization_id is None and os.environ.get('OPBEAT_ORGANIZATION_ID'):
        msg = "Configuring opbeat from environment variable 'OPBEAT_ORGANIZATION_ID'"
        self.logger.info(msg)
        organization_id = os.environ['OPBEAT_ORGANIZATION_ID']

    if app_id is None and os.environ.get('OPBEAT_APP_ID'):
        msg = "Configuring opbeat from environment variable 'OPBEAT_APP_ID'"
        self.logger.info(msg)
        app_id = os.environ['OPBEAT_APP_ID']

    if secret_token is None and os.environ.get('OPBEAT_SECRET_TOKEN'):
        msg = "Configuring opbeat from environment variable 'OPBEAT_SECRET_TOKEN'"
        self.logger.info(msg)
        secret_token = os.environ['OPBEAT_SECRET_TOKEN']

    self.servers = servers or defaults.SERVERS

    if asynk is not None and async_mode is None:
        warnings.warn(
            'Usage of "async" argument is deprecated. Use "async_mode"',
            category=DeprecationWarning,
            stacklevel=2,
        )
        async_mode = asynk
    self.async_mode = (async_mode is True or
                       (defaults.ASYNC_MODE and async_mode is not False))
    if not transport_class:
        transport_class = (defaults.ASYNC_TRANSPORT_CLASS
                           if self.async_mode
                           else defaults.SYNC_TRANSPORT_CLASS)
    self._transport_class = import_string(transport_class)
    self._transports = {}

    # servers may be set to a NoneType (for Django)
    if self.servers and not (organization_id and app_id and secret_token):
        msg = 'Missing configuration for Opbeat client. Please see documentation.'
        self.logger.info(msg)

    self.is_send_disabled = (
        os.environ.get('OPBEAT_DISABLE_SEND', '').lower() in ('1', 'true'))
    if self.is_send_disabled:
        self.logger.info(
            'Not sending any data to Opbeat due to OPBEAT_DISABLE_SEND '
            'environment variable')

    self.include_paths = set(include_paths or defaults.INCLUDE_PATHS)
    self.exclude_paths = set(exclude_paths or defaults.EXCLUDE_PATHS)
    self.timeout = int(timeout or defaults.TIMEOUT)
    self.hostname = six.text_type(hostname or defaults.HOSTNAME)
    self.auto_log_stacks = bool(auto_log_stacks or
                                defaults.AUTO_LOG_STACKS)

    self.string_max_length = int(string_max_length or
                                 defaults.MAX_LENGTH_STRING)
    self.list_max_length = int(list_max_length or defaults.MAX_LENGTH_LIST)
    self.traces_send_freq_secs = (traces_send_freq_secs or
                                  defaults.TRACES_SEND_FREQ_SECS)

    self.organization_id = six.text_type(organization_id)
    self.app_id = six.text_type(app_id)
    self.secret_token = six.text_type(secret_token)

    self.filter_exception_types_dict = {}
    for exc_to_filter in (filter_exception_types or []):
        exc_to_filter_type = exc_to_filter.split(".")[-1]
        exc_to_filter_module = ".".join(exc_to_filter.split(".")[:-1])
        self.filter_exception_types_dict[exc_to_filter_type] = exc_to_filter_module

    if processors is None:
        self.processors = defaults.PROCESSORS
    else:
        self.processors = processors

    self._framework_version = framework_version

    self.module_cache = ModuleProxyCache()

    self.instrumentation_store = RequestsStore(
        lambda: self.get_stack_info_for_trace(iter_stack_frames(), False),
        self.traces_send_freq_secs,
        transactions_ignore_patterns)
    atexit_register(self.close)
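# Minimal construction sketch for the Client whose __init__ is shown above.
# The credential values are placeholders and the `from opbeat import Client`
# import path is an assumption; as __init__ shows, the credentials may also
# come from the OPBEAT_ORGANIZATION_ID, OPBEAT_APP_ID and OPBEAT_SECRET_TOKEN
# environment variables.
from opbeat import Client

client = Client(
    organization_id='<organization id>',
    app_id='<app id>',
    secret_token='<secret token>',
)
client.capture('Message',
               param_message={'message': 'deploy finished in %s',
                              'params': ('42s',)})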