        if settings.USE_SEARCH:
            try:
                maybe_delay(index_event, event)
            except Exception as e:
                transaction.rollback_unless_managed(using=group._state.db)
                logger.exception(u'Error indexing document: %s', e)

        if is_new:
            try:
                regression_signal.send(sender=self.model, instance=group)
            except Exception as e:
                transaction.rollback_unless_managed(using=group._state.db)
                logger.exception(u'Error sending regression signal: %s', e)

        send_group_processors(group=group, event=event, is_new=is_new, is_sample=is_sample)

        return event

    def _create_group(self, event, **kwargs):
        from sentry.models import FilterValue, STATUS_RESOLVED

        date = event.datetime
        time_spent = event.time_spent
        project = event.project

        group, is_new = self.get_or_create(
            project=project,
            culprit=event.culprit,
            logger=event.logger,
            checksum=event.checksum,
    def save_data(self, project, data, raw=False):
        # TODO: this function is way too damn long and needs refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager
        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(id=project)

        Raven.tags_context({'project': project.id})

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit')
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        date = data.pop('timestamp')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

        kwargs = {
            'level': level,
            'message': message,
            'platform': platform,
            'culprit': culprit or '',
            'logger': logger_name,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            server_name=server_name,
            site=site,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': 1 if time_spent else 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_sample = self._create_group(
                event=event,
                tags=data['tags'],
                **group_kwargs
            )
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                warnings.warn(u'Unable to process log entry: %s' % (exc,))
            return

        using = group._state.db

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        if not raw:
            send_group_processors(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample
            )

        if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
            index_event.delay(event)

        # TODO: move this to the queue
        if is_new and not raw:
            regression_signal.send_robust(sender=self.model, instance=group)

        return event
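
# NOTE: the savepoint pattern used above is easy to get wrong in isolation,
# so the sketch below pulls it out on its own. It is illustrative only:
# `_savepoint_insert_sketch` is a hypothetical helper, not part of this
# module, and it assumes the pre-1.6 Django transaction API
# (transaction.savepoint / savepoint_rollback / savepoint_commit) that the
# rest of this file already relies on.
def _savepoint_insert_sketch(project, group, event_id, using):
    from django.db import IntegrityError, transaction

    from sentry.models import EventMapping

    sid = transaction.savepoint(using=using)
    try:
        # this may violate a unique constraint when the same event is
        # delivered twice
        EventMapping.objects.create(project=project, group=group,
                                    event_id=event_id)
    except IntegrityError:
        # roll back only to the savepoint so the outer transaction stays
        # usable -- on PostgreSQL an uncaught error would otherwise poison
        # the whole transaction
        transaction.savepoint_rollback(sid, using=using)
        return False
    transaction.savepoint_commit(sid, using=using)
    return True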
class GroupManager(BaseManager, ChartMixin):
    use_for_related_fields = True

    def normalize_event_data(self, data):
        # TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
        # First we pull out our top-level (non-data attr) kwargs
        if not data.get('level') or data['level'] not in LOG_LEVELS:
            data['level'] = logging.ERROR

        if not data.get('logger'):
            data['logger'] = DEFAULT_LOGGER_NAME
        else:
            data['logger'] = trim(data['logger'], 64)

        timestamp = data.get('timestamp')
        if not timestamp:
            timestamp = timezone.now()

        # We must convert date to local time so Django doesn't mess it up
        # based on TIME_ZONE
        if settings.TIME_ZONE:
            if not timezone.is_aware(timestamp):
                timestamp = timestamp.replace(tzinfo=timezone.utc)
        elif timezone.is_aware(timestamp):
            timestamp = timestamp.replace(tzinfo=None)
        data['timestamp'] = timestamp

        if not data.get('event_id'):
            data['event_id'] = uuid.uuid4().hex

        data.setdefault('message', None)
        data.setdefault('culprit', None)
        data.setdefault('time_spent', None)
        data.setdefault('server_name', None)
        data.setdefault('site', None)
        data.setdefault('checksum', None)
        data.setdefault('platform', None)
        data.setdefault('extra', {})

        tags = data.get('tags')
        if not tags:
            tags = []
        # full support for dict syntax
        elif isinstance(tags, dict):
            tags = tags.items()
        # prevent [tag, tag, tag] (invalid) syntax
        elif not all(len(t) == 2 for t in tags):
            tags = []
        else:
            tags = list(tags)

        data['tags'] = tags

        data['message'] = strip(data['message'])
        data['culprit'] = strip(data['culprit'])

        if not isinstance(data['extra'], dict):
            # throw it away
            data['extra'] = {}

        trim_dict(data['extra'], max_size=MAX_EXTRA_VARIABLE_SIZE)

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {
                    'values': [data['sentry.interfaces.Exception']]
                }

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0][
                    'stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

            for exc_data in data['sentry.interfaces.Exception']['values']:
                for key in ('type', 'module', 'value'):
                    value = exc_data.get(key)
                    if value:
                        exc_data[key] = trim(value)

                if exc_data.get('stacktrace'):
                    trim_frames(exc_data['stacktrace'])
                    for frame in exc_data['stacktrace']['frames']:
                        stack_vars = frame.get('vars', {})
                        trim_dict(stack_vars)

        if 'sentry.interfaces.Stacktrace' in data:
            trim_frames(data['sentry.interfaces.Stacktrace'])
            for frame in data['sentry.interfaces.Stacktrace']['frames']:
                stack_vars = frame.get('vars', {})
                trim_dict(stack_vars)

        if 'sentry.interfaces.Message' in data:
            msg_data = data['sentry.interfaces.Message']
            msg_data['message'] = trim(msg_data['message'], 1024)
            if msg_data.get('params'):
                msg_data['params'] = trim(msg_data['params'])

        if 'sentry.interfaces.Http' in data:
            http_data = data['sentry.interfaces.Http']
            for key in ('cookies', 'querystring', 'headers', 'env', 'url'):
                value = http_data.get(key)
                if not value:
                    continue

                if isinstance(value, dict):
                    trim_dict(value)
                else:
                    http_data[key] = trim(value)

            value = http_data.get('data')
            if value:
                http_data['data'] = trim(value, 2048)

            # default the culprit to the url
            if not data['culprit']:
                data['culprit'] = trim(strip(http_data.get('url')), MAX_CULPRIT_LENGTH)

        return data

    def from_kwargs(self, project, **kwargs):
        data = self.normalize_event_data(kwargs)
        return self.save_data(project, data)

    @transaction.commit_on_success
    def save_data(self, project, data, raw=False):
        # TODO: this function is way too damn long and needs refactored
        # the inner imports also suck so let's try
        # to move it away from the objects manager
        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(id=project)

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit')
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        date = data.pop('timestamp')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {
                    'values': [data['sentry.interfaces.Exception']]
                }

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0][
                    'stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

        kwargs = {
            'level': level,
            'message': message,
            'platform': platform,
            'culprit': culprit or '',
            'logger': logger_name,
        }

        event = Event(project=project, event_id=event_id, data=data,
                      server_name=server_name, site=site,
                      time_spent=time_spent, datetime=date, **kwargs)

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': 1 if time_spent else 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_sample = self._create_group(event=event,
                                                          tags=data['tags'],
                                                          **group_kwargs)
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                warnings.warn(u'Unable to process log entry: %s' % (exc,))
            return

        using = group._state.db

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(project=project, group=group,
                                        event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        if not raw:
            send_group_processors(group=group, event=event, is_new=is_new,
                                  is_sample=is_sample)

        if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
            index_event.delay(event)

        # TODO: move this to the queue
        if is_new and not raw:
            regression_signal.send_robust(sender=self.model, instance=group)

        return event
    def save_data(self, project, data, raw=False):
        # TODO: this function is way too damn long and needs refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager
        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(id=project)

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit') or ''
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'message': message,
            'platform': platform,
        }

        event = Event(project=project, event_id=event_id, data=data,
                      time_spent=time_spent, datetime=date, **kwargs)

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': 1 if time_spent else 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_regression, is_sample = self._create_group(
                event=event, tags=data['tags'], **group_kwargs)
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                warnings.warn(u'Unable to process log entry: %s' % (exc,))
            return

        using = group._state.db

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(project=project, group=group,
                                        event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        if not raw:
            send_group_processors(
                group=group,
                event=event,
                is_new=is_new or is_regression,  # backwards compat
                is_sample=is_sample,
                is_regression=is_regression,
            )

        index_event.delay(event)

        # TODO: move this to the queue
        if is_new and not raw:
            regression_signal.send_robust(sender=self.model, instance=group)

        return event
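
# Sketch of how the `time_spent_total` / `time_spent_count` counters kept in
# `group_kwargs` above turn into an average. `_avg_time_spent_sketch` is a
# hypothetical helper for illustration; the actual rollup belongs to
# `_create_group`, which is not shown here.
def _avg_time_spent_sketch(time_spent_total, time_spent_count):
    # events without a `time_spent` value contribute 0 to both counters,
    # so guard against dividing by zero
    if not time_spent_count:
        return None
    return time_spent_total / float(time_spent_count)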
class GroupManager(BaseManager, ChartMixin):
    use_for_related_fields = True

    def normalize_event_data(self, data):
        # First we pull out our top-level (non-data attr) kwargs
        if not data.get('level') or data['level'] not in LOG_LEVELS_DICT:
            data['level'] = logging.ERROR

        if not data.get('logger'):
            data['logger'] = settings.DEFAULT_LOGGER_NAME

        timestamp = data.get('timestamp')
        if not timestamp:
            timestamp = timezone.now()

        if not data.get('culprit'):
            data['culprit'] = ''

        # We must convert date to local time so Django doesn't mess it up
        # based on TIME_ZONE
        if dj_settings.TIME_ZONE:
            if not timezone.is_aware(timestamp):
                timestamp = timestamp.replace(tzinfo=timezone.utc)
        elif timezone.is_aware(timestamp):
            timestamp = timestamp.replace(tzinfo=None)
        data['timestamp'] = timestamp

        if not data.get('event_id'):
            data['event_id'] = uuid.uuid4().hex

        data.setdefault('message', None)
        data.setdefault('time_spent', None)
        data.setdefault('server_name', None)
        data.setdefault('site', None)
        data.setdefault('checksum', None)
        data.setdefault('platform', None)

        tags = data.get('tags')
        if not tags:
            tags = []
        # full support for dict syntax
        elif isinstance(tags, dict):
            tags = tags.items()
        else:
            tags = list(tags)

        data['tags'] = tags

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

        return data

    def from_kwargs(self, project, **kwargs):
        data = self.normalize_event_data(kwargs)
        return self.save_data(project, data)

    @transaction.commit_on_success
    def save_data(self, project, data):
        # TODO: this function is way too damn long and needs refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager
        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(pk=project)

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit')
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        date = data.pop('timestamp')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        kwargs = {
            'level': level,
            'message': message,
            'platform': platform,
            'culprit': culprit or '',
            'logger': logger_name,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            server_name=server_name,
            site=site,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': 1 if time_spent else 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS_DICT[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_sample = self._create_group(
                event=event,
                tags=data['tags'],
                **group_kwargs
            )
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                warnings.warn(u'Unable to process log entry: %s' % (exc,))
            return

        using = group._state.db

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        send_group_processors(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample
        )

        if settings.USE_SEARCH:
            index_event.delay(event)

        # TODO: move this to the queue
        if is_new:
            regression_signal.send_robust(sender=self.model, instance=group)

        return event
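
# Sketch of a typical entry point into this manager (illustrative only; the
# payload values are made up, and `_from_kwargs_usage_sketch` is a
# hypothetical helper). `normalize_event_data` fills in defaults such as
# `event_id`, `timestamp`, and `level` before `save_data` persists the event,
# assuming `Group.objects` is an instance of GroupManager:
def _from_kwargs_usage_sketch(project_id):
    from sentry.models import Group

    return Group.objects.from_kwargs(
        project_id,
        message='ValueError: invalid literal for int()',  # hypothetical
        logger='root',
        platform='python',
    )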