Example #1
    def save(self, project, raw=False):
        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        level = data.pop('level')

        culprit = data.pop('culprit', None)
        time_spent = data.pop('time_spent', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        fingerprint = data.pop('fingerprint', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)
        environment = data.pop('environment', None)

        if not culprit:
            culprit = generate_culprit(data)

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'message': message,
            'platform': platform,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        tags = data.get('tags') or []
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))
        if release:
            # TODO(dcramer): we should ensure we create Release objects
            tags.append(('sentry:release', release))
        if environment:
            tags.append(('environment', environment))

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags, event,
                                      _with_transaction=False)
            if added_tags:
                tags.extend(added_tags)

        event_user = self._get_event_user(project, data)
        if event_user:
            tags.append(('sentry:user', event_user.tag_value))

        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        data['fingerprint'] = fingerprint or '{{ default }}'

        # prioritize fingerprint over checksum as it's likely the client defaulted
        # a checksum whereas the fingerprint was explicit
        if fingerprint:
            hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
        elif checksum:
            hashes = [checksum]
        else:
            hashes = map(md5_from_hash, get_hashes_for_event(event))

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        if release:
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            group_kwargs['first_release'] = release

            Activity.objects.create(
                type=Activity.RELEASE,
                project=project,
                ident=release,
                data={'version': release},
                datetime=date,
            )

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event,
            hashes=hashes,
            release=release,
            **group_kwargs
        )

        event.group = group
        event.group_id = group.id
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic():
                EventMapping.objects.create(
                    project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
            return event

        UserReport.objects.filter(
            project=project, event_id=event_id,
        ).update(group=group)

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic():
                    event.save()
            except IntegrityError:
                self.logger.info('Duplicate Event found for event_id=%s', event_id)
                return event

        if event_user:
            tsdb.record_multi((
                (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
            ), timestamp=event.datetime)

        if is_new and release:
            buffer.incr(Release, {'new_groups': 1}, {
                'id': release.id,
            })

        safe_execute(Group.objects.add_tags, group, tags,
                     _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)

            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

        index_event.delay(event)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
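
Every revision in this set routes plugin hooks (and, in later revisions, aggregation itself) through safe_execute so a misbehaving callable cannot abort ingestion. Below is a minimal sketch of that contract, assuming only "log the failure and return None"; the real helper in sentry.utils.safe also manages transactions, which is what the _with_transaction=False flag above opts out of.

    import logging

    def safe_execute(func, *args, **kwargs):
        # Hedged stand-in for sentry.utils.safe.safe_execute: call func, log
        # any exception, and return None instead of raising, so callers must
        # treat a None result as "the callable failed".
        kwargs.pop('_with_transaction', None)  # accepted but ignored here
        try:
            return func(*args, **kwargs)
        except Exception:
            logging.getLogger(__name__).exception(
                'error running %r', getattr(func, '__name__', func))
            return None

This None-on-failure contract is why Examples #3 and #7 guard the _save_aggregate result with an explicit None check before unpacking it.
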
Example #2
    def save_data(self, project, data, raw=False):
        # TODO: this function is way too damn long and needs to be refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager

        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(id=project)

        Raven.tags_context({'project': project.id})

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit')
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        date = data.pop('timestamp')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

        kwargs = {
            'level': level,
            'message': message,
            'platform': platform,
            'culprit': culprit or '',
            'logger': logger_name,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            server_name=server_name,
            site=site,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_sample = self._create_group(
                event=event,
                tags=data['tags'],
                **group_kwargs
            )
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                warnings.warn(u'Unable to process log entry: %s' % exc)
            return

        using = group._state.db

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        if not raw:
            send_group_processors(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample
            )

        if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
            index_event.delay(event)

        # TODO: move this to the queue
        if is_new and not raw:
            regression_signal.send_robust(sender=self.model, instance=group)

        return event
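
The savepoint/rollback/commit sequence around event.save() and EventMapping.objects.create() above is duplicated verbatim. Under the pre-Django-1.6 transaction API these revisions target, it could be factored into a context manager along the following lines; this is a sketch, not a helper the originals define.

    from contextlib import contextmanager

    from django.db import IntegrityError, transaction

    @contextmanager
    def savepoint(using):
        # Commit the savepoint on success; roll back to it on IntegrityError
        # and re-raise so the caller can return early on a duplicate row.
        sid = transaction.savepoint(using=using)
        try:
            yield
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            raise
        else:
            transaction.savepoint_commit(sid, using=using)

With that helper, each duplicated block collapses to a single with-savepoint statement wrapped in a try/except IntegrityError that returns the event.
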
Example #3
    def save(self, project, raw=False):
        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        level = data.pop('level')

        culprit = data.pop('culprit', None) or ''
        time_spent = data.pop('time_spent', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'message': message,
            'platform': platform,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        # Calculate the checksum from the first highest scoring interface
        if checksum:
            hashes = [checksum]
        else:
            hashes = get_hashes_for_event(event)

        # TODO(dcramer): remove checksum usage
        event.checksum = hashes[0]

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))
        if release:
            # TODO(dcramer): we should ensure we create Release objects
            tags.append(('sentry:release', release))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        result = safe_execute(
            self._save_aggregate,
            event=event,
            tags=tags,
            hashes=hashes,
            **group_kwargs
        )
        if result is None:
            return

        group, is_new, is_regression, is_sample = result

        using = group._state.db

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        if not raw:
            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )

        index_event.delay(event)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
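
This revision stores hashes[0] directly into event.checksum, while Examples #1 and #8 first pass each hash through md5_from_hash. That helper presumably folds a list of grouping components into one stable hex digest, roughly as sketched below; the exact encoding and error handling are assumptions.

    from hashlib import md5

    def md5_from_hash(hash_bits):
        # Reduce a sequence of grouping components (strings) to a single
        # 32-character hex digest suitable for use as a group hash key.
        result = md5()
        for bit in hash_bits:
            result.update(bit.encode('utf-8'))
        return result.hexdigest()
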
Example #4
class GroupManager(BaseManager, ChartMixin):
    use_for_related_fields = True

    def normalize_event_data(self, data):
        # First we pull out our top-level (non-data attr) kwargs
        if not data.get('level') or data['level'] not in LOG_LEVELS_DICT:
            data['level'] = logging.ERROR
        if not data.get('logger'):
            data['logger'] = settings.DEFAULT_LOGGER_NAME

        timestamp = data.get('timestamp')
        if not timestamp:
            timestamp = timezone.now()

        if not data.get('culprit'):
            data['culprit'] = ''

        # We must convert date to local time so Django doesn't mess it up
        # based on TIME_ZONE
        if dj_settings.TIME_ZONE:
            if not timezone.is_aware(timestamp):
                timestamp = timestamp.replace(tzinfo=timezone.utc)
        elif timezone.is_aware(timestamp):
            timestamp = timestamp.replace(tzinfo=None)
        data['timestamp'] = timestamp

        if not data.get('event_id'):
            data['event_id'] = uuid.uuid4().hex

        data.setdefault('message', None)
        data.setdefault('time_spent', None)
        data.setdefault('server_name', None)
        data.setdefault('site', None)
        data.setdefault('checksum', None)
        data.setdefault('platform', None)

        tags = data.get('tags')
        if not tags:
            tags = []
        # full support for dict syntax
        elif isinstance(tags, dict):
            tags = tags.items()
        else:
            tags = list(tags)

        data['tags'] = tags

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {
                    'values': [data['sentry.interfaces.Exception']]
                }

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0][
                    'stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

        return data

    def from_kwargs(self, project, **kwargs):
        data = self.normalize_event_data(kwargs)

        return self.save_data(project, data)

    @transaction.commit_on_success
    def save_data(self, project, data):
        # TODO: this function is way too damn long and needs to be refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager

        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(pk=project)

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit')
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        date = data.pop('timestamp')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        kwargs = {
            'level': level,
            'message': message,
            'platform': platform,
            'culprit': culprit or '',
            'logger': logger_name,
        }

        event = Event(project=project,
                      event_id=event_id,
                      data=data,
                      server_name=server_name,
                      site=site,
                      time_spent=time_spent,
                      datetime=date,
                      **kwargs)

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS_DICT[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_sample = self._create_group(event=event,
                                                          tags=data['tags'],
                                                          **group_kwargs)
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                warnings.warn(u'Unable to process log entry: %s' % exc)
            return

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            try:
                event.save()
            except IntegrityError:
                transaction.rollback_unless_managed(using=group._state.db)
                return event

        try:
            EventMapping.objects.create(project=project,
                                        group=group,
                                        event_id=event_id)
        except IntegrityError:
            transaction.rollback_unless_managed(using=group._state.db)
            return event

        transaction.commit_unless_managed(using=group._state.db)

        if settings.USE_SEARCH:
            try:
                index_event.delay(event)
            except Exception as e:
                transaction.rollback_unless_managed(using=group._state.db)
                logger.exception(u'Error indexing document: %s', e)
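
normalize_event_data accepts tags either as a mapping or as a sequence of pairs, and fills in every missing top-level key so the data.pop(...) calls in save_data never raise. A usage sketch, assuming a configured Django settings module and that the manager can be exercised standalone:

    import logging

    manager = GroupManager()
    data = manager.normalize_event_data({
        'message': 'something broke',
        'tags': {'browser': 'Firefox'},  # dict syntax is normalized...
    })
    assert data['tags'] == [('browser', 'Firefox')]  # ...to a list of pairs
    assert data['level'] == logging.ERROR            # defaulted
    assert len(data['event_id']) == 32               # uuid4().hex
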
Example #5
    def save(self, project, raw=False):
        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        level = data.pop('level')

        culprit = data.pop('culprit', None) or ''
        time_spent = data.pop('time_spent', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'message': message,
            'platform': platform,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        tags = data.get('tags') or []
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))
        if release:
            # TODO(dcramer): we should ensure we create Release objects
            tags.append(('sentry:release', release))

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags, event,
                                      _with_transaction=False)
            if added_tags:
                tags.extend(added_tags)
        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        # Calculate the checksum from the first highest scoring interface
        if checksum:
            hashes = [checksum]
        else:
            hashes = get_hashes_for_event(event)

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        if release:
            group_kwargs['first_release'] = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            Activity.objects.create(
                type=Activity.RELEASE,
                project=project,
                ident=release,
                data={'version': release},
                datetime=date,
            )

        result = safe_execute(
            self._save_aggregate,
            event=event,
            hashes=hashes,
            **group_kwargs
        )
        # safe_execute swallows exceptions and returns None on failure
        if result is None:
            return

        group, is_new, is_regression, is_sample = result

        using = group._state.db

        event.group = group

        try:
            with transaction.atomic():
                EventMapping.objects.create(
                    project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
            return event

        UserReport.objects.filter(
            project=project, event_id=event_id,
        ).update(group=group)

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic():
                    event.save()
            except IntegrityError:
                self.logger.info('Duplicate Event found for event_id=%s', event_id)
                return event

        safe_execute(Group.objects.add_tags, group, tags,
                     _with_transaction=False)

        if not raw:
            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

        index_event.delay(event)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
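
Note that this revision calls Release.get_or_create(...), a classmethod taking date_added directly, rather than the stock Release.objects.get_or_create(..., defaults={...}) used in Example #10. It is presumably a convenience wrapper, possibly cache-aware; the sketch below shows equivalent behavior, with the caching layer entirely an assumption and get_or_create_release a hypothetical name.

    from django.core.cache import cache

    from sentry.models import Release

    def get_or_create_release(project, version, date_added):
        # Hypothetical equivalent of Release.get_or_create: consult a cache
        # keyed on (project, version), fall back to the default manager, and
        # fold date_added into the defaults dict.
        cache_key = 'release:%s:%s' % (project.id, version)
        release = cache.get(cache_key)
        if release is None:
            release, _ = Release.objects.get_or_create(
                project=project,
                version=version,
                defaults={'date_added': date_added},
            )
            cache.set(cache_key, release, 3600)
        return release
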
Example #6
class GroupManager(BaseManager, ChartMixin):
    use_for_related_fields = True

    def normalize_event_data(self, data):
        # TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
        # First we pull out our top-level (non-data attr) kwargs
        if not data.get('level') or data['level'] not in LOG_LEVELS:
            data['level'] = logging.ERROR

        if not data.get('logger'):
            data['logger'] = DEFAULT_LOGGER_NAME
        else:
            data['logger'] = trim(data['logger'], 64)

        timestamp = data.get('timestamp')
        if not timestamp:
            timestamp = timezone.now()

        # We must convert date to local time so Django doesn't mess it up
        # based on TIME_ZONE
        if settings.TIME_ZONE:
            if not timezone.is_aware(timestamp):
                timestamp = timestamp.replace(tzinfo=timezone.utc)
        elif timezone.is_aware(timestamp):
            timestamp = timestamp.replace(tzinfo=None)
        data['timestamp'] = timestamp

        if not data.get('event_id'):
            data['event_id'] = uuid.uuid4().hex

        data.setdefault('message', None)
        data.setdefault('culprit', None)
        data.setdefault('time_spent', None)
        data.setdefault('server_name', None)
        data.setdefault('site', None)
        data.setdefault('checksum', None)
        data.setdefault('platform', None)
        data.setdefault('extra', {})

        tags = data.get('tags')
        if not tags:
            tags = []
        # full support for dict syntax
        elif isinstance(tags, dict):
            tags = tags.items()
        # prevent [tag, tag, tag] (invalid) syntax
        elif not all(len(t) == 2 for t in tags):
            tags = []
        else:
            tags = list(tags)

        data['tags'] = tags
        data['message'] = strip(data['message'])
        data['culprit'] = strip(data['culprit'])

        if not isinstance(data['extra'], dict):
            # throw it away
            data['extra'] = {}

        trim_dict(data['extra'], max_size=MAX_EXTRA_VARIABLE_SIZE)

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {
                    'values': [data['sentry.interfaces.Exception']]
                }

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0][
                    'stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

            for exc_data in data['sentry.interfaces.Exception']['values']:
                for key in ('type', 'module', 'value'):
                    value = exc_data.get(key)
                    if value:
                        exc_data[key] = trim(value)
                if exc_data.get('stacktrace'):
                    trim_frames(exc_data['stacktrace'])
                    for frame in exc_data['stacktrace']['frames']:
                        stack_vars = frame.get('vars', {})
                        trim_dict(stack_vars)

        if 'sentry.interfaces.Stacktrace' in data:
            trim_frames(data['sentry.interfaces.Stacktrace'])
            for frame in data['sentry.interfaces.Stacktrace']['frames']:
                stack_vars = frame.get('vars', {})
                trim_dict(stack_vars)

        if 'sentry.interfaces.Message' in data:
            msg_data = data['sentry.interfaces.Message']
            msg_data['message'] = trim(msg_data['message'], 1024)
            if msg_data.get('params'):
                msg_data['params'] = trim(msg_data['params'])

        if 'sentry.interfaces.Http' in data:
            http_data = data['sentry.interfaces.Http']
            for key in ('cookies', 'querystring', 'headers', 'env', 'url'):
                value = http_data.get(key)
                if not value:
                    continue

                if type(value) == dict:
                    trim_dict(value)
                else:
                    http_data[key] = trim(value)

            value = http_data.get('data')
            if value:
                http_data['data'] = trim(value, 2048)

            # default the culprit to the url
            if not data['culprit']:
                data['culprit'] = trim(strip(http_data.get('url')),
                                       MAX_CULPRIT_LENGTH)

        return data

    def from_kwargs(self, project, **kwargs):
        data = self.normalize_event_data(kwargs)

        return self.save_data(project, data)

    @transaction.commit_on_success
    def save_data(self, project, data, raw=False):
        # TODO: this function is way too damn long and needs to be refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager

        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(id=project)

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit')
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        date = data.pop('timestamp')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {
                    'values': [data['sentry.interfaces.Exception']]
                }

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0][
                    'stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

        kwargs = {
            'level': level,
            'message': message,
            'platform': platform,
            'culprit': culprit or '',
            'logger': logger_name,
        }

        event = Event(project=project,
                      event_id=event_id,
                      data=data,
                      server_name=server_name,
                      site=site,
                      time_spent=time_spent,
                      datetime=date,
                      **kwargs)

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_sample = self._create_group(event=event,
                                                          tags=data['tags'],
                                                          **group_kwargs)
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                warnings.warn(u'Unable to process log entry: %s' % exc)
            return

        using = group._state.db

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(project=project,
                                        group=group,
                                        event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        if not raw:
            send_group_processors(group=group,
                                  event=event,
                                  is_new=is_new,
                                  is_sample=is_sample)

        if getattr(settings, 'SENTRY_INDEX_SEARCH',
                   settings.SENTRY_USE_SEARCH):
            index_event.delay(event)

        # TODO: move this to the queue
        if is_new and not raw:
            regression_signal.send_robust(sender=self.model, instance=group)

        return event
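
The normalization pass above is mostly about bounding payload size: trim, trim_dict, and trim_frames cap strings, mapping values, and stack depth before anything is persisted. A minimal sketch of the string case follows; the default size and ellipsis marker are assumptions. Note that trim returns the trimmed value rather than mutating it, which is why the sentry.interfaces.Message block must assign the result back.

    def trim(value, max_size=512):
        # Cap a string at max_size characters, marking the cut. The real
        # helper also recurses into lists and dicts; strings are enough to
        # show the idea.
        if isinstance(value, basestring) and len(value) > max_size:
            return value[:max_size - 3] + '...'
        return value

    def trim_dict(data, max_size=512):
        # Trim every value in place, matching how the examples call it.
        for key in list(data):
            data[key] = trim(data[key], max_size=max_size)
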
Example #7
    def save(self, project, raw=False):
        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        level = data.pop('level')

        culprit = data.pop('culprit', None) or ''
        time_spent = data.pop('time_spent', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'message': message,
            'platform': platform,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        # Calculate the checksum from the first highest scoring interface
        if checksum:
            hashes = [checksum]
        else:
            hashes = get_hashes_for_event(event)

        # TODO(dcramer): remove checksum usage
        event.checksum = hashes[0]

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))
        if release:
            # TODO(dcramer): we should ensure we create Release objects
            tags.append(('sentry:release', release))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        result = safe_execute(
            self._save_aggregate,
            event=event,
            tags=tags,
            hashes=hashes,
            **group_kwargs
        )
        if result is None:
            return

        group, is_new, is_regression, is_sample = result

        using = group._state.db

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        if not raw:
            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )

        index_event.delay(event)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
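
post_process_group.delay(...) moves rule evaluation and plugin post-processing off the ingestion path and onto the queue. Declared as a Celery-style task it would look roughly like the sketch below; only the keyword signature is fixed by the call sites above, while the decorator and body are assumptions.

    from celery import shared_task

    from sentry.plugins import plugins
    from sentry.utils.safe import safe_execute

    @shared_task
    def post_process_group(group, event, is_new, is_sample, is_regression):
        # Hypothetical body: fan out to each plugin's post-processing hook,
        # shielded by safe_execute so one plugin cannot break the rest.
        for plugin in plugins.for_project(event.project):
            safe_execute(plugin.post_process, group=group, event=event,
                         is_new=is_new, is_sample=is_sample,
                         is_regression=is_regression)
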
Example #8
    def save(self, project, raw=False):
        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        level = data.pop('level')

        culprit = data.pop('culprit', None)
        time_spent = data.pop('time_spent', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        fingerprint = data.pop('fingerprint', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)

        if not culprit:
            culprit = generate_culprit(data)

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'message': message,
            'platform': platform,
        }

        event = Event(project=project,
                      event_id=event_id,
                      data=data,
                      time_spent=time_spent,
                      datetime=date,
                      **kwargs)

        tags = data.get('tags') or []
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))
        if release:
            # TODO(dcramer): we should ensure we create Release objects
            tags.append(('sentry:release', release))

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags,
                                      event,
                                      _with_transaction=False)
            if added_tags:
                tags.extend(added_tags)

        event_user = self._get_event_user(project, data)
        if event_user:
            tags.append(('sentry:user', event_user.tag_value))

        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        data['fingerprint'] = fingerprint or '{{ default }}'

        # prioritize fingerprint over checksum as it's likely the client defaulted
        # a checksum whereas the fingerprint was explicit
        if fingerprint:
            hashes = map(md5_from_hash,
                         get_hashes_from_fingerprint(event, fingerprint))
        elif checksum:
            hashes = [checksum]
        else:
            hashes = map(md5_from_hash, get_hashes_for_event(event))

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        if release:
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            group_kwargs['first_release'] = release

            Activity.objects.create(
                type=Activity.RELEASE,
                project=project,
                ident=release,
                data={'version': release},
                datetime=date,
            )

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event, hashes=hashes, **group_kwargs)

        event.group = group
        event.group_id = group.id
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic():
                EventMapping.objects.create(project=project,
                                            group=group,
                                            event_id=event_id)
        except IntegrityError:
            self.logger.info('Duplicate EventMapping found for event_id=%s',
                             event_id)
            return event

        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(group=group)

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic():
                    event.save()
            except IntegrityError:
                self.logger.info('Duplicate Event found for event_id=%s',
                                 event_id)
                return event

        if is_new and release:
            buffer.incr(Release, {'new_groups': 1}, {
                'id': release.id,
            })

        safe_execute(Group.objects.add_tags,
                     group,
                     tags,
                     _with_transaction=False)

        if not project.first_event:
            project.update(first_event=date)

        if not raw:
            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info(
                'Raw event passed; skipping post process for event_id=%s',
                event_id)

        index_event.delay(event)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
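
The fingerprint branch hashes the client-supplied fingerprint, first expanding any '{{ default }}' entry into the event's default grouping components (via the same get_hashes_for_event helper used elsewhere). A sketch of what get_hashes_from_fingerprint presumably does with that placeholder:

    def get_hashes_from_fingerprint(event, fingerprint):
        # Hypothetical expansion: each '{{ default }}' entry is replaced by
        # the event's default grouping components; every other value passes
        # through verbatim. One hash input is produced per default variant.
        if '{{ default }}' not in fingerprint:
            return [list(fingerprint)]
        hashes = []
        for default_bits in get_hashes_for_event(event):
            bits = []
            for value in fingerprint:
                if value == '{{ default }}':
                    bits.extend(default_bits)
                else:
                    bits.append(value)
            hashes.append(bits)
        return hashes
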
Example #9
    def save_data(self, project, data, raw=False):
        # TODO: this function is way too damn long and needs to be refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager

        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(id=project)

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit') or ''
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'message': message,
            'platform': platform,
        }

        event = Event(project=project,
                      event_id=event_id,
                      data=data,
                      time_spent=time_spent,
                      datetime=date,
                      **kwargs)

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_regression, is_sample = self._create_group(
                event=event, tags=data['tags'], **group_kwargs)
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                warnings.warn(u'Unable to process log entry: %s' % exc)
            return

        using = group._state.db

        event.group = group

        # save the event unless it's been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(project=project,
                                        group=group,
                                        event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        if not raw:
            send_group_processors(
                group=group,
                event=event,
                is_new=is_new or is_regression,  # backwards compat
                is_sample=is_sample,
                is_regression=is_regression,
            )

        index_event.delay(event)

        # TODO: move this to the queue
        if is_new and not raw:
            regression_signal.send_robust(sender=self.model, instance=group)

        return event
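
send_group_processors is the synchronous predecessor of the post_process_group task in the newer revisions. Given how it is called, it plausibly fans out over a Django signal with send_robust, roughly as below; the signal object and the logging are assumptions.

    import logging

    from django.dispatch import Signal

    logger = logging.getLogger(__name__)
    group_processors = Signal()  # hypothetical underlying signal

    def send_group_processors(group, event, **kwargs):
        # send_robust collects exceptions as return values instead of
        # raising, so one broken receiver cannot abort ingestion.
        responses = group_processors.send_robust(
            sender=group.__class__, group=group, event=event, **kwargs)
        for receiver, response in responses:
            if isinstance(response, Exception):
                logger.error('Error in group processor %r: %s',
                             receiver, response)
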
Example #10
    def save(self, project, raw=False):
        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        level = data.pop('level')

        culprit = data.pop('culprit', None) or ''
        time_spent = data.pop('time_spent', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'message': message,
            'platform': platform,
        }

        event = Event(project=project,
                      event_id=event_id,
                      data=data,
                      time_spent=time_spent,
                      datetime=date,
                      **kwargs)

        tags = data.get('tags') or []
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))
        if release:
            # TODO(dcramer): we should ensure we create Release objects
            tags.append(('sentry:release', release))

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags,
                                      event,
                                      _with_transaction=False)
            if added_tags:
                tags.extend(added_tags)
        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        # Calculate the checksum from the first highest scoring interface
        if checksum:
            hashes = [checksum]
        else:
            hashes = get_hashes_for_event(event)

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        if release:
            group_kwargs['first_release'], created = Release.objects.get_or_create(
                project=project,
                version=release,
                defaults={
                    'date_added': date,
                },
            )

            Activity.objects.create(
                type=Activity.RELEASE,
                project=project,
                ident=release,
                data={'version': release},
                datetime=date,
            )

        result = safe_execute(
            self._save_aggregate, event=event, hashes=hashes, **group_kwargs)
        # safe_execute swallows exceptions and returns None on failure
        if result is None:
            return

        group, is_new, is_regression, is_sample = result

        using = group._state.db

        event.group = group

        try:
            with transaction.atomic():
                EventMapping.objects.create(project=project,
                                            group=group,
                                            event_id=event_id)
        except IntegrityError:
            self.logger.info('Duplicate EventMapping found for event_id=%s',
                             event_id)
            return event

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic():
                    event.save()
            except IntegrityError:
                self.logger.info('Duplicate Event found for event_id=%s',
                                 event_id)
                return event

        safe_execute(Group.objects.add_tags,
                     group,
                     tags,
                     _with_transaction=False)

        if not raw:
            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info(
                'Raw event passed; skipping post process for event_id=%s',
                event_id)

        index_event.delay(event)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
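
Across the newer revisions, the recurring transaction.atomic() plus caught-IntegrityError idiom is an insert-or-ignore against a unique constraint (here, project plus event_id), written so the failed INSERT cannot poison the surrounding transaction. Isolated as a helper it is simply the following sketch; create_once is a hypothetical name, not something the originals define.

    from django.db import IntegrityError, transaction

    def create_once(model, **kwargs):
        # Returns (instance, True) on a fresh insert and (None, False) when
        # the unique constraint reports the row already exists. The atomic
        # block confines the aborted INSERT to its own savepoint.
        try:
            with transaction.atomic():
                return model.objects.create(**kwargs), True
        except IntegrityError:
            return None, False

This is exactly the shape of the duplicate-EventMapping and duplicate-Event branches above, which log the duplicate and return the event early.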