def reprocess_events(project_id, **kwargs):
    """Re-insert resolved events for *project_id* into the normal pipeline.

    A per-project lock ensures only one worker reprocesses a project at a
    time; when more resolved events remain after this batch, the task
    schedules itself again.
    """
    from sentry.models import ProcessingIssue
    from sentry.coreapi import ClientApiHelper
    from sentry import app

    key = 'events:reprocess_events:%s' % project_id
    pending_more = False
    project_lock = app.locks.get(key, duration=60)
    try:
        with project_lock.acquire():
            resolved, pending_more = ProcessingIssue.objects.find_resolved(project_id)
            if resolved:
                api = ClientApiHelper()
                for evt in resolved:
                    api.insert_data_to_database(
                        evt.data.data, from_reprocessing=True)
                    create_reprocessing_report(
                        project_id=project_id, event_id=evt.event_id)
                    evt.delete()
    except UnableToAcquireLock as error:
        logger.warning('reprocess_events.fail', extra={'error': error})

    # Another batch is still waiting -- schedule a follow-up run.
    if pending_more:
        reprocess_events.delay(project_id=project_id)
def test_save_path(self, mock_produce, mock_preprocess_event, mock_process_event, mock_save_event):
    """An event that needs no processing flows through the kafka-ingest
    path without scheduling the preprocess/process/save celery tasks."""
    with self.feature('projects:kafka-ingest'):
        target_project = self.create_project()
        payload = self._create_event_with_platform(target_project, 'doesnt_need_process')
        api = ClientApiHelper(project_id=self.project.id)
        api.context.bind_project(target_project)
        api.insert_data_to_database(payload)

        self._call_consumer(mock_produce)  # preprocess
        self._call_consumer(mock_produce)  # save

        # No celery task may have been scheduled for any stage.
        assert mock_preprocess_event.delay.call_count == 0
        assert mock_process_event.delay.call_count == 0
        assert mock_save_event.delay.call_count == 0

        stored = Event.objects.get(
            project_id=target_project.id, event_id=payload['event_id'])
        stored_data = stored.get_raw_data()
        assert 'foo' not in stored_data
        assert stored_data['platform'] == 'doesnt_need_process'
def reprocess_events(project_id, **kwargs):
    """Push events with resolved processing issues back through ingestion."""
    from sentry.models import ProcessingIssue
    from sentry.coreapi import ClientApiHelper
    from sentry import app

    have_more = False
    lock = app.locks.get(
        'events:reprocess_events:%s' % project_id, duration=60)
    try:
        with lock.acquire():
            events, have_more = ProcessingIssue.objects.find_resolved(project_id)
            if events:
                helper = ClientApiHelper()
                for event in events:
                    helper.insert_data_to_database(
                        event.data.data, from_reprocessing=True)
                    create_reprocessing_report(
                        project_id=project_id, event_id=event.event_id)
                    event.delete()
    except UnableToAcquireLock as error:
        logger.warning('reprocess_events.fail', extra={'error': error})

    if have_more:
        # Not everything was handled in this batch; requeue ourselves.
        reprocess_events.delay(project_id=project_id)
def reprocess_events(project_id, **kwargs):
    """Re-ingest every event of a project whose processing issues resolved."""
    from sentry.models import ProcessingIssue
    from sentry.coreapi import ClientApiHelper
    from sentry import app

    lock_key = "events:reprocess_events:%s" % project_id
    more_pending = False
    reprocess_lock = app.locks.get(lock_key, duration=60)
    try:
        with reprocess_lock.acquire():
            resolved_events, more_pending = \
                ProcessingIssue.objects.find_resolved(project_id)
            if resolved_events:
                api_helper = ClientApiHelper()
                for resolved in resolved_events:
                    api_helper.insert_data_to_database(
                        resolved.data.data, from_reprocessing=True)
                    create_reprocessing_report(
                        project_id=project_id, event_id=resolved.event_id)
                    # Only the raw event is deleted here; the reprocessing
                    # report stays alive until the queue eventually kicks
                    # in and cleans it up.
                    resolved.delete()
    except UnableToAcquireLock as error:
        logger.warning("reprocess_events.fail", extra={"error": error})

    if more_pending:
        # More resolved events remain -- run again.
        reprocess_events.delay(project_id=project_id)
def send(self, **kwargs):
    """Record an event against the internal Sentry project.

    Looks up the configured internal project, normalizes the event data
    and writes it to the database.  Failures are logged (and re-raised
    only when ``self.raise_send_errors`` is set) rather than propagated.
    """
    # TODO(dcramer): this should respect rate limits/etc and use the normal
    # pipeline
    from sentry.app import tsdb
    from sentry.coreapi import ClientApiHelper
    from sentry.event_manager import EventManager
    from sentry.models import Project

    helper = ClientApiHelper(
        agent='raven-python/%s (sentry %s)' % (raven.VERSION, sentry.VERSION),
        project_id=settings.SENTRY_PROJECT,
        version=self.protocol_version,
    )
    try:
        project = Project.objects.get_from_cache(
            id=settings.SENTRY_PROJECT)
    except DatabaseError:
        self.error_logger.error('Unable to fetch internal project',
                                exc_info=True)
        return
    except Project.DoesNotExist:
        self.error_logger.error('Internal project (id=%s) does not exist',
                                settings.SENTRY_PROJECT)
        return
    except Exception:
        # BUG FIX: this was `exec_info=True`, which is not a valid logging
        # keyword -- Logger.error would raise TypeError instead of logging
        # the traceback.  The other except clauses already spell it right.
        self.error_logger.error(
            'Unable to fetch internal project for some unknown reason',
            exc_info=True)
        return

    helper.context.bind_project(project)
    metrics.incr('events.total')
    kwargs['project'] = project.id

    try:
        manager = EventManager(kwargs)
        data = manager.normalize()
        tsdb.incr_multi([
            (tsdb.models.project_total_received, project.id),
            (tsdb.models.organization_total_received, project.organization_id),
        ])
        helper.insert_data_to_database(data)
    except Exception as e:
        if self.raise_send_errors:
            raise
        self.error_logger.error(
            'Unable to record event: %s\nEvent was: %r', e,
            kwargs['message'], exc_info=True)
def send(self, **kwargs):
    """Store an event for the configured internal Sentry project."""
    # TODO(dcramer): this should respect rate limits/etc and use the normal
    # pipeline
    from sentry.app import tsdb
    from sentry.coreapi import ClientApiHelper
    from sentry.event_manager import EventManager
    from sentry.models import Project

    api_helper = ClientApiHelper(
        agent='raven-python/%s (sentry %s)' % (raven.VERSION, sentry.VERSION),
        project_id=settings.SENTRY_PROJECT,
        version=self.protocol_version,
    )

    try:
        internal_project = Project.objects.get_from_cache(
            id=settings.SENTRY_PROJECT)
    except DatabaseError:
        self.error_logger.error('Unable to fetch internal project',
                                exc_info=True)
        return
    except Project.DoesNotExist:
        self.error_logger.error('Internal project (id=%s) does not exist',
                                settings.SENTRY_PROJECT)
        return
    except Exception:
        self.error_logger.error(
            'Unable to fetch internal project for some unknown reason',
            exc_info=True)
        return

    api_helper.context.bind_project(internal_project)
    metrics.incr('events.total')
    kwargs['project'] = internal_project.id

    try:
        event_data = EventManager(kwargs).normalize()
        tsdb.incr_multi([
            (tsdb.models.project_total_received, internal_project.id),
            (tsdb.models.organization_total_received,
             internal_project.organization_id),
        ])
        api_helper.insert_data_to_database(event_data)
    except Exception as exc:
        if self.raise_send_errors:
            raise
        self.error_logger.error(
            'Unable to record event: %s\nEvent was: %r', exc,
            kwargs['message'], exc_info=True)
def mark_failed(self, last_checkin=None):
    """Transition this monitor into an error state.

    Updates the row guarded by an optimistic-concurrency filter on
    ``last_checkin``; returns False when another writer won the race.
    On success a failure event is inserted for the monitor's project and
    the ``monitor_failed`` signal is sent.
    """
    from sentry.coreapi import ClientApiHelper
    from sentry.event_manager import EventManager
    from sentry.models import Project
    from sentry.signals import monitor_failed

    if last_checkin is None:
        next_checkin_base = timezone.now()
        # BUG FIX: a monitor that has never checked in has
        # self.last_checkin == None; fall back to "now" instead of
        # writing NULL back into last_checkin.
        last_checkin = self.last_checkin or timezone.now()
    else:
        next_checkin_base = last_checkin

    affected = type(self).objects.filter(
        id=self.id,
        last_checkin=self.last_checkin,
    ).update(
        next_checkin=self.get_next_scheduled_checkin(next_checkin_base),
        status=MonitorStatus.ERROR,
        last_checkin=last_checkin,
    )
    if not affected:
        # Another writer updated the row first.
        return False

    event_manager = EventManager(
        {
            'logentry': {
                'message': 'Monitor failure: %s' % (self.name, ),
            },
            'contexts': {
                'monitor': {
                    'id': six.text_type(self.guid),
                },
            },
        },
        project=Project(id=self.project_id),
    )
    event_manager.normalize()
    data = event_manager.get_data()
    helper = ClientApiHelper(project_id=self.project_id)
    helper.insert_data_to_database(data)

    monitor_failed.send(monitor=self, sender=type(self))
    return True
def mark_failed(self, last_checkin=None):
    """Mark the monitor as failed; returns False if another writer won."""
    from sentry.coreapi import ClientApiHelper
    from sentry.event_manager import EventManager
    from sentry.models import Project
    from sentry.signals import monitor_failed

    if last_checkin is not None:
        next_checkin_base = last_checkin
    else:
        next_checkin_base = timezone.now()
        # A never-checked-in monitor has last_checkin == None.
        last_checkin = self.last_checkin or timezone.now()

    updated = type(self).objects.filter(
        Q(last_checkin__lte=last_checkin) | Q(last_checkin__isnull=True),
        id=self.id,
    ).update(
        next_checkin=self.get_next_scheduled_checkin(next_checkin_base),
        status=MonitorStatus.ERROR,
        last_checkin=last_checkin,
    )
    if not updated:
        # A newer check-in raced us; nothing to do.
        return False

    failure_event = EventManager(
        {
            "logentry": {"message": "Monitor failure: %s" % (self.name, )},
            "contexts": {"monitor": {"id": six.text_type(self.guid)}},
        },
        project=Project(id=self.project_id),
    )
    failure_event.normalize()
    payload = failure_event.get_data()
    ClientApiHelper(project_id=self.project_id).insert_data_to_database(payload)

    monitor_failed.send(monitor=self, sender=type(self))
    return True
def mark_failed(self, last_checkin=None):
    """Flip this monitor to MonitorStatus.ERROR and emit a failure event.

    Returns False when the guarded UPDATE matched no row (a concurrent
    writer already advanced last_checkin past ours).
    """
    from sentry.coreapi import ClientApiHelper
    from sentry.event_manager import EventManager
    from sentry.models import Project
    from sentry.signals import monitor_failed

    if last_checkin is None:
        base = timezone.now()
        last_checkin = self.last_checkin or timezone.now()
    else:
        base = last_checkin

    row_filter = Q(last_checkin__lte=last_checkin) | Q(last_checkin__isnull=True)
    changed = type(self).objects.filter(row_filter, id=self.id).update(
        next_checkin=self.get_next_scheduled_checkin(base),
        status=MonitorStatus.ERROR,
        last_checkin=last_checkin,
    )
    if not changed:
        return False

    manager = EventManager(
        {
            'logentry': {
                'message': 'Monitor failure: %s' % (self.name,),
            },
            'contexts': {
                'monitor': {
                    'id': six.text_type(self.guid),
                },
            },
        },
        project=Project(id=self.project_id),
    )
    manager.normalize()
    event_payload = manager.get_data()

    api = ClientApiHelper(project_id=self.project_id)
    api.insert_data_to_database(event_payload)

    monitor_failed.send(monitor=self, sender=type(self))
    return True
def send(self, **kwargs):
    """Record an event for the internal project, optionally forwarding it
    to a configured upstream Sentry first."""
    # TODO(dcramer): this should respect rate limits/etc and use the normal
    # pipeline

    # Report the issue to an upstream Sentry if active
    # NOTE: we don't want to check self.is_enabled() like normal, since
    # is_enabled behavior is overridden in this class. We explicitly
    # want to check if the remote is active.
    if self.remote.is_active():
        from sentry import options
        # Append some extra tags that are useful for remote reporting
        upstream_kwargs = copy.deepcopy(kwargs)
        upstream_kwargs['tags']['install-id'] = options.get('sentry:install-id')
        super(SentryInternalClient, self).send(**upstream_kwargs)

    if not is_current_event_safe():
        return

    from sentry import tsdb
    from sentry.coreapi import ClientApiHelper
    from sentry.event_manager import EventManager
    from sentry.models import Project

    api_helper = ClientApiHelper(
        agent='raven-python/%s (sentry %s)' % (raven.VERSION, sentry.VERSION),
        project_id=settings.SENTRY_PROJECT,
        version=self.protocol_version,
    )

    try:
        internal_project = Project.objects.get_from_cache(
            id=settings.SENTRY_PROJECT)
    except DatabaseError:
        self.error_logger.error('Unable to fetch internal project',
                                exc_info=True)
        return
    except Project.DoesNotExist:
        self.error_logger.error('Internal project (id=%s) does not exist',
                                settings.SENTRY_PROJECT)
        return
    except Exception:
        self.error_logger.error(
            'Unable to fetch internal project for some unknown reason',
            exc_info=True)
        return

    api_helper.context.bind_project(internal_project)
    metrics.incr('events.total')
    kwargs['project'] = internal_project.id

    try:
        # This in theory is the right way to do it because validate
        # also normalizes currently, but we just send in data already
        # normalised in the raven client now.
        # data = helper.validate_data(kwargs)
        event_data = EventManager(kwargs).normalize()
        tsdb.incr_multi([
            (tsdb.models.project_total_received, internal_project.id),
            (tsdb.models.organization_total_received,
             internal_project.organization_id),
        ])
        api_helper.insert_data_to_database(event_data)
    except Exception as exc:
        if self.raise_send_errors:
            raise
        message = kwargs.get('message')
        if not message:
            legacy_interface = kwargs.get('sentry.interface.Message', {})
            message = legacy_interface.get(
                'formatted', legacy_interface.get('message', 'unknown error'))
        self.error_logger.error(
            'Unable to record event: %s\nEvent was: %r', exc, message,
            exc_info=True)
def send(self, **kwargs):
    """Write an event to the internal project; mirror it upstream when a
    remote Sentry is active."""
    # TODO(dcramer): this should respect rate limits/etc and use the normal
    # pipeline

    # Report the issue to an upstream Sentry if active
    # NOTE: we don't want to check self.is_enabled() like normal, since
    # is_enabled behavior is overridden in this class. We explicitly
    # want to check if the remote is active.
    if self.remote.is_active():
        from sentry import options
        # Append some extra tags that are useful for remote reporting
        remote_kwargs = copy.deepcopy(kwargs)
        remote_kwargs['tags']['install-id'] = options.get('sentry:install-id')
        super(SentryInternalClient, self).send(**remote_kwargs)

    if not is_current_event_safe():
        return

    from sentry.app import tsdb
    from sentry.coreapi import ClientApiHelper
    from sentry.event_manager import EventManager
    from sentry.models import Project

    helper = ClientApiHelper(
        agent='raven-python/%s (sentry %s)' % (raven.VERSION, sentry.VERSION),
        project_id=settings.SENTRY_PROJECT,
        version=self.protocol_version,
    )

    try:
        project = Project.objects.get_from_cache(id=settings.SENTRY_PROJECT)
    except DatabaseError:
        self.error_logger.error('Unable to fetch internal project',
                                exc_info=True)
        return
    except Project.DoesNotExist:
        self.error_logger.error('Internal project (id=%s) does not exist',
                                settings.SENTRY_PROJECT)
        return
    except Exception:
        self.error_logger.error(
            'Unable to fetch internal project for some unknown reason',
            exc_info=True)
        return

    helper.context.bind_project(project)
    metrics.incr('events.total')
    kwargs['project'] = project.id

    try:
        # This in theory is the right way to do it because validate
        # also normalizes currently, but we just send in data already
        # normalised in the raven client now.
        # data = helper.validate_data(project, kwargs)
        manager = EventManager(kwargs)
        normalized = manager.normalize()
        tsdb.incr_multi([
            (tsdb.models.project_total_received, project.id),
            (tsdb.models.organization_total_received,
             project.organization_id),
        ])
        helper.insert_data_to_database(normalized)
    except Exception as err:
        if self.raise_send_errors:
            raise
        message = kwargs.get('message')
        if not message:
            msg_interface = kwargs.get('sentry.interface.Message', {})
            message = msg_interface.get(
                'formatted', msg_interface.get('message', 'unknown error'))
        self.error_logger.error(
            'Unable to record event: %s\nEvent was: %r', err, message,
            exc_info=True)