Example #1
def post_process_group(event, is_new, is_regression, is_sample, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    from sentry.models import Project
    from sentry.rules.processor import RuleProcessor

    project = Project.objects.get_from_cache(id=event.group.project_id)

    if settings.SENTRY_ENABLE_EXPLORE_CODE:
        record_affected_code.delay(event=event)

    if settings.SENTRY_ENABLE_EXPLORE_USERS:
        record_affected_user.delay(event=event)

    for plugin in plugins.for_project(project):
        plugin_post_process_group.apply_async(
            kwargs={
                'plugin_slug': plugin.slug,
                'event': event,
                'is_new': is_new,
                'is_regression': is_regression,
                'is_sample': is_sample,
            },
            expires=300,
        )

    rp = RuleProcessor(event, is_new, is_regression, is_sample)
    # TODO(dcramer): ideally this would fanout, but serializing giant
    # objects back and forth isn't super efficient
    for callback, futures in rp.apply():
        safe_execute(callback, event, futures)
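
The final loop above is the contract every example here shares: RuleProcessor.apply() yields (callback, futures) pairs, and each pair is handed to safe_execute. A minimal sketch of that contract, assuming only the RuleFuture shape implied by the futures[0].rule and futures[0].kwargs assertions in the tests below (the names are illustrative, not Sentry's actual internals):

from collections import namedtuple

# One future per rule that matched: the Rule itself plus any extra
# kwargs the action should receive.
RuleFuture = namedtuple("RuleFuture", ["rule", "kwargs"])

def notify_callback(event, futures):
    # A callback sees the event once, with the futures for every rule
    # that routed to it, so an action (e.g. mail) can batch per event.
    for future in futures:
        print("rule %r fired with kwargs %r" % (future.rule, future.kwargs))

# post_process_group then drives it exactly as in the loop above:
#     for callback, futures in rp.apply():
#         safe_execute(callback, event, futures)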
Example #2
    def test_no_filters(self):
        # set up an alert rule with 1 condition and no filters that passes
        self.event = self.store_event(data={}, project_id=self.project.id)

        Rule.objects.filter(project=self.event.project).delete()
        self.rule = Rule.objects.create(
            project=self.event.project,
            data={
                "conditions": [EVERY_EVENT_COND_DATA],
                "actions": [EMAIL_ACTION_DATA],
                "filter_match": "any",
            },
        )

        rp = RuleProcessor(
            self.event,
            is_new=True,
            is_regression=True,
            is_new_group_environment=True,
            has_reappeared=True,
        )
        results = list(rp.apply())
        assert len(results) == 1
        callback, futures = results[0]
        assert len(futures) == 1
        assert futures[0].rule == self.rule
        assert futures[0].kwargs == {}
Example #3
    def test_integrated(self):
        rp = RuleProcessor(
            self.event,
            is_new=True,
            is_regression=True,
            is_new_group_environment=True,
            has_reappeared=True,
        )
        results = list(rp.apply())
        assert len(results) == 1
        callback, futures = results[0]
        assert len(futures) == 1
        assert futures[0].rule == self.rule
        assert futures[0].kwargs == {}
        assert RuleFireHistory.objects.filter(
            rule=self.rule, group=self.event.group).count() == 1

        # should not apply twice due to default frequency
        results = list(rp.apply())
        assert len(results) == 0
        assert RuleFireHistory.objects.filter(
            rule=self.rule, group=self.event.group).count() == 1

        # now ensure that moving the last update backwards
        # in time causes the rule to trigger again
        GroupRuleStatus.objects.filter(rule=self.rule).update(
            last_active=timezone.now() -
            timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1))

        results = list(rp.apply())
        assert len(results) == 1
        assert RuleFireHistory.objects.filter(
            rule=self.rule, group=self.event.group).count() == 2
Example #4
    def test_no_conditions(self):
        # if a rule has no conditions/triggers it should still pass
        self.event = self.store_event(data={}, project_id=self.project.id)

        Rule.objects.filter(project=self.event.project).delete()
        self.rule = Rule.objects.create(
            project=self.event.project,
            data={
                "actions": [EMAIL_ACTION_DATA],
                "action_match": "any"
            },
        )

        rp = RuleProcessor(
            self.event,
            is_new=True,
            is_regression=True,
            is_new_group_environment=True,
            has_reappeared=True,
        )
        results = list(rp.apply())
        assert len(results) == 1
        callback, futures = results[0]
        assert len(futures) == 1
        assert futures[0].rule == self.rule
        assert futures[0].kwargs == {}
Example #5
    def test_integrated(self):
        event = self.create_event()

        action_data = {
            'id': 'sentry.rules.actions.notify_event.NotifyEventAction',
        }
        condition_data = {
            'id': 'sentry.rules.conditions.every_event.EveryEventCondition',
        }

        Rule.objects.filter(project=event.project).delete()
        rule = Rule.objects.create(project=event.project,
                                   data={
                                       'conditions': [condition_data],
                                       'actions': [action_data],
                                   })

        rp = RuleProcessor(event,
                           is_new=True,
                           is_regression=True,
                           is_sample=False)
        results = list(rp.apply())
        assert len(results) == 1
        callback, futures = results[0]
        assert callback == plugins.get('mail').rule_notify
        assert len(futures) == 1
        assert futures[0].rule == rule
        assert futures[0].kwargs == {}
Example #6
def post_process_group(event, is_new, is_regression, is_sample, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    from sentry.models import Project
    from sentry.rules.processor import RuleProcessor

    project = Project.objects.get_from_cache(id=event.group.project_id)

    _capture_stats(event, is_new)

    if settings.SENTRY_ENABLE_EXPLORE_CODE:
        record_affected_code.delay(event=event)

    record_affected_user.delay(event=event)

    record_additional_tags(event=event)

    rp = RuleProcessor(event, is_new, is_regression, is_sample)
    # TODO(dcramer): ideally this would fanout, but serializing giant
    # objects back and forth isn't super efficient
    for callback, futures in rp.apply():
        safe_execute(callback, event, futures)

    for plugin in plugins.for_project(project):
        plugin_post_process_group(
            plugin_slug=plugin.slug,
            event=event,
            is_new=is_new,
            is_regression=is_regression,
            is_sample=is_sample,
        )
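
Note that this variant calls plugin_post_process_group synchronously, where Example #1 dispatched it with apply_async. A minimal sketch of what that helper plausibly does, inferred from its call sites in these examples (the body is an assumption, not verbatim Sentry code):

def plugin_post_process_group(plugin_slug, event, **kwargs):
    # Resolve the plugin by slug and fire its post_process hook inside
    # safe_execute, so one failing plugin cannot break the others.
    plugin = plugins.get(plugin_slug)
    safe_execute(plugin.post_process, event=event, group=event.group, **kwargs)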
Example #7
def post_process_group(event, is_new, is_regression, is_sample, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    from sentry.models import Project
    from sentry.rules.processor import RuleProcessor

    project_id = event.group.project_id
    Raven.tags_context({
        'project': project_id,
    })

    project = Project.objects.get_from_cache(id=project_id)

    _capture_stats(event, is_new)

    rp = RuleProcessor(event, is_new, is_regression, is_sample)
    # TODO(dcramer): ideally this would fanout, but serializing giant
    # objects back and forth isn't super efficient
    for callback, futures in rp.apply():
        safe_execute(callback, event, futures)

    for plugin in plugins.for_project(project):
        plugin_post_process_group(
            plugin_slug=plugin.slug,
            event=event,
            is_new=is_new,
            is_regression=is_regression,
            is_sample=is_sample,
        )
Example #8
    def test_integrated(self):
        event = self.create_event()

        action_data = {
            'id': 'sentry.rules.actions.notify_event.NotifyEventAction',
        }
        condition_data = {
            'id': 'sentry.rules.conditions.every_event.EveryEventCondition',
        }

        Rule.objects.filter(project=event.project).delete()
        rule = Rule.objects.create(
            project=event.project,
            data={
                'conditions': [condition_data],
                'actions': [action_data],
            }
        )

        rp = RuleProcessor(event, is_new=True, is_regression=True, is_sample=False)
        results = list(rp.apply())
        assert len(results) == 1
        callback, futures = results[0]
        assert callback == plugins.get('mail').rule_notify
        assert len(futures) == 1
        assert futures[0].rule == rule
        assert futures[0].kwargs == {}
Example #9
    def test_filter_fails(self):
        # set up a simple alert rule with 1 condition and 1 filter that doesn't pass
        self.event = self.store_event(data={}, project_id=self.project.id)

        filter_data = {
            "id": "tests.sentry.rules.test_processor.MockFilterFalse"
        }

        Rule.objects.filter(project=self.event.project).delete()
        self.rule = Rule.objects.create(
            project=self.event.project,
            data={
                "conditions": [EVERY_EVENT_COND_DATA, filter_data],
                "actions": [EMAIL_ACTION_DATA],
            },
        )
        # patch the rule registry to contain the mocked rules
        with patch("sentry.rules.processor.rules", init_registry()):
            rp = RuleProcessor(
                self.event,
                is_new=True,
                is_regression=True,
                is_new_group_environment=True,
                has_reappeared=True,
            )
            results = list(rp.apply())
            assert len(results) == 0
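
The id "tests.sentry.rules.test_processor.MockFilterFalse" resolves only because the test patches the registry via init_registry(). A plausible definition of that mock, assuming Sentry's EventFilter base class and its passes(event, state) hook (the import path is an assumption):

from sentry.rules.filters.base import EventFilter

class MockFilterFalse(EventFilter):
    # Hypothetical mock: a filter that never passes, so a rule that
    # includes it should yield no (callback, futures) results.
    id = "tests.sentry.rules.test_processor.MockFilterFalse"
    label = "Mock filter which never passes."

    def passes(self, event, state):
        return False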
Example #10
    def test_slow_conditions_evaluate_last(self):
        # Make sure slow/expensive conditions are evaluated last, so that we can skip evaluating
        # them if cheaper conditions satisfy the rule.
        self.rule.update(
            data={
                "conditions": [
                    {"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition"},
                    {"id": "tests.sentry.rules.test_processor.MockConditionTrue"},
                ],
                "action_match": "any",
                "actions": [EMAIL_ACTION_DATA],
            },
        )
        with patch("sentry.rules.processor.rules", init_registry()), patch(
            "sentry.rules.conditions.event_frequency.BaseEventFrequencyCondition.passes"
        ) as passes:
            rp = RuleProcessor(
                self.event,
                is_new=True,
                is_regression=True,
                is_new_group_environment=True,
                has_reappeared=True,
            )
            results = rp.apply()
        assert len(results) == 1
        # We should never call `passes` on the frequency condition since we should run the cheap
        # mock condition first.
        assert passes.call_count == 0
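
For the ordering test to prove anything, the cheap mock condition must pass so the "any" match short-circuits before the expensive frequency condition is consulted. A sketch of that mock, assuming an EventCondition base class with the same passes(event, state) hook as the filter sketch above (again an assumption, not verbatim Sentry code):

from sentry.rules.conditions.base import EventCondition

class MockConditionTrue(EventCondition):
    # Hypothetical mock: always passes and is trivially cheap, so the
    # processor never needs to evaluate the slow frequency condition.
    id = "tests.sentry.rules.test_processor.MockConditionTrue"
    label = "Mock condition which always passes."

    def passes(self, event, state):
        return True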
Example #11
    def test_resolved_issue(self):
        self.event.group.status = GroupStatus.RESOLVED
        self.event.group.save()
        rp = RuleProcessor(
            self.event,
            is_new=True,
            is_regression=True,
            is_new_group_environment=True,
            has_reappeared=True,
        )
        results = list(rp.apply())
        assert len(results) == 0
Example #12
def post_process_group(event, is_new, is_regression, is_sample, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    # NOTE: we must pass through the full Event object, and not an
    # event_id since the Event object may not actually have been stored
    # in the database due to sampling.
    from sentry.models import Project
    from sentry.models.group import get_group_with_redirect
    from sentry.rules.processor import RuleProcessor

    # Re-bind Group since we're pickling the whole Event object
    # which may contain a stale Group.
    event.group, _ = get_group_with_redirect(event.group_id)
    event.group_id = event.group.id

    project_id = event.group.project_id
    Raven.tags_context({
        'project': project_id,
    })

    # Re-bind Project since we're pickling the whole Event object
    # which may contain a stale Project.
    event.project = Project.objects.get_from_cache(id=project_id)

    _capture_stats(event, is_new)

    # we process snoozes before rules as it might create a regression
    process_snoozes(event.group)

    rp = RuleProcessor(event, is_new, is_regression, is_sample)
    # TODO(dcramer): ideally this would fanout, but serializing giant
    # objects back and forth isn't super efficient
    for callback, futures in rp.apply():
        safe_execute(callback, event, futures)

    for plugin in plugins.for_project(event.project):
        plugin_post_process_group(
            plugin_slug=plugin.slug,
            event=event,
            is_new=is_new,
            is_regression=is_regression,
            is_sample=is_sample,
        )

    event_processed.send_robust(
        sender=post_process_group,
        project=event.project,
        group=event.group,
        event=event,
    )
Example #13
    def test_latest_release_environment(self):
        # set up an alert rule with one latest-release filter, scoped to an environment, that passes
        release = self.create_release(
            project=self.project,
            version="2021-02.newRelease",
            date_added=datetime(2020, 9, 1, 3, 8, 24, 880386),
            environments=[self.environment],
        )

        self.event = self.store_event(
            data={
                "release": release.version,
                "tags": [["environment", self.environment.name]],
            },
            project_id=self.project.id,
        )

        Rule.objects.filter(project=self.event.project).delete()
        self.rule = Rule.objects.create(
            environment_id=self.environment.id,
            project=self.event.project,
            data={
                "actions": [EMAIL_ACTION_DATA],
                "filter_match":
                "any",
                "conditions": [
                    {
                        "id":
                        "sentry.rules.filters.latest_release.LatestReleaseFilter",
                        "name": "The event is from the latest release",
                    },
                ],
            },
        )

        rp = RuleProcessor(
            self.event,
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            has_reappeared=False,
        )
        results = list(rp.apply())
        assert len(results) == 1
        callback, futures = results[0]
        assert len(futures) == 1
        assert futures[0].rule == self.rule
        assert futures[0].kwargs == {}
Example #14
    def test_integrated(self):
        event = self.create_event()

        action_data = {
            'id': 'sentry.rules.actions.notify_event.NotifyEventAction',
        }
        condition_data = {
            'id': 'sentry.rules.conditions.every_event.EveryEventCondition',
        }

        Rule.objects.filter(project=event.project).delete()
        rule = Rule.objects.create(
            project=event.project,
            data={
                'conditions': [condition_data],
                'actions': [action_data],
            }
        )

        rp = RuleProcessor(
            event,
            is_new=True,
            is_regression=True,
            is_new_group_environment=True,
            has_reappeared=True)
        results = list(rp.apply())
        assert len(results) == 1
        callback, futures = results[0]
        assert callback == plugins.get('mail').rule_notify
        assert len(futures) == 1
        assert futures[0].rule == rule
        assert futures[0].kwargs == {}

        # should not apply twice due to default frequency
        results = list(rp.apply())
        assert len(results) == 0

        # now ensure that moving the last update backwards
        # in time causes the rule to trigger again
        GroupRuleStatus.objects.filter(rule=rule).update(
            last_active=timezone.now() - timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1),
        )

        results = list(rp.apply())
        assert len(results) == 1
Example #15
    def test_multiple_rules(self):
        rule_2 = Rule.objects.create(
            project=self.event.project,
            data={
                "conditions": [EVERY_EVENT_COND_DATA],
                "actions": [EMAIL_ACTION_DATA]
            },
        )
        rp = RuleProcessor(
            self.event,
            is_new=True,
            is_regression=True,
            is_new_group_environment=True,
            has_reappeared=True,
        )
        self.run_query_test(rp, 3)

        GroupRuleStatus.objects.filter(rule__in=[self.rule, rule_2]).update(
            last_active=timezone.now() -
            timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1))

        # GroupRuleStatus queries should be cached
        self.run_query_test(rp, 0)

        cache.clear()
        GroupRuleStatus.objects.filter(rule__in=[self.rule, rule_2]).update(
            last_active=timezone.now() -
            timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1))

        # The GroupRuleStatus rows were already created above, so we should perform two fewer
        # queries since we don't need to create the rows, only fetch them
        self.run_query_test(rp, 1)

        cache.clear()
        GroupRuleStatus.objects.filter(rule__in=[self.rule, rule_2]).update(
            last_active=timezone.now() -
            timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1))

        # Test that we don't get errors if we try to create statuses that already exist due to a
        # race condition
        with mock.patch("sentry.rules.processor.GroupRuleStatus") as mocked_GroupRuleStatus:
            call_count = 0

            def mock_filter(*args, **kwargs):
                nonlocal call_count
                if call_count == 0:
                    call_count += 1
                    # Make a query here to not throw the query counts off
                    return GroupRuleStatus.objects.filter(id=-1)
                return GroupRuleStatus.objects.filter(*args, **kwargs)

            mocked_GroupRuleStatus.objects.filter.side_effect = mock_filter
            # Even though the rows already exist, we should go through the creation step and make
            # the extra queries. The conflicting insert doesn't seem to be counted here since it
            # creates no rows.
            self.run_query_test(rp, 2)
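
test_multiple_rules leans on a run_query_test(rp, expected_queries) helper that this listing does not include. A hedged reconstruction of it as a test-class method, assuming Django's CaptureQueriesContext and that only GroupRuleStatus queries are counted; both assumptions are inferred from how the test uses the helper:

from django.db import connection
from django.test.utils import CaptureQueriesContext

def run_query_test(self, rp, expected_queries):
    # Hypothetical reconstruction: capture the SQL issued while the
    # processor applies and count only the GroupRuleStatus queries,
    # since that is the table whose caching the test exercises.
    with CaptureQueriesContext(connection) as queries:
        rp.apply()
    status_queries = [
        q for q in queries.captured_queries
        if "grouprulestatus" in q["sql"].lower()
    ]
    assert len(status_queries) == expected_queries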
Example #16
    def test_integrated(self):
        event = self.store_event(data={}, project_id=self.project.id)
        action_data = {
            "id": "sentry.mail.actions.NotifyEmailAction",
            "targetType": ActionTargetType.ISSUE_OWNERS.value,
            "targetIdentifier": None,
        }
        condition_data = {
            "id": "sentry.rules.conditions.every_event.EveryEventCondition"
        }

        Rule.objects.filter(project=event.project).delete()
        rule = Rule.objects.create(project=event.project,
                                   data={
                                       "conditions": [condition_data],
                                       "actions": [action_data]
                                   })

        rp = RuleProcessor(
            event,
            is_new=True,
            is_regression=True,
            is_new_group_environment=True,
            has_reappeared=True,
        )
        results = list(rp.apply())
        assert len(results) == 1
        callback, futures = results[0]
        assert len(futures) == 1
        assert futures[0].rule == rule
        assert futures[0].kwargs == {}

        # should not apply twice due to default frequency
        results = list(rp.apply())
        assert len(results) == 0

        # now ensure that moving the last update backwards
        # in time causes the rule to trigger again
        GroupRuleStatus.objects.filter(rule=rule).update(
            last_active=timezone.now() - timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1)
        )

        results = list(rp.apply())
        assert len(results) == 1
Example #17
    def test_integrated(self):
        event = self.create_event()

        action_data = {
            'id': 'sentry.rules.actions.notify_event.NotifyEventAction',
        }
        condition_data = {
            'id': 'sentry.rules.conditions.every_event.EveryEventCondition',
        }

        Rule.objects.filter(project=event.project).delete()
        rule = Rule.objects.create(project=event.project,
                                   data={
                                       'conditions': [condition_data],
                                       'actions': [action_data],
                                   })

        rp = RuleProcessor(event,
                           is_new=True,
                           is_regression=True,
                           is_new_group_environment=True,
                           has_reappeared=True)
        results = list(rp.apply())
        assert len(results) == 1
        callback, futures = results[0]
        assert callback == plugins.get('mail').rule_notify
        assert len(futures) == 1
        assert futures[0].rule == rule
        assert futures[0].kwargs == {}

        # should not apply twice due to default frequency
        results = list(rp.apply())
        assert len(results) == 0

        # now ensure that moving the last update backwards
        # in time causes the rule to trigger again
        GroupRuleStatus.objects.filter(rule=rule).update(
            last_active=timezone.now() - timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1)
        )

        results = list(rp.apply())
        assert len(results) == 1
Example #18
def post_process_group(event, is_new, is_regression, is_sample, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    from sentry.models import Project
    from sentry.rules.processor import RuleProcessor

    project_id = event.group.project_id
    Raven.tags_context({
        'project': project_id,
    })

    project = Project.objects.get_from_cache(id=project_id)

    _capture_stats(event, is_new)

    rp = RuleProcessor(event, is_new, is_regression, is_sample)
    # TODO(dcramer): ideally this would fanout, but serializing giant
    # objects back and forth isn't super efficient
    for callback, futures in rp.apply():
        safe_execute(callback, event, futures)

    for plugin in plugins.for_project(project):
        plugin_post_process_group(
            plugin_slug=plugin.slug,
            event=event,
            is_new=is_new,
            is_regression=is_regression,
            is_sample=is_sample,
        )

    event_processed.send_robust(
        sender=post_process_group,
        project=project,
        group=event.group,
        event=event,
    )
Example #19
def post_process_group(
    is_new, is_regression, is_new_group_environment, cache_key, group_id=None, **kwargs
):
    """
    Fires post processing hooks for a group.
    """
    from sentry.eventstore.models import Event
    from sentry.eventstore.processing import event_processing_store
    from sentry.reprocessing2 import is_reprocessed_event
    from sentry.utils import snuba

    with snuba.options_override({"consistent": True}):
        # We use the data being present/missing in the processing store
        # to ensure that we don't duplicate work should the forwarding consumers
        # need to rewind history.
        data = event_processing_store.get(cache_key)
        if not data:
            logger.info(
                "post_process.skipped",
                extra={"cache_key": cache_key, "reason": "missing_cache"},
            )
            return
        event = Event(
            project_id=data["project"], event_id=data["event_id"], group_id=group_id, data=data
        )

        set_current_event_project(event.project_id)

        is_transaction_event = not bool(event.group_id)

        from sentry.models import EventDict, Organization, Project

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Project and Org since we're reading the Event object
        # from cache which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project.set_cached_field_value(
            "organization", Organization.objects.get_from_cache(id=event.project.organization_id)
        )

        # Simplified post processing for transaction events.
        # This should eventually be completely removed and transactions
        # will not go through any post processing.
        if is_transaction_event:
            transaction_processed.send_robust(
                sender=post_process_group,
                project=event.project,
                event=event,
            )

            event_processing_store.delete_by_key(cache_key)

            return

        is_reprocessed = is_reprocessed_event(event.data)

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Commit, GroupInboxReason
        from sentry.models.group import get_group_with_redirect
        from sentry.models.groupinbox import add_group_to_inbox
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.groupowner import process_suspect_commits
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind Group since we're reading the Event object
        # from cache, which may contain a stale group and project
        event.group, _ = get_group_with_redirect(event.group_id)
        event.group_id = event.group.id

        event.group.project = event.project
        event.group.project.set_cached_field_value("organization", event.project.organization)

        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        if is_reprocessed and is_new:
            add_group_to_inbox(event.group, GroupInboxReason.REPROCESSED)

        if not is_reprocessed:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)
            if not has_reappeared:  # If true, we added the .UNIGNORED reason already
                if is_new:
                    add_group_to_inbox(event.group, GroupInboxReason.NEW)
                elif is_regression:
                    add_group_to_inbox(event.group, GroupInboxReason.REGRESSION)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(
                event, is_new, is_regression, is_new_group_environment, has_reappeared
            )
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                safe_execute(callback, event, futures, _with_transaction=False)

            try:
                lock = locks.get(
                    f"w-o:{event.group_id}-d-l",
                    duration=10,
                )
                with lock.acquire():
                    has_commit_key = f"w-o:{event.project.organization_id}-h-c"
                    org_has_commit = cache.get(has_commit_key)
                    if org_has_commit is None:
                        org_has_commit = Commit.objects.filter(
                            organization_id=event.project.organization_id
                        ).exists()
                        cache.set(has_commit_key, org_has_commit, 3600)

                    if org_has_commit:
                        group_cache_key = f"w-o-i:g-{event.group_id}"
                        if cache.get(group_cache_key):
                            metrics.incr(
                                "sentry.tasks.process_suspect_commits.debounce",
                                tags={"detail": "w-o-i:g debounce"},
                            )
                        else:
                            from sentry.utils.committers import get_frame_paths

                            cache.set(group_cache_key, True, 604800)  # 1 week in seconds
                            event_frames = get_frame_paths(event.data)
                            process_suspect_commits.delay(
                                event_id=event.event_id,
                                event_platform=event.platform,
                                event_frames=event_frames,
                                group_id=event.group_id,
                                project_id=event.project_id,
                            )
            except UnableToAcquireLock:
                pass
            except Exception:
                logger.exception("Failed to process suspect commits")

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = {"event.created"}
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(
                event.project
            ):
                process_resource_change_bound.delay(
                    action="created", sender="Error", instance_id=event.event_id, instance=event
                )
            if is_new:
                process_resource_change_bound.delay(
                    action="created", sender="Group", instance_id=event.group_id
                )

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(
                    plugin_slug=plugin.slug, event=event, is_new=is_new, is_regression=is_regression
                )

            from sentry import similarity

            safe_execute(similarity.record, event.project, [event], _with_transaction=False)

        # Patch attachments that were ingested on the standalone path.
        update_existing_attachments(event)

        if not is_reprocessed:
            event_processed.send_robust(
                sender=post_process_group,
                project=event.project,
                event=event,
                primary_hash=kwargs.get("primary_hash"),
            )

        with metrics.timer("tasks.post_process.delete_event_cache"):
            event_processing_store.delete_by_key(cache_key)
Example #20
def post_process_group(event, is_new, is_regression, is_sample,
                       is_new_group_environment, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    with snuba.options_override({'consistent': True}):
        if check_event_already_post_processed(event):
            logger.info('post_process.skipped',
                        extra={
                            'project_id': event.project_id,
                            'event_id': event.event_id,
                            'reason': 'duplicate',
                        })
            return

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Group since we're pickling the whole Event object
        # which may contain a stale Group.
        event.group, _ = get_group_with_redirect(event.group_id)
        event.group_id = event.group.id

        project_id = event.group.project_id
        with configure_scope() as scope:
            scope.set_tag("project", project_id)

        # Re-bind Project since we're pickling the whole Event object
        # which may contain a stale Project.
        event.project = Project.objects.get_from_cache(id=project_id)

        _capture_stats(event, is_new)

        # we process snoozes before rules as it might create a regression
        has_reappeared = process_snoozes(event.group)

        handle_owner_assignment(event.project, event.group, event)

        rp = RuleProcessor(event, is_new, is_regression,
                           is_new_group_environment, has_reappeared)
        has_alert = False
        # TODO(dcramer): ideally this would fanout, but serializing giant
        # objects back and forth isn't super efficient
        for callback, futures in rp.apply():
            has_alert = True
            safe_execute(callback, event, futures)

        if features.has(
                'projects:servicehooks',
                project=event.project,
        ):
            allowed_events = set(['event.created'])
            if has_alert:
                allowed_events.add('event.alert')

            if allowed_events:
                for servicehook_id, events in _get_service_hooks(
                        project_id=event.project_id):
                    if any(e in allowed_events for e in events):
                        process_service_hook.delay(
                            servicehook_id=servicehook_id,
                            event=event,
                        )

        if event.get_event_type() == 'error' and _should_send_error_created_hooks(event.project):
            process_resource_change_bound.delay(
                action='created',
                sender='Error',
                instance_id=event.event_id,
                instance=event,
            )
        if is_new:
            process_resource_change_bound.delay(
                action='created',
                sender='Group',
                instance_id=event.group_id,
            )

        for plugin in plugins.for_project(event.project):
            plugin_post_process_group(
                plugin_slug=plugin.slug,
                event=event,
                is_new=is_new,
                is_regression=is_regression,
                is_sample=is_sample,
            )

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            event=event,
            primary_hash=kwargs.get('primary_hash'),
        )
Example #21
def post_process_group(event, is_new, is_regression, is_new_group_environment,
                       **kwargs):
    """
    Fires post processing hooks for a group.
    """
    set_current_project(event.project_id)

    from sentry.utils import snuba

    with snuba.options_override({"consistent": True}):
        if check_event_already_post_processed(event):
            logger.info(
                "post_process.skipped",
                extra={
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                    "reason": "duplicate",
                },
            )
            return

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project, Organization, EventDict
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        if event.group_id:
            # Re-bind Group since we're pickling the whole Event object
            # which may contain a stale Group.
            event.group, _ = get_group_with_redirect(event.group_id)
            event.group_id = event.group.id

        # Re-bind Project and Org since we're pickling the whole Event object
        # which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project._organization_cache = Organization.objects.get_from_cache(
            id=event.project.organization_id)
        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        if event.group_id:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(event, is_new, is_regression,
                               is_new_group_environment, has_reappeared)
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                with sentry_sdk.start_transaction(op="post_process_group",
                                                  name="rule_processor_apply",
                                                  sampled=True):
                    safe_execute(callback, event, futures)

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = set(["event.created"])
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(
                            project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(
                                servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(event.project):
                process_resource_change_bound.delay(action="created",
                                                    sender="Error",
                                                    instance_id=event.event_id,
                                                    instance=event)
            if is_new:
                process_resource_change_bound.delay(action="created",
                                                    sender="Group",
                                                    instance_id=event.group_id)

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(plugin_slug=plugin.slug,
                                          event=event,
                                          is_new=is_new,
                                          is_regression=is_regression)

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            event=event,
            primary_hash=kwargs.get("primary_hash"),
        )
Example #22
def post_process_group(event, is_new, is_regression, is_sample, is_new_group_environment, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    with snuba.options_override({'consistent': True}):
        if check_event_already_post_processed(event):
            logger.info('post_process.skipped', extra={
                'project_id': event.project_id,
                'event_id': event.event_id,
                'reason': 'duplicate',
            })
            return

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Group since we're pickling the whole Event object
        # which may contain a stale Group.
        event.group, _ = get_group_with_redirect(event.group_id)
        event.group_id = event.group.id

        project_id = event.group.project_id
        with configure_scope() as scope:
            scope.set_tag("project", project_id)

        # Re-bind Project since we're pickling the whole Event object
        # which may contain a stale Project.
        event.project = Project.objects.get_from_cache(id=project_id)

        _capture_stats(event, is_new)

        # we process snoozes before rules as it might create a regression
        has_reappeared = process_snoozes(event.group)

        handle_owner_assignment(event.project, event.group, event)

        rp = RuleProcessor(event, is_new, is_regression, is_new_group_environment, has_reappeared)
        has_alert = False
        # TODO(dcramer): ideally this would fanout, but serializing giant
        # objects back and forth isn't super efficient
        for callback, futures in rp.apply():
            has_alert = True
            safe_execute(callback, event, futures)

        if features.has(
            'projects:servicehooks',
            project=event.project,
        ):
            allowed_events = set(['event.created'])
            if has_alert:
                allowed_events.add('event.alert')

            if allowed_events:
                for servicehook_id, events in _get_service_hooks(project_id=event.project_id):
                    if any(e in allowed_events for e in events):
                        process_service_hook.delay(
                            servicehook_id=servicehook_id,
                            event=event,
                        )

        if is_new:
            process_resource_change_bound.delay(
                action='created',
                sender='Group',
                instance_id=event.group_id,
            )

        for plugin in plugins.for_project(event.project):
            plugin_post_process_group(
                plugin_slug=plugin.slug,
                event=event,
                is_new=is_new,
                is_regression=is_regression,
                is_sample=is_sample,
            )

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            group=event.group,
            event=event,
            primary_hash=kwargs.get('primary_hash'),
        )
Example #23
def post_process_group(event, is_new, is_regression, is_sample,
                       is_new_group_environment, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    with redis.clusters.get('default').map() as client:
        result = client.set(
            u'pp:{}/{}'.format(event.project_id, event.event_id),
            u'{:.0f}'.format(time.time()),
            ex=60 * 60,
            nx=True,
        )

    if not result.value:
        logger.info('post_process.skipped',
                    extra={
                        'project_id': event.project_id,
                        'event_id': event.event_id,
                        'reason': 'duplicate',
                    })
        return

    # NOTE: we must pass through the full Event object, and not an
    # event_id since the Event object may not actually have been stored
    # in the database due to sampling.
    from sentry.models import Project
    from sentry.models.group import get_group_with_redirect
    from sentry.rules.processor import RuleProcessor
    from sentry.tasks.servicehooks import process_service_hook

    # Re-bind Group since we're pickling the whole Event object
    # which may contain a stale Group.
    event.group, _ = get_group_with_redirect(event.group_id)
    event.group_id = event.group.id

    project_id = event.group.project_id
    Raven.tags_context({
        'project': project_id,
    })

    # Re-bind Project since we're pickling the whole Event object
    # which may contain a stale Project.
    event.project = Project.objects.get_from_cache(id=project_id)

    _capture_stats(event, is_new)

    # we process snoozes before rules as it might create a regression
    process_snoozes(event.group)

    rp = RuleProcessor(event, is_new, is_regression, is_new_group_environment)
    has_alert = False
    # TODO(dcramer): ideally this would fanout, but serializing giant
    # objects back and forth isn't super efficient
    for callback, futures in rp.apply():
        has_alert = True
        safe_execute(callback, event, futures)

    if features.has(
            'projects:servicehooks',
            project=event.project,
    ):
        allowed_events = set(['event.created'])
        if has_alert:
            allowed_events.add('event.alert')

        if allowed_events:
            for servicehook_id, events in _get_service_hooks(
                    project_id=event.project_id):
                if any(e in allowed_events for e in events):
                    process_service_hook.delay(
                        servicehook_id=servicehook_id,
                        event=event,
                    )

    for plugin in plugins.for_project(event.project):
        plugin_post_process_group(
            plugin_slug=plugin.slug,
            event=event,
            is_new=is_new,
            is_regression=is_regression,
            is_sample=is_sample,
        )

    event_processed.send_robust(
        sender=post_process_group,
        project=event.project,
        group=event.group,
        event=event,
        primary_hash=kwargs.get('primary_hash'),
    )
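
Several examples above call a check_event_already_post_processed(event) helper instead of inlining this Redis check. A hedged sketch of that helper, reconstructed from the SET-NX-EX block at the top of this example (the body is inferred from this listing, not taken from Sentry's source):

import time

from sentry.utils import redis

def check_event_already_post_processed(event):
    # Same idempotency key as above: SET ... NX succeeds only for the
    # first worker to see this (project, event) pair, and the one-hour
    # TTL bounds how long duplicates are remembered.
    with redis.clusters.get('default').map() as client:
        result = client.set(
            u'pp:{}/{}'.format(event.project_id, event.event_id),
            u'{:.0f}'.format(time.time()),
            ex=60 * 60,
            nx=True,
        )
    # result.value is falsy when the key already existed, i.e. the
    # event was already post processed once.
    return not result.value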
Example #24
def post_process_group(event, is_new, is_regression, is_sample, is_new_group_environment, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    # NOTE: we must pass through the full Event object, and not an
    # event_id since the Event object may not actually have been stored
    # in the database due to sampling.
    from sentry.models import Project
    from sentry.models.group import get_group_with_redirect
    from sentry.rules.processor import RuleProcessor
    from sentry.tasks.servicehooks import process_service_hook

    # Re-bind Group since we're pickling the whole Event object
    # which may contain a stale Group.
    event.group, _ = get_group_with_redirect(event.group_id)
    event.group_id = event.group.id

    project_id = event.group.project_id
    Raven.tags_context({
        'project': project_id,
    })

    # Re-bind Project since we're pickling the whole Event object
    # which may contain a stale Project.
    event.project = Project.objects.get_from_cache(id=project_id)

    _capture_stats(event, is_new)

    # we process snoozes before rules as it might create a regression
    process_snoozes(event.group)

    rp = RuleProcessor(event, is_new, is_regression, is_new_group_environment)
    has_alert = False
    # TODO(dcramer): ideally this would fanout, but serializing giant
    # objects back and forth isn't super efficient
    for callback, futures in rp.apply():
        has_alert = True
        safe_execute(callback, event, futures)

    if features.has(
        'projects:servicehooks',
        project=event.project,
    ):
        allowed_events = set(['event.created'])
        if has_alert:
            allowed_events.add('event.alert')

        if allowed_events:
            for servicehook_id, events in _get_service_hooks(project_id=event.project_id):
                if any(e in allowed_events for e in events):
                    process_service_hook.delay(
                        servicehook_id=servicehook_id,
                        event=event,
                    )

    for plugin in plugins.for_project(event.project):
        plugin_post_process_group(
            plugin_slug=plugin.slug,
            event=event,
            is_new=is_new,
            is_regression=is_regression,
            is_sample=is_sample,
        )

    event_processed.send_robust(
        sender=post_process_group,
        project=event.project,
        group=event.group,
        event=event,
        primary_hash=kwargs.get('primary_hash'),
    )
Example #25
def post_process_group(is_new,
                       is_regression,
                       is_new_group_environment,
                       cache_key,
                       group_id=None,
                       event=None,
                       **kwargs):
    """
    Fires post processing hooks for a group.
    """
    from sentry.eventstore.models import Event
    from sentry.eventstore.processing import event_processing_store
    from sentry.utils import snuba
    from sentry.reprocessing2 import is_reprocessed_event

    with snuba.options_override({"consistent": True}):
        # We use the data being present/missing in the processing store
        # to ensure that we don't duplicate work should the forwarding consumers
        # need to rewind history.
        #
        # While we always send the cache_key and never send the event parameter now,
        # the code to handle `event` has to stick around for a self-hosted release cycle.
        if cache_key and event is None:
            data = event_processing_store.get(cache_key)
            if not data:
                logger.info(
                    "post_process.skipped",
                    extra={
                        "cache_key": cache_key,
                        "reason": "missing_cache"
                    },
                )
                return
            event = Event(project_id=data["project"],
                          event_id=data["event_id"],
                          group_id=group_id,
                          data=data)
        elif event and check_event_already_post_processed(event):
            if cache_key:
                event_processing_store.delete_by_key(cache_key)
            logger.info(
                "post_process.skipped",
                extra={
                    "reason": "duplicate",
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                },
            )
            return

        if is_reprocessed_event(event.data):
            logger.info(
                "post_process.skipped",
                extra={
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                    "reason": "reprocessed",
                },
            )
            return

        set_current_project(event.project_id)

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project, Organization, EventDict
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        if event.group_id:
            # Re-bind Group since we're reading the Event object
            # from cache, which may contain a stale group and project
            event.group, _ = get_group_with_redirect(event.group_id)
            event.group_id = event.group.id

        # Re-bind Project and Org since we're reading the Event object
        # from cache which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project._organization_cache = Organization.objects.get_from_cache(
            id=event.project.organization_id)
        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        if event.group_id:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(event, is_new, is_regression,
                               is_new_group_environment, has_reappeared)
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                with sentry_sdk.start_transaction(op="post_process_group",
                                                  name="rule_processor_apply",
                                                  sampled=True):
                    safe_execute(callback, event, futures)

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = set(["event.created"])
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(
                            project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(
                                servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(event.project):
                process_resource_change_bound.delay(action="created",
                                                    sender="Error",
                                                    instance_id=event.event_id,
                                                    instance=event)
            if is_new:
                process_resource_change_bound.delay(action="created",
                                                    sender="Group",
                                                    instance_id=event.group_id)

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(plugin_slug=plugin.slug,
                                          event=event,
                                          is_new=is_new,
                                          is_regression=is_regression)

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            event=event,
            primary_hash=kwargs.get("primary_hash"),
        )
        with metrics.timer("tasks.post_process.delete_event_cache"):
            event_processing_store.delete_by_key(cache_key)