Example #1
    def test_override_options(self):
        assert snuba.OVERRIDE_OPTIONS == {}
        with snuba.options_override({'foo': 1}):
            assert snuba.OVERRIDE_OPTIONS == {'foo': 1}
            with snuba.options_override({'foo': 2}):
                assert snuba.OVERRIDE_OPTIONS == {'foo': 2}
            assert snuba.OVERRIDE_OPTIONS == {'foo': 1}
        assert snuba.OVERRIDE_OPTIONS == {}
Example #2
    def test_override_options(self):
        assert snuba.OVERRIDE_OPTIONS == {'consistent': False}
        with snuba.options_override({'foo': 1}):
            assert snuba.OVERRIDE_OPTIONS == {'foo': 1, 'consistent': False}
            with snuba.options_override({'foo': 2}):
                assert snuba.OVERRIDE_OPTIONS == {'foo': 2, 'consistent': False}
            assert snuba.OVERRIDE_OPTIONS == {'foo': 1, 'consistent': False}
        assert snuba.OVERRIDE_OPTIONS == {'consistent': False}
Example #3
    def test_override_options(self):
        assert snuba.OVERRIDE_OPTIONS == {"consistent": False}
        with snuba.options_override({"foo": 1}):
            assert snuba.OVERRIDE_OPTIONS == {"foo": 1, "consistent": False}
            with snuba.options_override({"foo": 2}):
                assert snuba.OVERRIDE_OPTIONS == {"foo": 2, "consistent": False}
            assert snuba.OVERRIDE_OPTIONS == {"foo": 1, "consistent": False}
        assert snuba.OVERRIDE_OPTIONS == {"consistent": False}
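
The assertions above pin down the contract of snuba.options_override: a context manager that layers overrides onto the module-level OVERRIDE_OPTIONS dict and restores the previous state on exit, so nested overrides unwind in order. A minimal sketch consistent with those assertions (a hypothetical reimplementation, not Sentry's actual code):

from contextlib import contextmanager

OVERRIDE_OPTIONS = {"consistent": False}

@contextmanager
def options_override(overrides):
    # Snapshot the keys we are about to shadow so exit can restore them.
    previous = {k: OVERRIDE_OPTIONS[k] for k in overrides if k in OVERRIDE_OPTIONS}
    added = [k for k in overrides if k not in OVERRIDE_OPTIONS]
    OVERRIDE_OPTIONS.update(overrides)
    try:
        yield
    finally:
        OVERRIDE_OPTIONS.update(previous)  # restore shadowed values
        for k in added:                    # drop keys this override introduced
            del OVERRIDE_OPTIONS[k]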
Example #4
def deliver_digest(key, schedule_timestamp=None):
    from sentry import digests
    from sentry.mail import mail_adapter

    try:
        project, target_type, target_identifier = split_key(key)
    except Project.DoesNotExist as error:
        logger.info("Cannot deliver digest %r due to error: %s", key, error)
        digests.delete(key)
        return

    minimum_delay = ProjectOption.objects.get_value(
        project, get_option_key("mail", "minimum_delay"))

    # All Snuba reads in this block run with the "consistent" override
    # enabled, so recently written events are visible while building the digest.
    with snuba.options_override({"consistent": True}):
        try:
            with digests.digest(key, minimum_delay=minimum_delay) as records:
                digest = build_digest(project, records)
        except InvalidState as error:
            logger.info("Skipped digest delivery: %s", error, exc_info=True)
            return

        if digest:
            mail_adapter.notify_digest(project, digest, target_type,
                                       target_identifier)
Example #5
def deliver_digest(key, schedule_timestamp=None):
    from sentry import digests
    from sentry.models import ProjectOption  # Django 1.9 setup issue
    from sentry.models import Project  # Django 1.9 setup issue

    try:
        plugin, project = split_key(key)
    except Project.DoesNotExist as error:
        logger.info('Cannot deliver digest %r due to error: %s', key, error)
        digests.delete(key)
        return

    minimum_delay = ProjectOption.objects.get_value(
        project, get_option_key(plugin.get_conf_key(), 'minimum_delay'))

    with snuba.options_override({'consistent': True}):
        try:
            with digests.digest(key, minimum_delay=minimum_delay) as records:
                digest = build_digest(project, records)
        except InvalidState as error:
            logger.info('Skipped digest delivery: %s', error, exc_info=True)
            return

        if digest:
            plugin.notify_digest(project, digest)
Example #6
def deliver_digest(key, schedule_timestamp=None):
    from sentry import digests
    from sentry.mail import mail_adapter

    try:
        project, target_type, target_identifier = split_key(key)
    except Project.DoesNotExist as error:
        logger.info(f"Cannot deliver digest {key} due to error: {error}")
        digests.delete(key)
        return

    minimum_delay = ProjectOption.objects.get_value(
        project, get_option_key("mail", "minimum_delay"))

    with snuba.options_override({"consistent": True}):
        try:
            with digests.digest(key, minimum_delay=minimum_delay) as records:
                digest, logs = build_digest(project, records)
        except InvalidState as error:
            logger.info(f"Skipped digest delivery: {error}", exc_info=True)
            return

        if digest:
            mail_adapter.notify_digest(project, digest, target_type,
                                       target_identifier)
        else:
            logger.info(
                "Skipped digest delivery due to empty digest",
                extra={
                    "project": project.id,
                    "target_type": target_type.value,
                    "target_identifier": target_identifier,
                    "build_digest_logs": logs,
                },
            )
Example #7
    def query_hook(self, event, start, end, environment_id):
        project_id = event.project_id
        cache_key = f"r.c.spc:{project_id}-{environment_id}"
        session_count_last_hour = cache.get(cache_key)
        if session_count_last_hour is None:
            filters = {"project_id": [project_id]}
            if environment_id:
                filters["environment"] = [environment_id]
            with options_override({"consistent": False}):
                result_totals = raw_query(
                    selected_columns=["sessions"],
                    rollup=60,
                    dataset=Dataset.Sessions,
                    start=end - timedelta(minutes=60),
                    end=end,
                    filter_keys=filters,
                    groupby=["bucketed_started"],
                    referrer="rules.conditions.event_frequency.EventFrequencyPercentCondition",
                )

            if result_totals["data"]:
                session_count_last_hour = sum(
                    bucket["sessions"] for bucket in result_totals["data"])
            else:
                session_count_last_hour = False
            cache.set(cache_key, session_count_last_hour, 600)

        if session_count_last_hour >= MIN_SESSIONS_TO_FIRE:
            interval_in_minutes = (percent_intervals[self.get_option(
                "interval")][1].total_seconds() // 60)
            avg_sessions_in_interval = session_count_last_hour / (
                60 / interval_in_minutes)
            issue_count = self.tsdb.get_sums(
                model=self.tsdb.models.group,
                keys=[event.group_id],
                start=start,
                end=end,
                environment_id=environment_id,
                use_cache=True,
            )[event.group_id]
            if issue_count > avg_sessions_in_interval:
                # We want to better understand when and why this is happening, so we're logging it for now
                self.logger.info(
                    "EventFrequencyPercentCondition.query_hook",
                    extra={
                        "issue_count": issue_count,
                        "project_id": project_id,
                        "avg_sessions_in_interval": avg_sessions_in_interval,
                    },
                )
            return 100 * round(issue_count / avg_sessions_in_interval, 4)

        return 0
Example #8
    def query_hook(self, event: Event, start: datetime, end: datetime,
                   environment_id: str) -> int:
        project_id = event.project_id
        cache_key = f"r.c.spc:{project_id}-{environment_id}"
        session_count_last_hour = cache.get(cache_key)
        if session_count_last_hour is None:
            with options_override({"consistent": False}):
                session_count_last_hour = release_health.get_project_sessions_count(  # type: ignore
                    project_id=project_id,
                    environment_id=environment_id,
                    rollup=60,
                    start=end - timedelta(minutes=60),
                    end=end,
                )

            cache.set(cache_key, session_count_last_hour, 600)

        if session_count_last_hour >= MIN_SESSIONS_TO_FIRE:
            interval_in_minutes = (percent_intervals[self.get_option(
                "interval")][1].total_seconds() // 60)
            avg_sessions_in_interval = session_count_last_hour / (
                60 / interval_in_minutes)
            issue_count = self.tsdb.get_sums(
                model=self.tsdb.models.group,
                keys=[event.group_id],
                start=start,
                end=end,
                environment_id=environment_id,
                use_cache=True,
            )[event.group_id]
            if issue_count > avg_sessions_in_interval:
                # We want to better understand when and why this is happening, so we're logging it for now
                self.logger.info(
                    "EventFrequencyPercentCondition.query_hook",
                    extra={
                        "issue_count": issue_count,
                        "project_id": project_id,
                        "avg_sessions_in_interval": avg_sessions_in_interval,
                    },
                )
            percent: int = 100 * round(issue_count / avg_sessions_in_interval,
                                       4)
            return percent

        return 0
Example #9
    def query_hook(self, event, start, end, environment_id):
        project_id = event.project_id
        cache_key = f"r.c.spc:{project_id}-{environment_id}"
        session_count_last_hour = cache.get(cache_key)
        if session_count_last_hour is None:
            filters = {"project_id": [project_id]}
            if environment_id:
                filters["environment"] = [environment_id]
            with options_override({"consistent": False}):
                result_totals = raw_query(
                    selected_columns=["sessions"],
                    rollup=60,
                    dataset=Dataset.Sessions,
                    start=end - timedelta(minutes=60),
                    end=end,
                    filter_keys=filters,
                    groupby=["bucketed_started"],
                    referrer="rules.conditions.event_frequency.EventFrequencyPercentCondition",
                )

            if result_totals["data"]:
                session_count_last_hour = sum(
                    bucket["sessions"] for bucket in result_totals["data"])
            else:
                session_count_last_hour = False
            cache.set(cache_key, session_count_last_hour, 600)

        if session_count_last_hour:
            interval_in_minutes = (percent_intervals[self.get_option(
                "interval")][1].total_seconds() // 60)
            avg_sessions_in_interval = session_count_last_hour / (
                60 / interval_in_minutes)
            issue_count = self.tsdb.get_sums(
                model=self.tsdb.models.group,
                keys=[event.group_id],
                start=start,
                end=end,
                environment_id=environment_id,
                use_cache=True,
            )[event.group_id]
            return 100 * round(issue_count / avg_sessions_in_interval, 4)

        return 0
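
All three query_hook variants share the same percent formula: scale the last hour's session count down to the configured rule interval, then express the group's event count as a percentage of that average. A worked instance with made-up numbers:

# Hypothetical numbers, for the arithmetic only.
session_count_last_hour = 600   # sessions seen in the last 60 minutes
interval_in_minutes = 5         # the configured rule interval
issue_count = 25                # events for the group in that interval

# 600 sessions/hour scaled to a 5-minute window: 600 / (60 / 5) = 50.
avg_sessions_in_interval = session_count_last_hour / (60 / interval_in_minutes)
percent = 100 * round(issue_count / avg_sessions_in_interval, 4)
assert percent == 50.0          # 25 of ~50 sessions hit the issue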
Example #10
def deliver_digest(key, schedule_timestamp=None):
    from sentry import digests

    try:
        plugin, project = split_key(key)
    except Project.DoesNotExist as error:
        logger.info('Cannot deliver digest %r due to error: %s', key, error)
        digests.delete(key)
        return

    minimum_delay = ProjectOption.objects.get_value(
        project, get_option_key(plugin.get_conf_key(), 'minimum_delay')
    )

    with snuba.options_override({'consistent': True}):
        try:
            with digests.digest(key, minimum_delay=minimum_delay) as records:
                digest = build_digest(project, records)
        except InvalidState as error:
            logger.info('Skipped digest delivery: %s', error, exc_info=True)
            return

        if digest:
            plugin.notify_digest(project, digest)
Example #11
def post_process_group(
    is_new, is_regression, is_new_group_environment, cache_key, group_id=None, **kwargs
):
    """
    Fires post processing hooks for a group.
    """
    from sentry.eventstore.models import Event
    from sentry.eventstore.processing import event_processing_store
    from sentry.reprocessing2 import is_reprocessed_event
    from sentry.utils import snuba

    with snuba.options_override({"consistent": True}):
        # We use the data being present/missing in the processing store
        # to ensure that we don't duplicate work should the forwarding consumers
        # need to rewind history.
        data = event_processing_store.get(cache_key)
        if not data:
            logger.info(
                "post_process.skipped",
                extra={"cache_key": cache_key, "reason": "missing_cache"},
            )
            return
        event = Event(
            project_id=data["project"], event_id=data["event_id"], group_id=group_id, data=data
        )

        set_current_event_project(event.project_id)

        is_transaction_event = not bool(event.group_id)

        from sentry.models import EventDict, Organization, Project

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Project and Org since we're reading the Event object
        # from cache which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project.set_cached_field_value(
            "organization", Organization.objects.get_from_cache(id=event.project.organization_id)
        )

        # Simplified post processing for transaction events.
        # This should eventually be completely removed and transactions
        # will not go through any post processing.
        if is_transaction_event:
            transaction_processed.send_robust(
                sender=post_process_group,
                project=event.project,
                event=event,
            )

            event_processing_store.delete_by_key(cache_key)

            return

        is_reprocessed = is_reprocessed_event(event.data)

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Commit, GroupInboxReason
        from sentry.models.group import get_group_with_redirect
        from sentry.models.groupinbox import add_group_to_inbox
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.groupowner import process_suspect_commits
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind Group since we're reading the Event object
        # from cache, which may contain a stale group and project
        event.group, _ = get_group_with_redirect(event.group_id)
        event.group_id = event.group.id

        event.group.project = event.project
        event.group.project.set_cached_field_value("organization", event.project.organization)

        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        if is_reprocessed and is_new:
            add_group_to_inbox(event.group, GroupInboxReason.REPROCESSED)

        if not is_reprocessed:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)
            if not has_reappeared:  # If true, we added the .UNIGNORED reason already
                if is_new:
                    add_group_to_inbox(event.group, GroupInboxReason.NEW)
                elif is_regression:
                    add_group_to_inbox(event.group, GroupInboxReason.REGRESSION)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(
                event, is_new, is_regression, is_new_group_environment, has_reappeared
            )
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                safe_execute(callback, event, futures, _with_transaction=False)

            try:
                lock = locks.get(
                    f"w-o:{event.group_id}-d-l",
                    duration=10,
                )
                with lock.acquire():
                    has_commit_key = f"w-o:{event.project.organization_id}-h-c"
                    org_has_commit = cache.get(has_commit_key)
                    if org_has_commit is None:
                        org_has_commit = Commit.objects.filter(
                            organization_id=event.project.organization_id
                        ).exists()
                        cache.set(has_commit_key, org_has_commit, 3600)

                    if org_has_commit:
                        group_cache_key = f"w-o-i:g-{event.group_id}"
                        if cache.get(group_cache_key):
                            metrics.incr(
                                "sentry.tasks.process_suspect_commits.debounce",
                                tags={"detail": "w-o-i:g debounce"},
                            )
                        else:
                            from sentry.utils.committers import get_frame_paths

                            cache.set(group_cache_key, True, 604800)  # 1 week in seconds
                            event_frames = get_frame_paths(event.data)
                            process_suspect_commits.delay(
                                event_id=event.event_id,
                                event_platform=event.platform,
                                event_frames=event_frames,
                                group_id=event.group_id,
                                project_id=event.project_id,
                            )
            except UnableToAcquireLock:
                pass
            except Exception:
                logger.exception("Failed to process suspect commits")

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = {"event.created"}
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(
                event.project
            ):
                process_resource_change_bound.delay(
                    action="created", sender="Error", instance_id=event.event_id, instance=event
                )
            if is_new:
                process_resource_change_bound.delay(
                    action="created", sender="Group", instance_id=event.group_id
                )

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                # note: "is_regresion" is spelled this way in the original source
                plugin_post_process_group(
                    plugin_slug=plugin.slug, event=event, is_new=is_new, is_regresion=is_regression
                )

            from sentry import similarity

            safe_execute(similarity.record, event.project, [event], _with_transaction=False)

        # Patch attachments that were ingested on the standalone path.
        update_existing_attachments(event)

        if not is_reprocessed:
            event_processed.send_robust(
                sender=post_process_group,
                project=event.project,
                event=event,
                primary_hash=kwargs.get("primary_hash"),
            )

        with metrics.timer("tasks.post_process.delete_event_cache"):
            event_processing_store.delete_by_key(cache_key)
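
The suspect-commits block above debounces with two cache keys: one remembering whether the organization has any commits at all (refreshed hourly), and one marking that a group was already queued within the last week. A self-contained sketch of that debounce pattern, with a dict standing in for the shared cache (names are hypothetical):

import time

_cache = {}  # stand-in for the shared cache used by the task

def debounce(key, ttl_seconds):
    """Return True only the first time key is seen within the TTL window."""
    now = time.time()
    expires_at = _cache.get(key)
    if expires_at is not None and expires_at > now:
        return False                    # still debounced; skip the work
    _cache[key] = now + ttl_seconds
    return True

if debounce("w-o-i:g-123", 604800):     # one week, as in the task above
    pass  # enqueue process_suspect_commits.delay(...) here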
Example #12
def post_process_group(event, is_new, is_regression, is_sample,
                       is_new_group_environment, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    with snuba.options_override({'consistent': True}):
        if check_event_already_post_processed(event):
            logger.info('post_process.skipped',
                        extra={
                            'project_id': event.project_id,
                            'event_id': event.event_id,
                            'reason': 'duplicate',
                        })
            return

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Group since we're pickling the whole Event object
        # which may contain a stale Group.
        event.group, _ = get_group_with_redirect(event.group_id)
        event.group_id = event.group.id

        project_id = event.group.project_id
        with configure_scope() as scope:
            scope.set_tag("project", project_id)

        # Re-bind Project since we're pickling the whole Event object
        # which may contain a stale Project.
        event.project = Project.objects.get_from_cache(id=project_id)

        _capture_stats(event, is_new)

        # we process snoozes before rules as it might create a regression
        has_reappeared = process_snoozes(event.group)

        handle_owner_assignment(event.project, event.group, event)

        rp = RuleProcessor(event, is_new, is_regression,
                           is_new_group_environment, has_reappeared)
        has_alert = False
        # TODO(dcramer): ideally this would fanout, but serializing giant
        # objects back and forth isn't super efficient
        for callback, futures in rp.apply():
            has_alert = True
            safe_execute(callback, event, futures)

        if features.has(
                'projects:servicehooks',
                project=event.project,
        ):
            allowed_events = set(['event.created'])
            if has_alert:
                allowed_events.add('event.alert')

            if allowed_events:
                for servicehook_id, events in _get_service_hooks(
                        project_id=event.project_id):
                    if any(e in allowed_events for e in events):
                        process_service_hook.delay(
                            servicehook_id=servicehook_id,
                            event=event,
                        )

        if event.get_event_type() == 'error' and _should_send_error_created_hooks(event.project):
            process_resource_change_bound.delay(
                action='created',
                sender='Error',
                instance_id=event.event_id,
                instance=event,
            )
        if is_new:
            process_resource_change_bound.delay(
                action='created',
                sender='Group',
                instance_id=event.group_id,
            )

        for plugin in plugins.for_project(event.project):
            plugin_post_process_group(
                plugin_slug=plugin.slug,
                event=event,
                is_new=is_new,
                is_regresion=is_regression,
                is_sample=is_sample,
            )

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            event=event,
            primary_hash=kwargs.get('primary_hash'),
        )
Example #13
def post_process_group(event, is_new, is_regression, is_new_group_environment,
                       **kwargs):
    """
    Fires post processing hooks for a group.
    """
    set_current_project(event.project_id)

    from sentry.utils import snuba

    with snuba.options_override({"consistent": True}):
        if check_event_already_post_processed(event):
            logger.info(
                "post_process.skipped",
                extra={
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                    "reason": "duplicate",
                },
            )
            return

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project, Organization, EventDict
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        if event.group_id:
            # Re-bind Group since we're pickling the whole Event object
            # which may contain a stale Project.
            event.group, _ = get_group_with_redirect(event.group_id)
            event.group_id = event.group.id

        # Re-bind Project and Org since we're pickling the whole Event object
        # which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project._organization_cache = Organization.objects.get_from_cache(
            id=event.project.organization_id)
        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        if event.group_id:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(event, is_new, is_regression,
                               is_new_group_environment, has_reappeared)
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                with sentry_sdk.start_transaction(op="post_process_group",
                                                  name="rule_processor_apply",
                                                  sampled=True):
                    safe_execute(callback, event, futures)

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = set(["event.created"])
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(
                            project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(
                                servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(event.project):
                process_resource_change_bound.delay(action="created",
                                                    sender="Error",
                                                    instance_id=event.event_id,
                                                    instance=event)
            if is_new:
                process_resource_change_bound.delay(action="created",
                                                    sender="Group",
                                                    instance_id=event.group_id)

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(plugin_slug=plugin.slug,
                                          event=event,
                                          is_new=is_new,
                                          is_regresion=is_regression)

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            event=event,
            primary_hash=kwargs.get("primary_hash"),
        )
Example #14
def post_process_group(is_new,
                       is_regression,
                       is_new_group_environment,
                       cache_key,
                       group_id=None,
                       event=None,
                       **kwargs):
    """
    Fires post processing hooks for a group.
    """
    from sentry.eventstore.models import Event
    from sentry.eventstore.processing import event_processing_store
    from sentry.utils import snuba
    from sentry.reprocessing2 import is_reprocessed_event

    with snuba.options_override({"consistent": True}):
        # We use the data being present/missing in the processing store
        # to ensure that we don't duplicate work should the forwarding consumers
        # need to rewind history.
        #
        # While we always send the cache_key and never send the event parameter now,
        # the code to handle `event` has to stick around for a self-hosted release cycle.
        if cache_key and event is None:
            data = event_processing_store.get(cache_key)
            if not data:
                logger.info(
                    "post_process.skipped",
                    extra={
                        "cache_key": cache_key,
                        "reason": "missing_cache"
                    },
                )
                return
            event = Event(project_id=data["project"],
                          event_id=data["event_id"],
                          group_id=group_id,
                          data=data)
        elif event and check_event_already_post_processed(event):
            if cache_key:
                event_processing_store.delete_by_key(cache_key)
            logger.info(
                "post_process.skipped",
                extra={
                    "reason": "duplicate",
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                },
            )
            return

        if is_reprocessed_event(event.data):
            logger.info(
                "post_process.skipped",
                extra={
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                    "reason": "reprocessed",
                },
            )
            return

        set_current_project(event.project_id)

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project, Organization, EventDict
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        if event.group_id:
            # Re-bind Group since we're reading the Event object
            # from cache, which may contain a stale group and project
            event.group, _ = get_group_with_redirect(event.group_id)
            event.group_id = event.group.id

        # Re-bind Project and Org since we're reading the Event object
        # from cache which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project._organization_cache = Organization.objects.get_from_cache(
            id=event.project.organization_id)
        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        if event.group_id:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(event, is_new, is_regression,
                               is_new_group_environment, has_reappeared)
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                with sentry_sdk.start_transaction(op="post_process_group",
                                                  name="rule_processor_apply",
                                                  sampled=True):
                    safe_execute(callback, event, futures)

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = set(["event.created"])
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(
                            project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(
                                servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(event.project):
                process_resource_change_bound.delay(action="created",
                                                    sender="Error",
                                                    instance_id=event.event_id,
                                                    instance=event)
            if is_new:
                process_resource_change_bound.delay(action="created",
                                                    sender="Group",
                                                    instance_id=event.group_id)

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(plugin_slug=plugin.slug,
                                          event=event,
                                          is_new=is_new,
                                          is_regresion=is_regression)

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            event=event,
            primary_hash=kwargs.get("primary_hash"),
        )
        with metrics.timer("tasks.post_process.delete_event_cache"):
            event_processing_store.delete_by_key(cache_key)
Example #15
def post_process_group(event, is_new, is_regression, is_sample, is_new_group_environment, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    with snuba.options_override({'consistent': True}):
        if check_event_already_post_processed(event):
            logger.info('post_process.skipped', extra={
                'project_id': event.project_id,
                'event_id': event.event_id,
                'reason': 'duplicate',
            })
            return

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Group since we're pickling the whole Event object
        # which may contain a stale Group.
        event.group, _ = get_group_with_redirect(event.group_id)
        event.group_id = event.group.id

        project_id = event.group.project_id
        with configure_scope() as scope:
            scope.set_tag("project", project_id)

        # Re-bind Project since we're pickling the whole Event object
        # which may contain a stale Project.
        event.project = Project.objects.get_from_cache(id=project_id)

        _capture_stats(event, is_new)

        # we process snoozes before rules as it might create a regression
        has_reappeared = process_snoozes(event.group)

        handle_owner_assignment(event.project, event.group, event)

        rp = RuleProcessor(event, is_new, is_regression, is_new_group_environment, has_reappeared)
        has_alert = False
        # TODO(dcramer): ideally this would fanout, but serializing giant
        # objects back and forth isn't super efficient
        for callback, futures in rp.apply():
            has_alert = True
            safe_execute(callback, event, futures)

        if features.has(
            'projects:servicehooks',
            project=event.project,
        ):
            allowed_events = set(['event.created'])
            if has_alert:
                allowed_events.add('event.alert')

            if allowed_events:
                for servicehook_id, events in _get_service_hooks(project_id=event.project_id):
                    if any(e in allowed_events for e in events):
                        process_service_hook.delay(
                            servicehook_id=servicehook_id,
                            event=event,
                        )

        if is_new:
            process_resource_change_bound.delay(
                action='created',
                sender='Group',
                instance_id=event.group_id,
            )

        for plugin in plugins.for_project(event.project):
            plugin_post_process_group(
                plugin_slug=plugin.slug,
                event=event,
                is_new=is_new,
                is_regresion=is_regression,
                is_sample=is_sample,
            )

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            group=event.group,
            event=event,
            primary_hash=kwargs.get('primary_hash'),
        )
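
Every variant above finishes by emitting event_processed.send_robust(...). send_robust is the Django signal method that delivers to all connected receivers and collects exceptions as return values instead of raising, so one broken receiver cannot abort post-processing. A self-contained illustration (the demo signal and receivers are hypothetical):

from django.dispatch import Signal

demo_signal = Signal()

def ok_receiver(sender, **kwargs):
    return "ok"

def failing_receiver(sender, **kwargs):
    raise RuntimeError("boom")

demo_signal.connect(ok_receiver)
demo_signal.connect(failing_receiver)

# responses is a list of (receiver, return value or raised exception);
# the RuntimeError is captured here rather than propagated.
responses = demo_signal.send_robust(sender=None)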