Beispiel #1
0
    def test_fingerprint_ignored(self):
        """Transaction events are never grouped, even with an explicit fingerprint."""
        error_manager = EventManager(
            make_event(event_id="a" * 32, fingerprint="fingerprint1"))
        error_event = error_manager.save(self.project.id)

        transaction_payload = make_event(
            event_id="b" * 32,
            fingerprint="fingerprint1",
            transaction="wait",
            contexts={
                "trace": {
                    "parent_span_id": "bce14471e0e9654d",
                    "trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
                    "span_id": "bf5be759039ede9a",
                }
            },
            spans=[],
            start_timestamp="2019-06-14T14:01:40Z",
            type="transaction",
            platform="python",
        )
        transaction_event = EventManager(transaction_payload).save(self.project.id)

        # Only the error event produces a group.
        assert error_event.group is not None
        assert transaction_event.group is None

        # Both events count toward the project total ...
        project_sums = tsdb.get_sums(
            tsdb.models.project,
            [self.project.id],
            error_event.datetime,
            error_event.datetime,
        )
        assert project_sums[self.project.id] == 2

        # ... but only the grouped error event counts toward the group total.
        group_sums = tsdb.get_sums(
            tsdb.models.group,
            [error_event.group.id],
            error_event.datetime,
            error_event.datetime,
        )
        assert group_sums[error_event.group.id] == 1
Beispiel #2
0
def prepare_project_usage_summary(start__stop, project):
    """Return ``(blacklisted_total, rejected_total)`` for *project* over the interval."""
    start, stop = start__stop
    day = 60 * 60 * 24  # daily rollup buckets
    blacklisted = tsdb.get_sums(
        tsdb.models.project_total_blacklisted, [project.id], start, stop, rollup=day
    )[project.id]
    rejected = tsdb.get_sums(
        tsdb.models.project_total_rejected, [project.id], start, stop, rollup=day
    )[project.id]
    return (blacklisted, rejected)
Beispiel #3
0
def prepare_project_issue_summaries(interval, project):
    """Summarize issue activity for *project* over *interval*.

    Returns ``[new_issue_count, reopened_issue_count, existing_issue_count]``,
    where each count is an *event* total attributed to issues that are new,
    reopened, or pre-existing within the interval.
    """
    start, stop = interval

    queryset = project.group_set.exclude(status=GroupStatus.IGNORED)

    # Fetch all new issues.
    new_issue_ids = set(
        queryset.filter(first_seen__gte=start,
                        first_seen__lt=stop).values_list("id", flat=True))

    # Fetch all regressions. This is a little weird, since there's no way to
    # tell *when* a group regressed using the Group model. Instead, we query
    # all groups that have been seen in the last week and have ever regressed
    # and query the Activity model to find out if they regressed within the
    # past week. (In theory, the activity table *could* be used to answer this
    # query without the subselect, but there's no suitable indexes to make its
    # performance predictable.)
    reopened_issue_ids = set(
        Activity.objects.filter(
            group__in=queryset.filter(
                last_seen__gte=start,
                last_seen__lt=stop,
                resolved_at__isnull=False,  # signals this has *ever* been resolved
            ),
            type__in=(Activity.SET_REGRESSION, Activity.SET_UNRESOLVED),
            datetime__gte=start,
            datetime__lt=stop,
        ).distinct().values_list("group_id", flat=True))

    rollup = 60 * 60 * 24  # daily buckets

    event_counts = tsdb.get_sums(tsdb.models.group,
                                 new_issue_ids | reopened_issue_ids,
                                 start,
                                 stop,
                                 rollup=rollup)

    # Fixed: use ``issue_id`` rather than shadowing the ``id`` builtin.
    new_issue_count = sum(event_counts[issue_id] for issue_id in new_issue_ids)
    reopened_issue_count = sum(
        event_counts[issue_id] for issue_id in reopened_issue_ids)
    # Whatever isn't attributed to new or reopened issues belongs to existing
    # issues; clamp at zero in case the tsdb counters are inconsistent.
    existing_issue_count = max(
        tsdb.get_sums(
            tsdb.models.project, [project.id], start, stop,
            rollup=rollup)[project.id] - new_issue_count -
        reopened_issue_count,
        0,
    )

    return [new_issue_count, reopened_issue_count, existing_issue_count]
Beispiel #4
0
    def test_save_issueless_event(self):
        """A saved transaction has no group but still counts at project scope."""
        payload = make_event(
            transaction="wait",
            contexts={
                "trace": {
                    "parent_span_id": "bce14471e0e9654d",
                    "op": "foobar",
                    "trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
                    "span_id": "bf5be759039ede9a",
                }
            },
            spans=[],
            timestamp="2019-06-14T14:01:40Z",
            start_timestamp="2019-06-14T14:01:40Z",
            type="transaction",
            platform="python",
        )
        event = EventManager(payload).save(self.project.id)

        assert event.group is None
        project_sums = tsdb.get_sums(
            tsdb.models.project,
            [self.project.id],
            event.datetime,
            event.datetime,
        )
        assert project_sums[self.project.id] == 1
Beispiel #5
0
    def test_save_issueless_event(self):
        """Saving a transaction yields an event without a group; the project counter still ticks."""
        event_data = make_event(
            transaction='wait',
            contexts={
                'trace': {
                    'parent_span_id': 'bce14471e0e9654d',
                    'trace_id': 'a0fa8803753e40fd8124b21eeb2986b5',
                    'span_id': 'bf5be759039ede9a'
                }
            },
            spans=[],
            start_timestamp='2019-06-14T14:01:40Z',
            type='transaction',
            platform='python',
        )
        event = EventManager(event_data).save(self.project.id)

        assert event.group is None
        totals = tsdb.get_sums(
            tsdb.models.project,
            [self.project.id],
            event.datetime,
            event.datetime,
        )
        assert totals[self.project.id] == 1
Beispiel #6
0
def fetch_state(project, records):
    # Records arrive in reverse chronological order, so the interval endpoints
    # come from opposite ends of the list (we query chronologically).
    # NOTE: This doesn't account for any issues that are filtered out later.
    start = records[-1].datetime
    end = records[0].datetime

    groups = Group.objects.in_bulk(
        record.value.event.group_id for record in records
    )
    rule_ids = itertools.chain.from_iterable(
        record.value.rules for record in records
    )
    return {
        "project": project,
        "groups": groups,
        "rules": Rule.objects.in_bulk(rule_ids),
        "event_counts": tsdb.get_sums(tsdb.models.group, groups.keys(), start, end),
        "user_counts": tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group, groups.keys(), start, end
        ),
    }
Beispiel #7
0
def prepare_project_issue_list(interval, project):
    """Build the issue digest list for *project* over *interval*.

    Returns ``(total_issue_count, trimmed_list)`` where each trimmed entry is
    ``(issue_id, (event_count, user_count))``.
    """
    start, stop = interval

    queryset = project.group_set.exclude(status=GroupStatus.MUTED)

    issue_ids = set()

    # Fetch all new issues.
    issue_ids.update(
        queryset.filter(
            first_seen__gte=start,
            first_seen__lt=stop,
        ).values_list('id', flat=True)
    )

    # Fetch all regressions. This is a little weird, since there's no way to
    # tell *when* a group regressed using the Group model. Instead, we query
    # all groups that have been seen in the last week and have ever regressed
    # and query the Activity model to find out if they regressed within the
    # past week. (In theory, the activity table *could* be used to answer this
    # query without the subselect, but there's no suitable indexes to make its
    # performance predictable.)
    issue_ids.update(
        Activity.objects.filter(
            group__in=queryset.filter(
                last_seen__gte=start,
                last_seen__lt=stop,
                resolved_at__isnull=False,  # signals this has *ever* been resolved
            ),
            type__in=(
                Activity.SET_REGRESSION,
                Activity.SET_UNRESOLVED,
            ),
            datetime__gte=start,
            datetime__lt=stop,
        ).distinct().values_list('group_id', flat=True)
    )

    rollup = 60 * 60 * 24  # daily buckets

    events = tsdb.get_sums(
        tsdb.models.group,
        issue_ids,
        start,
        stop,
        rollup=rollup,
    )

    users = tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group,
        issue_ids,
        start,
        stop,
        rollup=rollup,
    )

    # Fixed: use ``issue_id`` rather than shadowing the ``id`` builtin.
    return (
        len(issue_ids),
        trim_issue_list(
            [(issue_id, (events[issue_id], users[issue_id])) for issue_id in issue_ids]
        ),
    )
Beispiel #8
0
def prepare_project_issue_list(interval, project):
    """Collect new and reopened issues for *project* and return digest data.

    Returns ``(issue_count, trimmed_list)``; list entries have the shape
    ``(issue_id, (event_count, user_count))``.
    """
    start, stop = interval

    queryset = project.group_set.exclude(status=GroupStatus.MUTED)

    issue_ids = set()

    # Fetch all new issues.
    issue_ids.update(
        queryset.filter(
            first_seen__gte=start,
            first_seen__lt=stop,
        ).values_list('id', flat=True))

    # Fetch all regressions. This is a little weird, since there's no way to
    # tell *when* a group regressed using the Group model. Instead, we query
    # all groups that have been seen in the last week and have ever regressed
    # and query the Activity model to find out if they regressed within the
    # past week. (In theory, the activity table *could* be used to answer this
    # query without the subselect, but there's no suitable indexes to make its
    # performance predictable.)
    issue_ids.update(
        Activity.objects.filter(
            group__in=queryset.filter(
                last_seen__gte=start,
                last_seen__lt=stop,
                resolved_at__isnull=False,  # signals this has *ever* been resolved
            ),
            type__in=(
                Activity.SET_REGRESSION,
                Activity.SET_UNRESOLVED,
            ),
            datetime__gte=start,
            datetime__lt=stop,
        ).distinct().values_list('group_id', flat=True))

    rollup = 60 * 60 * 24  # daily buckets

    events = tsdb.get_sums(
        tsdb.models.group,
        issue_ids,
        start,
        stop,
        rollup=rollup,
    )

    users = tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group,
        issue_ids,
        start,
        stop,
        rollup=rollup,
    )

    # Fixed: avoid shadowing the ``id`` builtin inside the comprehension.
    return (
        len(issue_ids),
        trim_issue_list(
            [(issue_id, (events[issue_id], users[issue_id]))
             for issue_id in issue_ids]),
    )
Beispiel #9
0
 def get_aggregate_value(start, stop):
     """Return the project's total event count in [start, stop] (daily rollup)."""
     sums = tsdb.get_sums(
         tsdb.models.project,
         (project.id,),
         start,
         stop,
         rollup=60 * 60 * 24,
     )
     return sums[project.id]
Beispiel #10
0
def prepare_project_usage_summary(start__stop, project):
    """Return ``(blacklisted, rejected)`` event totals for *project* over the interval."""
    start, stop = start__stop
    rollup = 60 * 60 * 24  # daily buckets

    def _total(model):
        # Sum one tsdb series for this project over the interval.
        return tsdb.get_sums(
            model, [project.id], start, stop, rollup=rollup
        )[project.id]

    return (
        _total(tsdb.models.project_total_blacklisted),
        _total(tsdb.models.project_total_rejected),
    )
Beispiel #11
0
 def get_aggregate_value(start, stop):
     """Total project events between *start* and *stop*, using one-day buckets."""
     totals = tsdb.get_sums(
         tsdb.models.project, (project.id,), start, stop, rollup=60 * 60 * 24
     )
     return totals[project.id]
Beispiel #12
0
def get_event_counts(issue_ids, start, stop, rollup):
    """Fetch per-issue event sums in batches and merge them into a single dict."""
    combined = {}
    for batch in chunked(issue_ids, BATCH_SIZE):
        # Query tsdb one batch at a time to bound request size.
        sums = tsdb.get_sums(tsdb.models.group, batch, start, stop, rollup=rollup)
        combined.update(sums)
    return combined
Beispiel #13
0
    def test_fingerprint_ignored(self):
        """Fingerprints on transaction events are ignored for grouping purposes."""
        manager1 = EventManager(
            make_event(event_id='a' * 32, fingerprint='fingerprint1'))
        event1 = manager1.save(self.project.id)

        transaction = make_event(
            event_id='b' * 32,
            fingerprint='fingerprint1',
            transaction='wait',
            contexts={
                'trace': {
                    'parent_span_id': 'bce14471e0e9654d',
                    'trace_id': 'a0fa8803753e40fd8124b21eeb2986b5',
                    'span_id': 'bf5be759039ede9a'
                }
            },
            spans=[],
            start_timestamp='2019-06-14T14:01:40Z',
            type='transaction',
            platform='python',
        )
        event2 = EventManager(transaction).save(self.project.id)

        # The error event groups; the transaction does not.
        assert event1.group is not None
        assert event2.group is None

        # Project-level counts include both events.
        project_total = tsdb.get_sums(
            tsdb.models.project,
            [self.project.id],
            event1.datetime,
            event1.datetime,
        )[self.project.id]
        assert project_total == 2

        # Group-level counts include only the grouped error event.
        group_total = tsdb.get_sums(
            tsdb.models.group,
            [event1.group.id],
            event1.datetime,
            event1.datetime,
        )[event1.group.id]
        assert group_total == 1
Beispiel #14
0
def send_beacon():
    """
    Send a Beacon to a remote server operated by the Sentry team.

    See the documentation for more details.
    """
    from sentry import options
    from sentry.models import Organization, Project, Team, User

    if not settings.SENTRY_BEACON:
        logger.info('Not sending beacon (disabled)')
        return

    install_id = options.get('sentry:install-id')
    if not install_id:
        # First run: derive a stable installation identifier and persist it.
        install_id = sha1(uuid4().hex).hexdigest()
        # Bug fix: log *after* generating, so the message records the actual
        # ID instead of the empty/None value read from options.
        logger.info('Generated installation ID: %s', install_id)
        options.set('sentry:install-id', install_id)

    # Total events processed in the trailing 24 hours, from the internal
    # tsdb counter.
    end = timezone.now()
    events_24h = tsdb.get_sums(
        model=tsdb.models.internal,
        keys=['events.total'],
        start=end - timedelta(hours=24),
        end=end,
    )['events.total']

    payload = {
        'install_id': install_id,
        'version': sentry.get_version(),
        'admin_email': settings.SENTRY_ADMIN_EMAIL,
        'data': {
            # TODO(dcramer): we'd also like to get an idea about the throughput
            # of the system (i.e. events in 24h)
            'users': User.objects.count(),
            'projects': Project.objects.count(),
            'teams': Team.objects.count(),
            'organizations': Organization.objects.count(),
            'events.24h': events_24h,
        }
    }

    # TODO(dcramer): relay the response 'notices' as admin broadcasts
    try:
        request = safe_urlopen(BEACON_URL, json=payload, timeout=5)
        response = safe_urlread(request)
    except Exception:
        # Best effort: the beacon must never break the caller.
        logger.warning('Failed sending beacon', exc_info=True)
        return

    data = json.loads(response)
    if 'version' in data:
        options.set('sentry:latest_version', data['version']['stable'])
Beispiel #15
0
def fetch_state(project, records):
    # Records are returned newest-first while we query chronologically, so
    # the interval endpoints come from opposite ends of the list.
    # NOTE: This doesn't account for any issues that are filtered out later.
    start = records[-1].datetime
    end = records[0].datetime

    group_ids = (record.value.event.group_id for record in records)
    groups = Group.objects.in_bulk(group_ids)

    all_rules = itertools.chain.from_iterable(
        record.value.rules for record in records)

    return {
        'project': project,
        'groups': groups,
        'rules': Rule.objects.in_bulk(all_rules),
        'event_counts': tsdb.get_sums(tsdb.models.group, groups.keys(), start, end),
        'user_counts': tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group, groups.keys(), start, end),
    }
Beispiel #16
0
def prepare_project_issue_summaries(interval, project):
    """Summarize issue activity for *project* over *interval*.

    Returns ``[new_issue_count, reopened_issue_count, existing_issue_count]``
    as event totals attributed to new, reopened, and pre-existing issues.
    """
    start, stop = interval

    queryset = project.group_set.exclude(status=GroupStatus.IGNORED)

    # Fetch all new issues.
    new_issue_ids = set(
        queryset.filter(
            first_seen__gte=start,
            first_seen__lt=stop,
        ).values_list('id', flat=True)
    )

    # Fetch all regressions. This is a little weird, since there's no way to
    # tell *when* a group regressed using the Group model. Instead, we query
    # all groups that have been seen in the last week and have ever regressed
    # and query the Activity model to find out if they regressed within the
    # past week. (In theory, the activity table *could* be used to answer this
    # query without the subselect, but there's no suitable indexes to make its
    # performance predictable.)
    reopened_issue_ids = set(
        Activity.objects.filter(
            group__in=queryset.filter(
                last_seen__gte=start,
                last_seen__lt=stop,
                resolved_at__isnull=False,  # signals this has *ever* been resolved
            ),
            type__in=(Activity.SET_REGRESSION, Activity.SET_UNRESOLVED, ),
            datetime__gte=start,
            datetime__lt=stop,
        ).distinct().values_list('group_id', flat=True)
    )

    rollup = 60 * 60 * 24  # daily buckets

    event_counts = tsdb.get_sums(
        tsdb.models.group,
        new_issue_ids | reopened_issue_ids,
        start,
        stop,
        rollup=rollup,
    )

    # Fixed: use ``issue_id`` rather than shadowing the ``id`` builtin.
    new_issue_count = sum(event_counts[issue_id] for issue_id in new_issue_ids)
    reopened_issue_count = sum(
        event_counts[issue_id] for issue_id in reopened_issue_ids)
    # Remaining project events belong to existing issues; clamp at zero to
    # guard against tsdb counter inconsistencies.
    existing_issue_count = max(
        tsdb.get_sums(
            tsdb.models.project,
            [project.id],
            start,
            stop,
            rollup=rollup,
        )[project.id] - new_issue_count - reopened_issue_count,
        0,
    )

    return [
        new_issue_count,
        reopened_issue_count,
        existing_issue_count,
    ]
Beispiel #17
0
        0,
    )

    return [
        new_issue_count,
        reopened_issue_count,
        existing_issue_count,
    ]


def prepare_project_usage_summary((start, stop), project):
    return (
        tsdb.get_sums(
            tsdb.models.project_total_blacklisted,
            [project.id],
            start,
            stop,
            rollup=60 * 60 * 24,
        )[project.id],
        tsdb.get_sums(
            tsdb.models.project_total_rejected,
            [project.id],
            start,
            stop,
            rollup=60 * 60 * 24,
        )[project.id],
    )


def get_calendar_range((_, stop_time), months):
    assert (
Beispiel #18
0
 def query(model, key, **kwargs):
     """Sum the tsdb series for *key* at the event's timestamp."""
     sums = tsdb.get_sums(model, [key], event.datetime, event.datetime, **kwargs)
     return sums[key]
Beispiel #19
0
        reverse=True,
    )[:5]


def prepare_project_release_list((start, stop), project):
    return trim_release_list(
        filter(
            lambda item: item[1] > 0,
            tsdb.get_sums(
                tsdb.models.release,
                Release.objects.filter(
                    project=project,
                    version__in=TagValue.objects.filter(
                        project=project,
                        key='sentry:release',
                        last_seen__gte=
                        start,  # lack of upper bound is intentional
                    ).values_list('value', flat=True),
                ).values_list('id', flat=True),
                start,
                stop,
                rollup=60 * 60 * 24,
            ).items(),
        ))


def prepare_project_usage_summary((start, stop), project):
    return (
        tsdb.get_sums(
            tsdb.models.project_total_blacklisted,
            [project.id],
Beispiel #20
0
def send_beacon():
    """
    Send a Beacon to a remote server operated by the Sentry team.

    See the documentation for more details.
    """
    from sentry import options
    from sentry.models import Broadcast, Organization, Project, Team, User

    # Ensure a stable installation identifier exists; generate and persist
    # one on first run.
    install_id = options.get('sentry:install-id')
    if not install_id:
        install_id = sha1(uuid4().bytes).hexdigest()
        logger.info('beacon.generated-install-id',
                    extra={'install_id': install_id})
        options.set('sentry:install-id', install_id)

    # Beacon reporting can be disabled entirely via settings.
    if not settings.SENTRY_BEACON:
        logger.info('beacon.skipped',
                    extra={
                        'install_id': install_id,
                        'reason': 'disabled'
                    })
        return

    # Never phone home from development installs.
    if settings.DEBUG:
        logger.info('beacon.skipped',
                    extra={
                        'install_id': install_id,
                        'reason': 'debug'
                    })
        return

    # Total events processed in the trailing 24 hours, from the internal
    # tsdb counter.
    end = timezone.now()
    events_24h = tsdb.get_sums(
        model=tsdb.models.internal,
        keys=['events.total'],
        start=end - timedelta(hours=24),
        end=end,
    )['events.total']

    # we need this to be explicitly configured and it defaults to None,
    # which is the same as False
    anonymous = options.get('beacon.anonymous') is not False

    payload = {
        'install_id': install_id,
        'version': sentry.get_version(),
        'docker': sentry.is_docker(),
        'data': {
            # TODO(dcramer): we'd also like to get an idea about the throughput
            # of the system (i.e. events in 24h)
            'users': User.objects.count(),
            'projects': Project.objects.count(),
            'teams': Team.objects.count(),
            'organizations': Organization.objects.count(),
            'events.24h': events_24h,
        },
        'packages': get_all_package_versions(),
        'anonymous': anonymous,
    }

    # Only attach the admin email when the install has opted out of anonymity.
    if not anonymous:
        payload['admin_email'] = options.get('system.admin-email')

    # TODO(dcramer): relay the response 'notices' as admin broadcasts
    # Best effort: a failed beacon must never break the caller.
    try:
        request = safe_urlopen(BEACON_URL, json=payload, timeout=5)
        response = safe_urlread(request)
    except Exception:
        logger.warning('beacon.failed',
                       exc_info=True,
                       extra={'install_id': install_id})
        return
    else:
        logger.info('beacon.sent', extra={'install_id': install_id})

    data = json.loads(response)

    # Record the latest stable release advertised by the server.
    if 'version' in data:
        options.set('sentry:latest_version', data['version']['stable'])

    # Sync server-pushed notices into Broadcast rows, then deactivate any
    # previously-synced broadcasts the server no longer sends.
    if 'notices' in data:
        upstream_ids = set()
        for notice in data['notices']:
            upstream_ids.add(notice['id'])
            Broadcast.objects.create_or_update(upstream_id=notice['id'],
                                               defaults={
                                                   'title': notice['title'],
                                                   'link': notice.get('link'),
                                                   'message':
                                                   notice['message'],
                                               })

        Broadcast.objects.filter(upstream_id__isnull=False, ).exclude(
            upstream_id__in=upstream_ids, ).update(is_active=False, )
Beispiel #21
0
def send_beacon():
    """
    Send a Beacon to a remote server operated by the Sentry team.

    See the documentation for more details.
    """
    from sentry import options
    from sentry.models import Broadcast, Organization, Project, Team, User

    install_id = options.get('sentry:install-id')
    if not install_id:
        # First run: derive a stable installation id and persist it.
        install_id = sha1(uuid4().bytes).hexdigest()
        logger.info('beacon.generated-install-id', extra={'install_id': install_id})
        options.set('sentry:install-id', install_id)

    # Skip reporting when disabled or when running a development install.
    if not settings.SENTRY_BEACON:
        logger.info('beacon.skipped', extra={'install_id': install_id, 'reason': 'disabled'})
        return
    if settings.DEBUG:
        logger.info('beacon.skipped', extra={'install_id': install_id, 'reason': 'debug'})
        return

    # Events processed during the trailing 24 hours (internal tsdb counter).
    now = timezone.now()
    events_24h = tsdb.get_sums(
        model=tsdb.models.internal,
        keys=['events.total'],
        start=now - timedelta(hours=24),
        end=now,
    )['events.total']

    # we need this to be explicitly configured and it defaults to None,
    # which is the same as False
    anonymous = options.get('beacon.anonymous') is not False

    payload = {
        'install_id': install_id,
        'version': sentry.get_version(),
        'docker': sentry.is_docker(),
        'data': {
            # TODO(dcramer): we'd also like to get an idea about the throughput
            # of the system (i.e. events in 24h)
            'users': User.objects.count(),
            'projects': Project.objects.count(),
            'teams': Team.objects.count(),
            'organizations': Organization.objects.count(),
            'events.24h': events_24h,
        },
        'packages': get_all_package_versions(),
        'anonymous': anonymous,
    }
    if not anonymous:
        # Only identified installs report an admin contact.
        payload['admin_email'] = options.get('system.admin-email')

    # TODO(dcramer): relay the response 'notices' as admin broadcasts
    try:
        request = safe_urlopen(BEACON_URL, json=payload, timeout=5)
        response = safe_urlread(request)
    except Exception:
        # Best effort: a failed beacon must never break the caller.
        logger.warning('beacon.failed', exc_info=True, extra={'install_id': install_id})
        return
    logger.info('beacon.sent', extra={'install_id': install_id})

    data = json.loads(response)

    if 'version' in data:
        options.set('sentry:latest_version', data['version']['stable'])

    if 'notices' in data:
        # Sync server-pushed notices into Broadcast rows.
        upstream_ids = set()
        for notice in data['notices']:
            upstream_ids.add(notice['id'])
            Broadcast.objects.create_or_update(
                upstream_id=notice['id'],
                defaults={
                    'title': notice['title'],
                    'link': notice.get('link'),
                    'message': notice['message'],
                },
            )

        # Deactivate previously-synced broadcasts the server no longer sends.
        Broadcast.objects.filter(
            upstream_id__isnull=False,
        ).exclude(
            upstream_id__in=upstream_ids,
        ).update(
            is_active=False,
        )
Beispiel #22
0
        0,
    )

    return [
        new_issue_count,
        reopened_issue_count,
        existing_issue_count,
    ]


def prepare_project_usage_summary((start, stop), project):
    return (
        tsdb.get_sums(
            tsdb.models.project_total_blacklisted,
            [project.id],
            start,
            stop,
            rollup=60 * 60 * 24,
        )[project.id],
        tsdb.get_sums(
            tsdb.models.project_total_rejected,
            [project.id],
            start,
            stop,
            rollup=60 * 60 * 24,
        )[project.id],
    )


def get_calendar_range((_, stop_time), months):
    assert (
Beispiel #23
0
def send_beacon():
    """
    Send a Beacon to a remote server operated by the Sentry team.

    See the documentation for more details.
    """
    from sentry import options
    from sentry.models import Broadcast, Organization, Project, Team, User

    if not settings.SENTRY_BEACON:
        logger.info('Not sending beacon (disabled)')
        return

    install_id = options.get('sentry:install-id')
    if not install_id:
        # First run: derive a stable installation identifier and persist it.
        install_id = sha1(uuid4().hex).hexdigest()
        # Bug fix: log *after* generating, so the message records the actual
        # ID instead of the empty/None value read from options.
        logger.info('Generated installation ID: %s', install_id)
        options.set('sentry:install-id', install_id)

    # Events processed during the trailing 24 hours (internal tsdb counter).
    end = timezone.now()
    events_24h = tsdb.get_sums(
        model=tsdb.models.internal,
        keys=['events.total'],
        start=end - timedelta(hours=24),
        end=end,
    )['events.total']

    payload = {
        'install_id': install_id,
        'version': sentry.get_version(),
        'admin_email': options.get('system.admin-email'),
        'data': {
            # TODO(dcramer): we'd also like to get an idea about the throughput
            # of the system (i.e. events in 24h)
            'users': User.objects.count(),
            'projects': Project.objects.count(),
            'teams': Team.objects.count(),
            'organizations': Organization.objects.count(),
            'events.24h': events_24h,
        },
        'packages': get_all_package_versions(),
    }

    # TODO(dcramer): relay the response 'notices' as admin broadcasts
    try:
        request = safe_urlopen(BEACON_URL, json=payload, timeout=5)
        response = safe_urlread(request)
    except Exception:
        # Best effort: the beacon must never break the caller.
        logger.warning('Failed sending beacon', exc_info=True)
        return

    data = json.loads(response)

    if 'version' in data:
        options.set('sentry:latest_version', data['version']['stable'])

    if 'notices' in data:
        # Sync server-pushed notices into Broadcast rows, then deactivate any
        # previously-synced broadcasts the server no longer sends.
        upstream_ids = set()
        for notice in data['notices']:
            upstream_ids.add(notice['id'])
            Broadcast.objects.create_or_update(
                upstream_id=notice['id'],
                defaults={
                    'title': notice['title'],
                    'link': notice.get('link'),
                    'message': notice['message'],
                },
            )

        Broadcast.objects.filter(upstream_id__isnull=False).exclude(
            upstream_id__in=upstream_ids).update(is_active=False)
Beispiel #24
0
def send_beacon():
    """
    Send a Beacon to a remote server operated by the Sentry team.

    See the documentation for more details.
    """
    from sentry import options
    from sentry.models import Broadcast, Organization, Project, Team, User

    if not settings.SENTRY_BEACON:
        logger.info('Not sending beacon (disabled)')
        return

    install_id = options.get('sentry:install-id')
    if not install_id:
        # First run: derive a stable installation identifier and persist it.
        install_id = sha1(uuid4().hex).hexdigest()
        # Bug fix: log *after* generating the ID, so the log message carries
        # the real value rather than the empty/None one read from options.
        logger.info('Generated installation ID: %s', install_id)
        options.set('sentry:install-id', install_id)

    # Events processed during the trailing 24 hours (internal tsdb counter).
    end = timezone.now()
    events_24h = tsdb.get_sums(
        model=tsdb.models.internal,
        keys=['events.total'],
        start=end - timedelta(hours=24),
        end=end,
    )['events.total']

    payload = {
        'install_id': install_id,
        'version': sentry.get_version(),
        'admin_email': options.get('system.admin-email'),
        'data': {
            # TODO(dcramer): we'd also like to get an idea about the throughput
            # of the system (i.e. events in 24h)
            'users': User.objects.count(),
            'projects': Project.objects.count(),
            'teams': Team.objects.count(),
            'organizations': Organization.objects.count(),
            'events.24h': events_24h,
        },
        'packages': get_all_package_versions(),
    }

    # TODO(dcramer): relay the response 'notices' as admin broadcasts
    try:
        request = safe_urlopen(BEACON_URL, json=payload, timeout=5)
        response = safe_urlread(request)
    except Exception:
        # Best effort: the beacon must never break the caller.
        logger.warning('Failed sending beacon', exc_info=True)
        return

    data = json.loads(response)

    if 'version' in data:
        options.set('sentry:latest_version', data['version']['stable'])

    if 'notices' in data:
        # Sync server-pushed notices into Broadcast rows.
        upstream_ids = set()
        for notice in data['notices']:
            upstream_ids.add(notice['id'])
            Broadcast.objects.create_or_update(
                upstream_id=notice['id'],
                defaults={
                    'title': notice['title'],
                    'link': notice.get('link'),
                    'message': notice['message'],
                }
            )

        # Deactivate previously-synced broadcasts the server no longer sends.
        Broadcast.objects.filter(
            upstream_id__isnull=False,
        ).exclude(
            upstream_id__in=upstream_ids,
        ).update(
            is_active=False,
        )
Beispiel #25
0
def send_beacon() -> None:
    """
    Send a Beacon to a remote server operated by the Sentry team.

    See the documentation for more details.

    Best-effort: failures are logged and swallowed so the caller is never
    broken by an unreachable beacon server. Skips entirely when
    ``settings.SENTRY_BEACON`` is off or ``settings.DEBUG`` is on.
    """
    from sentry import options
    from sentry.models import Broadcast, Organization, Project, Team, User

    # Stable per-installation identifier; generated once and persisted so the
    # upstream server can correlate reports from the same install.
    install_id = options.get("sentry:install-id")
    if not install_id:
        install_id = sha1(uuid4().bytes).hexdigest()
        logger.info("beacon.generated-install-id",
                    extra={"install_id": install_id})
        options.set("sentry:install-id", install_id)

    # Operators may opt out of the beacon entirely.
    if not settings.SENTRY_BEACON:
        logger.info("beacon.skipped",
                    extra={
                        "install_id": install_id,
                        "reason": "disabled"
                    })
        return

    # Never phone home from development environments.
    if settings.DEBUG:
        logger.info("beacon.skipped",
                    extra={
                        "install_id": install_id,
                        "reason": "debug"
                    })
        return

    # Total internal event count over the trailing 24 hours.
    end = timezone.now()
    events_24h = tsdb.get_sums(model=tsdb.models.internal,
                               keys=["events.total"],
                               start=end - timedelta(hours=24),
                               end=end)["events.total"]

    # we need this to be explicitly configured and it defaults to None,
    # which is the same as False
    anonymous = options.get("beacon.anonymous") is not False

    payload = {
        "install_id": install_id,
        "version": sentry.get_version(),
        "docker": sentry.is_docker(),
        "python_version": platform.python_version(),
        "data": {
            "users": User.objects.count(),
            "projects": Project.objects.count(),
            "teams": Team.objects.count(),
            "organizations": Organization.objects.count(),
            "events.24h": events_24h,
        },
        "packages": get_all_package_versions(),
        "anonymous": anonymous,
    }

    # Only attach a contact address when the install has not opted into
    # anonymous reporting.
    if not anonymous:
        payload["admin_email"] = options.get("system.admin-email")

    # TODO(dcramer): relay the response 'notices' as admin broadcasts
    try:
        request = safe_urlopen(BEACON_URL, json=payload, timeout=5)
        response = safe_urlread(request)
    except Exception:
        # Best-effort: log and bail, never propagate to the caller.
        logger.warning("beacon.failed",
                       exc_info=True,
                       extra={"install_id": install_id})
        return
    else:
        logger.info("beacon.sent", extra={"install_id": install_id})

    data = json.loads(response)

    # The server may advertise the latest stable release so the UI can
    # prompt admins to upgrade.
    if "version" in data:
        options.set("sentry:latest_version", data["version"]["stable"])

    # Upsert upstream notices as Broadcast rows, then deactivate any
    # broadcasts the server no longer advertises.
    if "notices" in data:
        upstream_ids = set()
        for notice in data["notices"]:
            upstream_ids.add(notice["id"])
            defaults = {
                "title": notice["title"],
                "link": notice.get("link"),
                "message": notice["message"],
            }
            # XXX(dcramer): we're missing a unique constraint on upstream_id
            # so we're using a lock to work around that. In the future we'd like
            # to have a data migration to clean up the duplicates and add the constraint
            lock = locks.get("broadcasts:{}".format(notice["id"]), duration=60)
            with lock.acquire():
                affected = Broadcast.objects.filter(
                    upstream_id=notice["id"]).update(**defaults)
                if not affected:
                    Broadcast.objects.create(upstream_id=notice["id"],
                                             **defaults)

        Broadcast.objects.filter(upstream_id__isnull=False).exclude(
            upstream_id__in=upstream_ids).update(is_active=False)
Beispiel #26
0
 def query(model, key, **kwargs):
     """Return the summed TSDB count for ``key`` at the event's timestamp."""
     # Start and stop are both ``event.datetime``: a point-in-time lookup.
     # ``event`` comes from the enclosing scope.
     sums = tsdb.get_sums(model, [key], event.datetime, event.datetime, **kwargs)
     return sums[key]
Beispiel #27
0
        key=lambda (id, count): count,
        reverse=True,
    )[:5]


def prepare_project_release_list(start__stop, project):
    """
    Build the trimmed list of ``(release_id, event_count)`` pairs for
    *project* within the reporting interval.

    ``start__stop`` is a ``(start, stop)`` tuple, unpacked explicitly —
    tuple-parameter unpacking was removed in Python 3 (PEP 3113) — matching
    the convention used by ``prepare_project_usage_summary``. Releases with
    zero events in the interval are dropped before trimming.
    """
    start, stop = start__stop
    release_ids = Release.objects.filter(
        project=project,
        version__in=TagValue.objects.filter(
            project=project,
            key='sentry:release',
            last_seen__gte=start,  # lack of upper bound is intentional
        ).values_list('value', flat=True),
    ).values_list('id', flat=True)
    sums = tsdb.get_sums(
        tsdb.models.release,
        release_ids,
        start,
        stop,
        rollup=60 * 60 * 24,  # daily buckets
    )
    # Drop releases that saw no events at all during the interval, then trim.
    return trim_release_list([item for item in sums.items() if item[1] > 0])


def prepare_project_usage_summary((start, stop), project):
    return (
        tsdb.get_sums(
            tsdb.models.project_total_blacklisted,
            [project.id],