def process_pending_incident_snapshots():
    """
    Process due ``PendingIncidentSnapshot`` rows.

    For each pending row whose ``target_run_date`` has passed, create an
    incident snapshot (only when the incident is closed, has no snapshot yet,
    and has at least one associated project) and delete the pending row.
    Work is capped at ``batch_size`` rows per run; when more rows remain the
    task re-queues itself to continue shortly.
    """
    from sentry.incidents.logic import create_incident_snapshot

    batch_size = 50
    now = timezone.now()
    # Fetch one row beyond the batch so we can tell whether more work remains
    # without loading the entire backlog into memory (the original query was
    # unbounded).
    pending_snapshots = PendingIncidentSnapshot.objects.filter(
        target_run_date__lte=now
    ).select_related("incident")[: batch_size + 1]
    if not pending_snapshots:
        return

    for processed, pending_snapshot in enumerate(pending_snapshots):
        incident = pending_snapshot.incident
        # ``>=`` (not ``>``) so exactly ``batch_size`` rows are handled per
        # run; the presence of the extra row means there is more to do.
        if processed >= batch_size:
            process_pending_incident_snapshots.apply_async(countdown=1)
            break
        try:
            with transaction.atomic():
                if (
                    incident.status == IncidentStatus.CLOSED.value
                    and not IncidentSnapshot.objects.filter(incident=incident).exists()
                ):
                    # Snapshots only make sense for incidents that have
                    # projects attached.
                    if IncidentProject.objects.filter(incident=incident).exists():
                        create_incident_snapshot(incident, windowed_stats=True)
                # The pending row is consumed either way.
                pending_snapshot.delete()
        except Exception:
            logger.exception("An error occurred while taking an incident snapshot")
def process_pending_incident_snapshots(next_id=None):
    """
    Create snapshots for pending incidents whose ``target_run_date`` has passed.

    The ``PendingIncidentSnapshot`` table is walked in descending-id pages of
    ``INCIDENT_SNAPSHOT_BATCH_SIZE`` rows; when a page is full the task
    re-queues itself with the id to continue from. On the first run
    (``next_id`` is None) the total queue depth is reported as a metric.
    """
    from sentry.incidents.logic import create_incident_snapshot

    if next_id is None:
        # First run of the chain: record the backlog size so that constant
        # growth of the queue can be alerted on.
        metrics.incr(
            "incidents.pending_snapshots",
            amount=PendingIncidentSnapshot.objects.count(),
            sample_rate=1.0,
        )

    now = timezone.now()
    queryset = PendingIncidentSnapshot.objects.filter(target_run_date__lte=now)
    if next_id is not None:
        queryset = queryset.filter(id__lte=next_id)
    # Take one row past the batch size so we know whether to re-queue.
    batch = queryset.order_by("-id").select_related("incident")[
        : INCIDENT_SNAPSHOT_BATCH_SIZE + 1
    ]
    if not batch:
        return

    for index, pending in enumerate(batch):
        incident = pending.incident
        if index >= INCIDENT_SNAPSHOT_BATCH_SIZE:
            # The extra row proves there is more work; continue from its id.
            process_pending_incident_snapshots.apply_async(
                countdown=1, kwargs={"next_id": pending.id}
            )
            break
        try:
            with transaction.atomic():
                if (
                    incident.status == IncidentStatus.CLOSED.value
                    and not IncidentSnapshot.objects.filter(incident=incident).exists()
                ):
                    if IncidentProject.objects.filter(incident=incident).exists():
                        create_incident_snapshot(incident, windowed_stats=True)
                pending.delete()
        except Exception:
            logger.exception("An error occurred while taking an incident snapshot")
def process_pending_incident_snapshots(next_id=None):
    """
    Create snapshots for pending incidents whose ``target_run_date`` has passed.

    Rows are consumed in descending-id pages of
    ``INCIDENT_SNAPSHOT_BATCH_SIZE``; a full page causes the task to re-queue
    itself with the id to resume from on the next run.
    """
    from sentry.incidents.logic import create_incident_snapshot

    due = PendingIncidentSnapshot.objects.filter(target_run_date__lte=timezone.now())
    if next_id is not None:
        # Resuming a previous run: pick up at (and including) the handed-off id.
        due = due.filter(id__lte=next_id)
    # Fetch one extra row beyond the batch to detect remaining work.
    page = due.order_by("-id").select_related("incident")[
        : INCIDENT_SNAPSHOT_BATCH_SIZE + 1
    ]
    if not page:
        return

    for position, pending in enumerate(page):
        incident = pending.incident
        if position >= INCIDENT_SNAPSHOT_BATCH_SIZE:
            # Batch budget spent; the surplus row's id marks where to resume.
            process_pending_incident_snapshots.apply_async(
                countdown=1, kwargs={"next_id": pending.id}
            )
            break
        try:
            with transaction.atomic():
                needs_snapshot = (
                    incident.status == IncidentStatus.CLOSED.value
                    and not IncidentSnapshot.objects.filter(incident=incident).exists()
                )
                if needs_snapshot:
                    if IncidentProject.objects.filter(incident=incident).exists():
                        create_incident_snapshot(incident, windowed_stats=True)
                pending.delete()
        except Exception:
            logger.exception("An error occurred while taking an incident snapshot")
def test_windowed(self):
    """A windowed snapshot should mirror a freshly computed windowed stat snapshot."""
    incident = self.create_incident(self.organization)
    incident.update(status=IncidentStatus.CLOSED.value)

    actual = create_incident_snapshot(incident, windowed_stats=True)
    expected = create_event_stat_snapshot(incident, windowed_stats=True)

    stats = actual.event_stats_snapshot
    # Compare every stat field against the independently built snapshot.
    for field in ("start", "end", "values", "period", "date_added"):
        assert getattr(stats, field) == getattr(expected, field)

    aggregates = get_incident_aggregates(incident)
    assert actual.unique_users == aggregates["unique_users"]
    assert actual.total_events == aggregates["count"]
def test(self):
    """A default snapshot should mirror a stat snapshot over the incident's lifespan."""
    incident = self.create_incident(self.organization)
    incident.update(status=IncidentStatus.CLOSED.value)

    actual = create_incident_snapshot(incident)
    expected = create_event_stat_snapshot(
        incident,
        incident.date_started,
        incident.date_closed,
    )

    stats = actual.event_stats_snapshot
    # Compare every stat field against the independently built snapshot.
    for field in ("start", "end", "values", "period", "date_added"):
        assert getattr(stats, field) == getattr(expected, field)

    aggregates = get_incident_aggregates(incident)
    assert actual.unique_users == aggregates['unique_users']
    assert actual.total_events == aggregates['count']
def exploding_create_snapshot(*args, **kwargs):
    """
    Stand-in for ``create_incident_snapshot`` that fails exactly once.

    The first invocation bumps the shared ``snapshot_calls`` counter and
    raises; every later invocation delegates to the real
    ``create_incident_snapshot``. Used to simulate a transient snapshot error.
    """
    if snapshot_calls[0] >= 1:
        return create_incident_snapshot(*args, **kwargs)
    snapshot_calls[0] += 1
    raise Exception("bad snapshot")