def test_normal(self):
    """End-to-end check: a message produced to the topic reaches the
    registered subscriber callback with the normalized payload
    (``values`` copied from ``result``, ``timestamp`` parsed to an
    aware UTC datetime).

    The callback's ``KeyboardInterrupt`` side effect makes
    ``consumer.run()`` return after the first message is processed.
    """
    cluster_name = settings.KAFKA_TOPICS[self.topic]["cluster"]
    conf = {
        "bootstrap.servers": settings.KAFKA_CLUSTERS[cluster_name]["bootstrap.servers"],
        "session.timeout.ms": 6000,
    }
    producer = Producer(conf)
    producer.produce(self.topic, json.dumps(self.valid_wrapper))
    producer.flush()

    mock_callback = Mock()
    # Interrupt stops the consumer loop once the message has been handled.
    mock_callback.side_effect = KeyboardInterrupt()
    register_subscriber(self.registration_key)(mock_callback)
    sub = self.create_subscription()

    consumer = QuerySubscriptionConsumer("hi", topic=self.topic, commit_batch_size=1)
    consumer.run()

    # Build the expected payload on a copy so the shared `valid_payload`
    # fixture is not mutated for other tests.
    payload = deepcopy(self.valid_payload)
    payload["values"] = payload["result"]
    payload["timestamp"] = parse_date(payload["timestamp"]).replace(tzinfo=pytz.utc)
    mock_callback.assert_called_once_with(payload, sub)
def test_normal(self):
    """The consumer delivers a produced message to the registered callback
    along with the matching `QuerySubscription` row."""
    cluster = settings.KAFKA_TOPICS[self.topic]["cluster"]
    producer = Producer(
        {
            "bootstrap.servers": settings.KAFKA_CLUSTERS[cluster]["bootstrap.servers"],
            "session.timeout.ms": 6000,
        }
    )
    producer.produce(self.topic, json.dumps(self.valid_wrapper))
    producer.flush()

    # The side effect stops the consumer loop after the first message.
    callback = Mock(side_effect=KeyboardInterrupt())
    register_subscriber(self.registration_key)(callback)

    subscription = QuerySubscription.objects.create(
        project=self.project,
        type=self.registration_key,
        subscription_id=self.subscription_id,
        dataset="something",
        query="hello",
        aggregations=[],
        time_window=1,
        resolution=1,
    )

    consumer = QuerySubscriptionConsumer("hi", topic=self.topic, commit_batch_size=1)
    consumer.run()

    callback.assert_called_once_with(self.valid_payload, subscription)
def test(self):
    # Full integration test to ensure that when a subscription receives an update
    # the `QuerySubscriptionConsumer` successfully retrieves the subscription and
    # calls the correct callback, which should result in an incident being created
    # and the trigger email being sent.
    original_callback = subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE]

    def exception_callback(*args, **kwargs):
        # We want to just error after the callback so that we can see the result
        # of processing. This means the offset won't be committed, but that's
        # fine, we can still check the results.
        original_callback(*args, **kwargs)
        raise KeyboardInterrupt()

    value_name = query_aggregation_to_snuba[QueryAggregations(self.subscription.aggregation)][2]
    subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = exception_callback
    try:
        message = {
            "version": 1,
            "payload": {
                "subscription_id": self.subscription.subscription_id,
                "values": {value_name: self.trigger.alert_threshold + 1},
                "timestamp": 1235,
                "interval": 5,
                "partition": 50,
                "offset": 10,
            },
        }
        self.producer.produce(self.topic, json.dumps(message))
        self.producer.flush()

        def active_incident():
            # Open, alert-triggered incidents for the rule under test.
            return Incident.objects.filter(
                type=IncidentType.ALERT_TRIGGERED.value,
                status=IncidentStatus.OPEN.value,
                alert_rule=self.rule,
            )

        consumer = QuerySubscriptionConsumer("hi", topic=self.topic)
        with self.assertChanges(
            lambda: active_incident().exists(), before=False, after=True
        ), self.tasks():
            consumer.run()

        assert len(mail.outbox) == 1
        handler = EmailActionHandler(self.action, active_incident().get(), self.project)
        message = handler.build_message(
            handler.generate_email_context(TriggerStatus.ACTIVE),
            TriggerStatus.ACTIVE,
            self.user.id,
        )
        out = mail.outbox[0]
        assert out.to == [self.user.email]
        assert out.subject == message.subject
        built_message = message.build(self.user.email)
        assert out.body == built_message.body
    finally:
        # Restore the real callback so the patched global registry doesn't
        # leak into other tests.
        subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = original_callback
def test(self):
    # Full integration test to ensure that when a subscription receives an update
    # the `QuerySubscriptionConsumer` successfully retrieves the subscription and
    # calls the correct callback, which should result in an incident being created
    # and the trigger email being sent.
    message = {
        "version": 1,
        "payload": {
            "subscription_id": self.subscription.subscription_id,
            "values": {"data": [{"some_col": self.trigger.alert_threshold + 1}]},
            "timestamp": "2020-01-01T01:23:45.1234",
        },
    }
    self.producer.produce(self.topic, json.dumps(message))
    self.producer.flush()

    def active_incident():
        # Alert-triggered incidents for the rule that are not closed.
        return Incident.objects.filter(
            type=IncidentType.ALERT_TRIGGERED.value, alert_rule=self.rule
        ).exclude(status=IncidentStatus.CLOSED.value)

    consumer = QuerySubscriptionConsumer("hi", topic=self.topic)

    original_callback = subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE]

    def shutdown_callback(*args, **kwargs):
        # We want to just exit after the callback so that we can see the result of
        # processing.
        original_callback(*args, **kwargs)
        consumer.shutdown()

    subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = shutdown_callback
    try:
        with self.feature(["organizations:incidents", "organizations:performance-view"]):
            with self.assertChanges(
                lambda: active_incident().exists(), before=False, after=True
            ), self.tasks(), self.capture_on_commit_callbacks(execute=True):
                consumer.run()

        assert len(mail.outbox) == 1
        handler = EmailActionHandler(self.action, active_incident().get(), self.project)
        message = handler.build_message(
            generate_incident_trigger_email_context(
                handler.project,
                handler.incident,
                handler.action.alert_rule_trigger,
                TriggerStatus.ACTIVE,
            ),
            TriggerStatus.ACTIVE,
            self.user.id,
        )
        out = mail.outbox[0]
        assert out.to == [self.user.email]
        assert out.subject == message.subject
        built_message = message.build(self.user.email)
        assert out.body == built_message.body
    finally:
        # Restore the real callback so the patched global registry doesn't
        # leak into other tests.
        subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = original_callback
def test_shutdown(self):
    """Interrupting after the second message commits only the first offset,
    so a second run re-processes the uncommitted second message before the
    newly produced third one."""
    self.producer.produce(self.topic, json.dumps(self.valid_wrapper))
    valid_wrapper_2 = deepcopy(self.valid_wrapper)
    valid_wrapper_2["payload"]["values"]["hello"] = 25
    valid_wrapper_3 = deepcopy(valid_wrapper_2)
    valid_wrapper_3["payload"]["values"]["hello"] = 5000
    self.producer.produce(self.topic, json.dumps(valid_wrapper_2))
    self.producer.flush()

    counts = [0]

    def mock_callback(*args, **kwargs):
        counts[0] += 1
        if counts[0] > 1:
            # Stop the consumer loop once the second message has been seen.
            raise KeyboardInterrupt()

    mock = Mock()
    mock.side_effect = mock_callback
    register_subscriber(self.registration_key)(mock)
    sub = QuerySubscription.objects.create(
        project=self.project,
        type=self.registration_key,
        subscription_id=self.subscription_id,
        dataset="something",
        query="hello",
        aggregation=0,
        time_window=1,
        resolution=1,
    )

    consumer = QuerySubscriptionConsumer("hi", topic=self.topic, commit_batch_size=100)
    consumer.run()

    # Normalize on a copy so the shared `valid_payload` fixture is not
    # mutated for other tests.
    valid_payload = deepcopy(self.valid_payload)
    valid_payload["timestamp"] = parse_date(valid_payload["timestamp"]).replace(tzinfo=pytz.utc)
    valid_wrapper_2["payload"]["timestamp"] = parse_date(
        valid_wrapper_2["payload"]["timestamp"]
    ).replace(tzinfo=pytz.utc)
    mock.assert_has_calls([call(valid_payload, sub), call(valid_wrapper_2["payload"], sub)])

    # Offset should be committed for the first message, so second run should process
    # the second message again
    self.producer.produce(self.topic, json.dumps(valid_wrapper_3))
    self.producer.flush()
    mock.reset_mock()
    counts[0] = 0
    consumer.run()

    valid_wrapper_3["payload"]["timestamp"] = parse_date(
        valid_wrapper_3["payload"]["timestamp"]
    ).replace(tzinfo=pytz.utc)
    mock.assert_has_calls(
        [call(valid_wrapper_2["payload"], sub), call(valid_wrapper_3["payload"], sub)]
    )
def test_shutdown(self):
    """Shutting down after a batch commits the processed offsets; a second
    run resumes with the remaining (third) message."""
    wrapper_2 = deepcopy(self.valid_wrapper)
    wrapper_2["payload"]["result"]["hello"] = 25
    wrapper_3 = deepcopy(self.valid_wrapper)
    wrapper_3["payload"]["result"]["hello"] = 5000
    for wrapper in (self.valid_wrapper, wrapper_2, wrapper_3):
        self.producer.produce(self.topic, json.dumps(wrapper))
    self.producer.flush()

    def normalize_payload(payload):
        # Mirror the consumer's normalization: expose `result` under `values`
        # and parse the timestamp into an aware UTC datetime.
        normalized = dict(payload)
        normalized["values"] = payload["result"]
        normalized["timestamp"] = parse_date(payload["timestamp"]).replace(tzinfo=pytz.utc)
        return normalized

    consumer = QuerySubscriptionConsumer("hi", topic=self.topic, commit_batch_size=100)

    def stop_when_done(*args, **kwargs):
        # Request a graceful shutdown once the expected number of messages
        # for the current run has been handled.
        if callback.call_count >= len(expected_calls):
            consumer.shutdown()

    callback = Mock(side_effect=stop_when_done)
    register_subscriber(self.registration_key)(callback)
    subscription = self.create_subscription()

    expected_calls = [
        call(normalize_payload(self.valid_payload), subscription),
        call(normalize_payload(wrapper_2["payload"]), subscription),
    ]
    consumer.run()
    callback.assert_has_calls(expected_calls)

    expected_calls = [call(normalize_payload(wrapper_3["payload"]), subscription)]
    callback.reset_mock()
    consumer.run()
    callback.assert_has_calls(expected_calls)
def test(self):
    # Full integration test to ensure that when a subscription receives an update
    # the `QuerySubscriptionConsumer` successfully retrieves the subscription and
    # calls the correct callback, which should result in an incident being created.
    original_callback = subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE]

    def exception_callback(*args, **kwargs):
        # We want to just error after the callback so that we can see the result
        # of processing. This means the offset won't be committed, but that's
        # fine, we can still check the results.
        original_callback(*args, **kwargs)
        raise KeyboardInterrupt()

    value_name = query_aggregation_to_snuba[QueryAggregations(self.subscription.aggregation)][2]
    subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = exception_callback
    try:
        message = {
            "version": 1,
            "payload": {
                "subscription_id": self.subscription.subscription_id,
                "values": {value_name: self.trigger.alert_threshold + 1},
                "timestamp": 1235,
                "interval": 5,
                "partition": 50,
                "offset": 10,
            },
        }
        self.producer.produce(self.topic, json.dumps(message))
        self.producer.flush()

        def active_incident_exists():
            # True when an open, alert-triggered incident exists for the rule.
            return Incident.objects.filter(
                type=IncidentType.ALERT_TRIGGERED.value,
                status=IncidentStatus.OPEN.value,
                alert_rule=self.rule,
            ).exists()

        consumer = QuerySubscriptionConsumer("hi", topic=self.topic)
        with self.assertChanges(active_incident_exists, before=False, after=True), self.tasks():
            # TODO: Need to check that the email gets sent once we hook that up
            consumer.run()
    finally:
        # Restore the real callback so the patched global registry doesn't
        # leak into other tests.
        subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = original_callback
def query_subscription_consumer(**options):
    """Run a `QuerySubscriptionConsumer` until interrupted.

    Expects `group`, `topic`, `commit_batch_size`, and
    `initial_offset_reset` keys in *options*. A SIGINT triggers a
    graceful shutdown via `consumer.shutdown()`.
    """
    from sentry.snuba.query_subscription_consumer import QuerySubscriptionConsumer

    consumer = QuerySubscriptionConsumer(
        group_id=options["group"],
        topic=options["topic"],
        commit_batch_size=options["commit_batch_size"],
        initial_offset_reset=options["initial_offset_reset"],
    )

    def on_sigint(signum, frame):
        # Ask the consumer to stop cleanly instead of dying mid-batch.
        consumer.shutdown()

    signal.signal(signal.SIGINT, on_sigint)
    consumer.run()
def test_batch_timeout(self, commit_offset_mock):
    """Offsets are committed when the batch timeout elapses, even though the
    batch-size threshold (100) is never reached."""
    self.producer.produce(self.topic, json.dumps(self.valid_wrapper))
    self.producer.flush()

    consumer = QuerySubscriptionConsumer(
        "hi", topic=self.topic, commit_batch_size=100, commit_batch_timeout_ms=1
    )

    def slow_then_stop(*args, **kwargs):
        # Sleep past the 1ms batch timeout so the timeout-driven commit fires,
        # then request shutdown so `consumer.run()` returns.
        time.sleep(0.1)
        consumer.shutdown()

    handler = Mock(side_effect=slow_then_stop)
    register_subscriber(self.registration_key)(handler)
    self.create_subscription()
    consumer.run()

    # Once on revoke, once on shutdown, and once due to batch timeout
    assert commit_offset_mock.call_count == 3