def test_no_metrics_not_in_lpq(self, store) -> None:
        """A scan run with no recorded metrics must leave the LPQ empty."""
        # Precondition: the low-priority queue starts out empty.
        lpq_before = store.get_lpq_projects()
        assert lpq_before == set()

        # Execute the periodic scan task eagerly.
        with TaskRunner():
            _scan_for_suspect_projects()

        # No counters were ever incremented, so no project became suspect.
        assert store.get_lpq_projects() == set()
# Beispiel #2
# 0
def test_submit_symbolicate_queue_switch(default_project,
                                         mock_should_demote_symbolication,
                                         mock_submit_symbolicate):
    """Submitting an event for a demoted project routes it through the
    low-priority symbolication path.

    The mocked submit task re-enqueues itself, so the final call count
    reflects how many times the task ran in total (asserted to be 4).
    """
    event_payload = {
        "project": default_project.id,
        "platform": "native",
        "logentry": {"formatted": "test"},
        "event_id": EVENT_ID,
        "extra": {"foo": "bar"},
    }

    # The project is expected to be demoted to the low-priority queue.
    demoted = mock_should_demote_symbolication(default_project.id)
    assert demoted

    # Run tasks eagerly so all (re-)submissions happen synchronously.
    with TaskRunner():
        mock_submit_symbolicate(
            is_low_priority=demoted,
            from_reprocessing=False,
            cache_key="e:1",
            event_id=EVENT_ID,
            start_time=0,
            data=event_payload,
        )
    # Four invocations in total — the initial submit plus re-submissions.
    assert mock_submit_symbolicate.call_count == 4
    def test_has_metric_not_in_lpq(self, store) -> None:
        """A project with a recorded event metric gets added to the LPQ by the scan.

        Consistency fix: call ``increment_project_event_counter`` with keyword
        arguments, matching how the same API is invoked in ``test_has_metric``.
        """
        store.increment_project_event_counter(project_id=17, timestamp=0)
        # The counter alone does not place the project in the LPQ.
        assert store.get_lpq_projects() == set()

        # Run the scan task eagerly.
        with TaskRunner():
            _scan_for_suspect_projects()

        # The scan promoted project 17 into the low-priority queue.
        assert store.get_lpq_projects() == {17}
    def test_no_metrics_in_lpq(self, store) -> None:
        """A project already in the LPQ but without metrics is evicted by the scan."""
        # Seed the LPQ with a project that has no event counters.
        store.add_project_to_lpq(17)
        lpq_before = store.get_lpq_projects()
        assert lpq_before == {17}

        with TaskRunner():
            _scan_for_suspect_projects()

        # Without any event counters the project is no longer suspect.
        assert store.get_lpq_projects() == set()
    def test_has_metric(
        self, store: RealtimeMetricsStore, mock_update_lpq_eligibility: mock.Mock
    ) -> None:
        """The scan schedules an eligibility check for a project that has metrics."""
        # Record one event for project 17 so the scan considers it suspect.
        store.increment_project_event_counter(project_id=17, timestamp=0)

        # Run the scan task eagerly.
        with TaskRunner():
            _scan_for_suspect_projects()

        # An update_lpq_eligibility task was enqueued for the project.
        assert mock_update_lpq_eligibility.delay.called
    def test_remove_recently_moved_project(self, store) -> None:
        """A project still inside the backoff window is not removed from the LPQ."""
        # Force a non-zero backoff so the recent move is still considered "hot".
        store._backoff_timer = 10
        store.add_project_to_lpq(17)
        lpq_before = store.get_lpq_projects()
        assert lpq_before == {17}

        with TaskRunner():
            _scan_for_suspect_projects()

        # Despite having no metrics, the project stays put during backoff.
        assert store.get_lpq_projects() == {17}
    def test_no_metrics_not_in_lpq(
        self, store: RealtimeMetricsStore, mock_update_lpq_eligibility: mock.Mock
    ) -> None:
        """With no metrics at all, the scan neither touches the LPQ nor schedules work."""
        # Precondition: nothing is flagged as low-priority.
        lpq_before = store.get_lpq_projects()
        assert lpq_before == set()

        with TaskRunner():
            _scan_for_suspect_projects()

        # The LPQ is unchanged and no eligibility task was enqueued.
        assert store.get_lpq_projects() == set()
        assert not mock_update_lpq_eligibility.delay.called
    def test_add_one_project_remove_one_project(self, store) -> None:
        """One project gains metrics while another sits metric-less in the LPQ;
        a single scan swaps their LPQ membership."""
        # Project 17 has events but is (no-op) removed; project 1 is seeded
        # into the LPQ without any metrics.  Order matters here.
        store.increment_project_event_counter(17, 0)
        store.remove_projects_from_lpq([17])
        store.add_project_to_lpq(1)
        lpq_before = store.get_lpq_projects()
        assert lpq_before == {1}

        with TaskRunner():
            _scan_for_suspect_projects()

        # Project 17 was promoted into the LPQ and project 1 was evicted.
        assert store.get_lpq_projects() == {17}
    def test_add_recently_moved_project(self, store) -> None:
        """A project inside the backoff window is not re-added to the LPQ,
        even though it has event metrics."""
        store._backoff_timer = 10
        store.increment_project_event_counter(17, 0)
        # Removal always refreshes the backoff timer, even when it is a no-op;
        # this places project 17 inside the backoff window.
        store.remove_projects_from_lpq([17])
        lpq_before = store.get_lpq_projects()
        assert lpq_before == set()

        with TaskRunner():
            _scan_for_suspect_projects()

        # Backoff prevents the scan from re-adding the suspect project.
        assert store.get_lpq_projects() == set()
    def test_metrics_consumer(self, mock_task):
        """End-to-end test of the metrics indexer consumer.

        Produces a raw payload to the dummy ingest topic, has the batching
        consumer process and flush it, then verifies (a) the task scheduled
        with the indexed message and (b) the translated message produced to
        the snuba topic.

        NOTE(review): depends on a Kafka broker at localhost:9092 and on
        module-level ``payload`` / ``translate_payload`` fixtures not visible
        in this chunk — confirm against the full test module.
        """
        ingest_producer = self._get_producer(self.ingest_topic)
        message = json.dumps(payload).encode()

        # produce message to the dummy ingest-metrics topic
        ingest_producer.produce(self.ingest_topic, message)

        # flush() returns the number of messages still queued; 0 means the
        # produced message was delivered.
        assert ingest_producer.flush() == 0

        # Consumer tuned for the test: single-message batches, long batch
        # window, and reading from the beginning of the topic.
        options = {
            "max_batch_size": 1,
            "max_batch_time": 5000,
            "group_id": "test-metrics-indexer-consumer",
            "auto_offset_reset": "earliest",
        }
        batching_consumer = get_metrics_consumer(topic=self.ingest_topic,
                                                 **options)

        # couldn't use _run_once() here because .poll() is called
        # with a 1 second timeout which seems to not be enough.
        msg = batching_consumer.consumer.poll(5)
        assert msg

        with TaskRunner():
            # _handle_message calls worker's process_message
            # and then we flush() to make sure we call flush_batch
            batching_consumer._handle_message(msg)
            batching_consumer._flush()

            # make sure we produced the message during flush_batch
            # (name-mangled access to the worker's private __producer)
            snuba_producer = batching_consumer.worker._MetricsIndexerWorker__producer
            assert snuba_producer.flush() == 0

            # The scheduled task only receives a subset of the translated
            # payload's keys.
            translated_msg = translate_payload()
            expected_msg = {
                k: translated_msg[k]
                for k in ["tags", "name", "org_id"]
            }
            mock_task.apply_async.assert_called_once_with(
                kwargs={"messages": [expected_msg]})

        # in order to test that the message we produced to the dummy
        # snuba-metrics topic was the message we expected, we make a
        # dummy consumer to subscribe to the topic
        snuba_metrics_consumer = Consumer({
            "bootstrap.servers": "localhost:9092",
            "group.id": "test-snuba-metrics-consumer",
            "default.topic.config": {
                "auto.offset.reset": "earliest"
            },
        })
        snuba_metrics_consumer.subscribe([self.snuba_topic])

        # once we have the message, we don't need the consumer anymore
        translated_msg = snuba_metrics_consumer.poll(5)
        snuba_metrics_consumer.close()
        assert translated_msg

        # finally test the payload of the translated message
        parsed = json.loads(translated_msg.value(), use_rapid_json=True)
        expected = translate_payload()
        # loading the json converts the keys to strings e.g. {"tags": {1: 3}} --> {"tags": {"1": 3}}
        assert parsed["tags"] == {
            str(k): v
            for k, v in expected["tags"].items()
        }
        assert parsed["metric_id"] == expected["metric_id"]