Example #1
def test_outcome_consumer_ignores_outcomes_already_handled(
        kafka_producer, task_runner, kafka_admin, requires_kafka):
    producer, project_id, topic_name = _setup_outcome_test(
        kafka_producer, kafka_admin)

    group_id = "test-outcome-consumer-1"
    last_event_id = None

    # put a few outcome messages on the kafka topic, marking the first two
    # as already handled in the cache
    for i in range(4):
        msg = _get_outcome(
            event_id=i,
            project_id=project_id,
            outcome=Outcome.FILTERED,
            reason="some_reason",
            remote_addr="127.33.44.{}".format(i),
        )
        if i in (0, 1):
            # pretend that we have already processed this outcome before
            mark_signal_sent(project_id=project_id, event_id=_get_event_id(i))
        else:
            # the last event id is used to check when the outcome consumer is done
            last_event_id = _get_event_id(i)
        # put the outcome on the kafka topic
        producer.produce(topic_name, msg)

    # set up django signal receivers for event_filtered and event_dropped
    event_filtered_sink = []
    event_dropped_sink = []

    def event_filtered_receiver(**kwargs):
        event_filtered_sink.append(kwargs.get("ip"))

    def event_dropped_receiver(**kwargs):
        event_dropped_sink.append("something")

    event_filtered.connect(event_filtered_receiver)
    event_dropped.connect(event_dropped_receiver)

    consumer = get_outcomes_consumer(max_batch_size=1,
                                     max_batch_time=100,
                                     group_id=group_id,
                                     auto_offset_reset="earliest")

    # run the outcome consumer
    with task_runner():
        i = 0
        while (not is_signal_sent(project_id=project_id,
                                  event_id=last_event_id)
               and i < MAX_POLL_ITERATIONS):
            consumer._run_once()
            i += 1

    assert is_signal_sent(project_id=project_id, event_id=last_event_id)

    # verify that signals were emitted only for the outcomes that were not
    # already handled
    assert event_filtered_sink == ["127.33.44.2", "127.33.44.3"]
    assert len(event_dropped_sink) == 0
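
These tests lean on several module-level helpers that the excerpt does not show. Below is a minimal sketch of what they might look like, reconstructed purely from the call sites; the names match the tests, but the bodies, the serialization format, and the constant's value are assumptions.

import json

# Hypothetical reconstructions of the unshown test helpers; the real
# implementations live alongside these tests.
MAX_POLL_ITERATIONS = 1000  # safety bound so a stuck consumer fails fast


def _get_event_id(i):
    # derive a deterministic 32-character hex event id from a small integer
    return "{:032x}".format(i)


def _get_outcome(event_id, project_id, outcome, reason, remote_addr):
    # serialize an outcome message the way the producer side would
    return json.dumps({
        "event_id": _get_event_id(event_id),
        "project_id": project_id,
        "outcome": outcome.value,
        "reason": reason,
        "remote_addr": remote_addr,
    }).encode("utf-8")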
Example #2
def test_outcome_consumer_remembers_handled_outcomes(kafka_producer,
                                                     task_runner, kafka_admin,
                                                     requires_kafka):
    producer, project_id, topic_name = _setup_outcome_test(
        kafka_producer, kafka_admin)

    group_id = "test-outcome-consumer-3"

    # put a few outcome messages on the kafka topic
    for _ in six.moves.range(1, 3):
        # emit the same outcome twice (simulate the case where the producer
        # goes down without committing the kafka offsets and is restarted)
        msg = _get_outcome(
            event_id=1,
            project_id=project_id,
            outcome=Outcome.FILTERED,
            reason="some_reason",
            remote_addr="127.33.44.{}".format(1),
        )

        producer.produce(topic_name, msg)

    # set up django signal receivers for event_filtered and event_dropped
    event_filtered_sink = []
    event_dropped_sink = []

    def event_filtered_receiver(**kwargs):
        event_filtered_sink.append(kwargs.get("ip"))

    def event_dropped_receiver(**kwargs):
        event_dropped_sink.append("something")

    event_filtered.connect(event_filtered_receiver)
    event_dropped.connect(event_dropped_receiver)

    consumer = get_outcomes_consumer(max_batch_size=1,
                                     max_batch_time=100,
                                     group_id=group_id,
                                     auto_offset_reset="earliest")

    # run the outcome consumer
    with task_runner():
        i = 0
        while not event_filtered_sink and i < MAX_POLL_ITERATIONS:
            consumer._run_once()
            i += 1

    # verify that the signal was emitted only once for the duplicated outcome
    assert len(event_filtered_sink) == 1
    assert event_filtered_sink == ["127.33.44.1"]
    assert len(event_dropped_sink) == 0
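
Examples #1 and #2 both hinge on a dedup cache keyed by (project_id, event_id). As a rough stand-in for the mark_signal_sent / is_signal_sent pair (the real versions are backed by a shared cache so deduplication survives across processes; this in-memory version is an assumption for illustration):

# hypothetical in-memory stand-in for the cache-backed helpers
_handled_signals = set()


def mark_signal_sent(project_id, event_id):
    # record that signals for this outcome were already emitted
    _handled_signals.add((project_id, event_id))


def is_signal_sent(project_id, event_id):
    # True if this outcome was handled before (e.g. after a consumer restart)
    return (project_id, event_id) in _handled_signals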
Example #3
def test_outcome_consumer_handles_rate_limited_outcomes(
        kafka_producer, task_runner, kafka_admin, requires_kafka):
    producer, project_id, topic_name = _setup_outcome_test(
        kafka_producer, kafka_admin)

    group_id = "test-outcome-consumer-5"

    # put a few outcome messages on the kafka topic
    for i in six.moves.range(1, 3):
        msg = _get_outcome(
            event_id=i,
            project_id=project_id,
            outcome=Outcome.RATE_LIMITED,
            reason="reason_{}".format(i),
            remote_addr="127.33.44.{}".format(i),
        )

        producer.produce(topic_name, msg)

    # set up django signal receivers for event_filtered and event_dropped
    event_filtered_sink = []
    event_dropped_sink = []

    def event_filtered_receiver(**kwargs):
        event_filtered_sink.append("something")

    def event_dropped_receiver(**kwargs):
        event_dropped_sink.append(
            (kwargs.get("ip"), kwargs.get("reason_code")))

    event_filtered.connect(event_filtered_receiver)
    event_dropped.connect(event_dropped_receiver)

    consumer = get_outcomes_consumer(max_batch_size=1,
                                     max_batch_time=100,
                                     group_id=group_id,
                                     auto_offset_reset="earliest")

    # run the outcome consumer
    with task_runner():
        i = 0
        while len(event_dropped_sink) < 2 and i < MAX_POLL_ITERATIONS:
            consumer._run_once()
            i += 1

    # verify that only event_dropped signals were emitted
    assert len(event_filtered_sink) == 0
    assert len(event_dropped_sink) == 2
    assert set(event_dropped_sink) == set([("127.33.44.1", "reason_1"),
                                           ("127.33.44.2", "reason_2")])
Example #4
def test_outcome_consumer_ignores_invalid_outcomes(kafka_producer, task_runner,
                                                   kafka_admin,
                                                   requires_kafka):
    producer, project_id, topic_name = _setup_outcome_test(
        kafka_producer, kafka_admin)

    group_id = "test-outcome-consumer-2"

    # put a few outcome messages on the kafka topic. Add two FILTERED items so
    # we know when the consumer has reached the end
    for i in range(4):
        msg = _get_outcome(
            event_id=i,
            project_id=project_id,
            outcome=Outcome.INVALID if i < 2 else Outcome.FILTERED,
            reason="some_reason",
            remote_addr="127.33.44.{}".format(i),
        )

        producer.produce(topic_name, msg)

    # set up django signal receivers for event_filtered and event_dropped
    event_filtered_sink = []
    event_dropped_sink = []

    def event_filtered_receiver(**kwargs):
        event_filtered_sink.append(kwargs.get("ip"))

    def event_dropped_receiver(**kwargs):
        event_dropped_sink.append("something")

    event_filtered.connect(event_filtered_receiver)
    event_dropped.connect(event_dropped_receiver)

    consumer = get_outcomes_consumer(max_batch_size=1,
                                     max_batch_time=100,
                                     group_id=group_id,
                                     auto_offset_reset="earliest")

    # run the outcome consumer
    with task_runner():
        i = 0
        while len(event_filtered_sink) < 2 and i < MAX_POLL_ITERATIONS:
            consumer._run_once()
            i += 1

    # verify that the invalid outcomes were skipped and only the FILTERED
    # outcomes emitted signals
    assert event_filtered_sink == ["127.33.44.2", "127.33.44.3"]
    assert len(event_dropped_sink) == 0
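
Example #4 additionally assumes the Outcome enum has an INVALID member next to FILTERED and RATE_LIMITED. A minimal stand-in (the member values are assumptions, and the real enum defines more members):

from enum import IntEnum


class Outcome(IntEnum):
    # hypothetical subset of the real enum, just enough for these tests
    ACCEPTED = 0
    FILTERED = 1
    RATE_LIMITED = 2
    INVALID = 3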
Example #5
def test_outcome_consumer_handles_rate_limited_outcomes(
        kafka_producer, task_runner, kafka_admin):
    producer, project_id, topic_name = _setup_outcome_test(
        kafka_producer, kafka_admin)

    consumer_group = "test-outcome-consumer-5"

    # put a few outcome messages on the kafka topic
    for i in six.moves.range(1, 3):
        msg = _get_outcome(
            event_id=i,
            project_id=project_id,
            outcome=Outcome.RATE_LIMITED,
            reason="reason_{}".format(i),
            remote_addr="127.33.44.{}".format(i),
        )

        producer.produce(topic_name, msg)

    # set up django signal receivers for event_filtered and event_dropped
    event_filtered_sink = []
    event_dropped_sink = []

    def event_filtered_receiver(**kwargs):
        event_filtered_sink.append("something")

    def event_dropped_receiver(**kwargs):
        event_dropped_sink.append(
            (kwargs.get("ip"), kwargs.get("reason_code")))

    event_filtered.connect(event_filtered_receiver)
    event_dropped.connect(event_dropped_receiver)

    # run the outcome consumer
    with task_runner():
        run_outcomes_consumer(
            commit_batch_size=2,
            consumer_group=consumer_group,
            max_fetch_time_seconds=0.1,
            initial_offset_reset="earliest",
            is_shutdown_requested=_shutdown_requested(
                max_secs=10, num_outcomes=2, signal_sink=event_dropped_sink),
        )

    # verify that only event_dropped signals were emitted
    assert len(event_filtered_sink) == 0
    assert len(event_dropped_sink) == 2
    assert event_dropped_sink == [("127.33.44.1", "reason_1"),
                                  ("127.33.44.2", "reason_2")]
Example #6
def test_outcome_consumer_remembers_handled_outcomes(kafka_producer,
                                                     task_runner, kafka_admin):
    producer, project_id, topic_name = _setup_outcome_test(
        kafka_producer, kafka_admin)

    consumer_group = "test-outcome-consumer-3"

    # put a few outcome messages on the kafka topic
    for _ in six.moves.range(1, 3):
        # emit the same outcome twice (simulate the case where the producer
        # goes down without committing the kafka offsets and is restarted)
        msg = _get_outcome(
            event_id=1,
            project_id=project_id,
            outcome=Outcome.FILTERED,
            reason="some_reason",
            remote_addr="127.33.44.{}".format(1),
        )

        producer.produce(topic_name, msg)

    # set up django signal receivers for event_filtered and event_dropped
    event_filtered_sink = []
    event_dropped_sink = []

    def event_filtered_receiver(**kwargs):
        event_filtered_sink.append(kwargs.get("ip"))

    def event_dropped_receiver(**kwargs):
        event_dropped_sink.append("something")

    event_filtered.connect(event_filtered_receiver)
    event_dropped.connect(event_dropped_receiver)

    # run the outcome consumer
    with task_runner():
        run_outcomes_consumer(
            commit_batch_size=2,
            consumer_group=consumer_group,
            max_fetch_time_seconds=0.1,
            initial_offset_reset="earliest",
            is_shutdown_requested=_shutdown_requested(
                max_secs=10, num_outcomes=1, signal_sink=event_filtered_sink),
        )

    # verify that the signal was emitted only once for the duplicated outcome
    assert len(event_filtered_sink) == 1
    assert event_filtered_sink == ["127.33.44.1"]
    assert len(event_dropped_sink) == 0
Example #7
def test_outcome_consumer_ignores_outcomes_already_handled(
    kafka_producer, task_runner, kafka_admin
):
    producer, project_id, topic_name = _setup_outcome_test(kafka_producer, kafka_admin)

    consumer_group = "test-outcome-consumer-1"

    # put a few outcome messages on the kafka topic and also mark them in the cache
    for i in six.moves.range(1, 3):
        msg = _get_outcome(
            event_id=i,
            project_id=project_id,
            outcome=Outcome.FILTERED,
            reason="some_reason",
            remote_addr="127.33.44.{}".format(i),
        )
        # pretend that we have already processed this outcome before
        mark_signal_sent(project_id=project_id, event_id=_get_event_id(i))
        # put the outcome on the kafka topic
        producer.produce(topic_name, msg)

    # set up django signal receivers for event_filtered and event_dropped
    event_filtered_sink = []
    event_dropped_sink = []

    def event_filtered_receiver(**kwargs):
        event_filtered_sink.append(kwargs.get("ip"))

    def event_dropped_receiver(**kwargs):
        event_dropped_sink.append("something")

    event_filtered.connect(event_filtered_receiver)
    event_dropped.connect(event_dropped_receiver)

    # run the outcome consumer
    with task_runner():
        run_outcomes_consumer(
            commit_batch_size=2,
            consumer_group=consumer_group,
            max_fetch_time_seconds=0.1,
            initial_offset_reset="earliest",
            is_shutdown_requested=_shutdown_requested(
                max_secs=10, num_outcomes=1, signal_sink=event_filtered_sink
            ),
        )

    # verify that no signal was sent (since these outcomes were already handled)
    assert len(event_filtered_sink) == 0
    assert len(event_dropped_sink) == 0
Example #8
    def test_dropped_signal(self, mock_is_rate_limited):
        mock_is_rate_limited.is_limited = True

        mock_event_dropped = Mock()

        event_dropped.connect(mock_event_dropped)

        resp = self._postWithHeader({"logentry": {"message": u"hello"}})

        assert resp.status_code == 429, resp.content

        assert_mock_called_once_with_partial(mock_event_dropped,
                                             ip="127.0.0.1",
                                             project=self.project,
                                             signal=event_dropped)
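
Both dropped-signal tests rely on assert_mock_called_once_with_partial, which compares only the supplied subset of the call's arguments. A rough equivalent (an assumption; the real helper lives in the project's test utilities):

def assert_mock_called_once_with_partial(mock, *args, **kwargs):
    # like Mock.assert_called_once_with, but only checks the given positional
    # prefix and the given subset of keyword arguments
    assert len(mock.mock_calls) == 1
    call_args, call_kwargs = mock.call_args
    assert list(call_args[:len(args)]) == list(args)
    for key, value in kwargs.items():
        assert call_kwargs[key] == value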
Example #9
    def test_dropped_signal(self, mock_is_rate_limited):
        mock_is_rate_limited.is_limited = True

        mock_event_dropped = Mock()

        event_dropped.connect(mock_event_dropped)

        resp = self._postWithHeader({'sentry.interfaces.Message': {'message': u'hello'}})

        assert resp.status_code == 429, resp.content

        assert_mock_called_once_with_partial(
            mock_event_dropped,
            ip='127.0.0.1',
            project=self.project,
            signal=event_dropped,
        )

    def __init__(self, kafka_producer, kafka_admin, task_runner):
        self.events_filtered = []
        self.events_discarded = []
        self.events_dropped = []
        self.events_saved = []

        event_filtered.connect(self._event_filtered_receiver)
        event_discarded.connect(self._event_discarded_receiver)
        event_dropped.connect(self._event_dropped_receiver)
        event_saved.connect(self._event_saved_receiver)

        self.task_runner = task_runner
        self.topic_name = settings.KAFKA_OUTCOMES
        self.organization = Factories.create_organization()
        self.project = Factories.create_project(organization=self.organization)

        self.producer = self._create_producer(kafka_producer, kafka_admin)
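
Finally, every example starts from _setup_outcome_test, which is also not shown. Based on the fixture class above it plausibly does something like the following; the topic handling and the admin call are assumptions.

def _setup_outcome_test(kafka_producer, kafka_admin):
    # give each run a clean topic so stale offsets cannot leak into
    # consumers started with auto_offset_reset="earliest"
    topic_name = settings.KAFKA_OUTCOMES
    organization = Factories.create_organization()
    project = Factories.create_project(organization=organization)
    admin = kafka_admin(settings)
    admin.delete_topic(topic_name)
    producer = kafka_producer(settings)
    return producer, project.id, topic_name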