Example #1
    def test_producer_sync_fail_on_error(self):
        error = FailedPayloadsError('failure')
        with patch.object(KafkaClient, 'load_metadata_for_topics'):
            with patch.object(KafkaClient,
                              'get_partition_ids_for_topic',
                              return_value=[0, 1]):
                with patch.object(KafkaClient,
                                  '_send_broker_aware_request',
                                  return_value=[error]):

                    client = KafkaClient(MagicMock())
                    producer = SimpleProducer(client,
                                              async=False,
                                              sync_fail_on_error=False)

                    # This should not raise
                    (response, ) = producer.send_messages(
                        'foobar', b'test message')
                    self.assertEqual(response, error)

                    producer = SimpleProducer(client,
                                              async=False,
                                              sync_fail_on_error=True)
                    with self.assertRaises(FailedPayloadsError):
                        producer.send_messages('foobar', b'test message')
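
The behavior exercised above maps onto the following caller-side pattern. This is a minimal sketch assuming kafka-python's legacy SimpleProducer/KafkaClient API; the broker address and topic name are placeholders.

from kafka import KafkaClient, SimpleProducer
from kafka.common import FailedPayloadsError

client = KafkaClient('localhost:9092')  # placeholder broker address

# sync_fail_on_error=True: a failed payload surfaces as a raised exception.
producer = SimpleProducer(client, sync_fail_on_error=True)
try:
    producer.send_messages('foobar', b'test message')
except FailedPayloadsError:
    pass  # e.g. refresh metadata and retry, or surface the error upstream

# sync_fail_on_error=False: the error object is returned in the response list.
producer = SimpleProducer(client, sync_fail_on_error=False)
responses = producer.send_messages('foobar', b'test message')
failures = [r for r in responses if isinstance(r, FailedPayloadsError)]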
Example #2
 def test_populate_topic_to_offset_map(self, producer, topic):
     response_one = ProduceResponse(topic, partition=0, error=0, offset=1)
     response_two = FailedPayloadsError(payload=mock.Mock())
     responses = [response_one, response_two]
     topics_map = producer._kafka_producer._populate_topics_to_offset_map(
         responses)
     assert len(topics_map) == 1
     assert topic in topics_map
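
The helper under test is internal to the producer, so only its observable behavior is pinned down here: failed payloads are skipped and offsets are recorded for successful responses only. A hypothetical re-implementation consistent with these assertions (not the real code) might look like:

from kafka.common import ProduceResponse

def populate_topics_to_offset_map(responses):
    # Hypothetical sketch: ignore error objects such as FailedPayloadsError
    # and keep the offset of each successful ProduceResponse, keyed by topic.
    topics_map = {}
    for response in responses:
        if isinstance(response, ProduceResponse) and response.error == 0:
            topics_map[response.topic] = response.offset
    return topics_map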
Example #3
 def send_side_effect(reqs, *args, **kwargs):
     if self.client.is_first_time:
         self.client.is_first_time = False
         return [FailedPayloadsError(req) for req in reqs]
     responses = []
     for req in reqs:
         offset = offsets[req.topic][req.partition]
         offsets[req.topic][req.partition] += len(req.messages)
         responses.append(
             ProduceResponse(req.topic, req.partition, 0, offset))
     return responses
Example #4
 def test_offset_retry_on_network_flake(self, consumer_instance):
     mock_offsets = {'test-topic': {0: 10}}
     exception = FailedPayloadsError("Network flake!")
     with mock.patch.object(
             consumer_instance.kafka_client,
             'send_offset_commit_request',
             side_effect=[exception, exception, exception,
                          None]) as mock_send_offsets, mock.patch.object(
                              consumer_instance,
                              '_get_offsets_map_to_be_committed',
                              return_value=mock_offsets):
         consumer_instance.commit_offsets(mock_offsets)
         assert mock_send_offsets.call_count == 4
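
The assertion above (four calls for three transient failures) reflects a simple retry-until-success policy around the commit call. A minimal sketch of such a wrapper, assuming the commit callable raises FailedPayloadsError on transient network failures; the retry count and backoff are placeholders:

import time

from kafka.common import FailedPayloadsError

def commit_with_retries(commit_fn, max_retries=3, backoff_seconds=0.1):
    # Retry a commit callable when it fails with FailedPayloadsError, which
    # the legacy kafka-python client uses for transient broker/network errors.
    for attempt in range(max_retries + 1):
        try:
            return commit_fn()
        except FailedPayloadsError:
            if attempt == max_retries:
                raise
            time.sleep(backoff_seconds * (attempt + 1))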
Example #5
    def test_publish_one_msg_succeeds_one_fails_after_retry(
            self, message, another_message, topic, producer):
        # TODO(DATAPIPE-606|clin) investigate better way than mocking response
        mock_response = ProduceResponse(topic, partition=0, error=0, offset=1)
        fail_response = FailedPayloadsError(payload=mock.Mock())
        side_effect = ([[mock_response, fail_response]] +
                       [[fail_response]] * self.max_retry_count)
        with mock.patch.object(
                producer._kafka_producer.kafka_client,
                'send_produce_request',
                side_effect=side_effect), pytest.raises(MaxRetryError) as e:
            producer.publish(message)
            producer.publish(another_message)
            producer.flush()

            self.assert_last_retry_result(e.value.last_result,
                                          another_message,
                                          expected_published_msgs_count=1)
Example #6
                    failed = True
            except ConnectionError as e:
                log.warning("Could not send request [%s] to server %s: %s",
                            request, conn, e)
                failed = True

            if failed:
                failed_payloads += payloads
                self.reset_all_metadata()
                continue

            for response in decoder_fn(response):
                acc[(response.topic, response.partition)] = response

        if failed_payloads:
            raise FailedPayloadsError(failed_payloads)

        # Order the accumulated responses by the original key order
        return (acc[k] for k in original_keys) if acc else ()

    def __repr__(self):
        return '<KafkaClient client_id=%s>' % (self.client_id)

    def _raise_on_response_error(self, resp):
        if resp.error == ErrorMapping.NO_ERROR:
            return

        if resp.error in (ErrorMapping.UNKNOWN_TOPIC_OR_PARTITON,
                          ErrorMapping.NOT_LEADER_FOR_PARTITION):
            self.reset_topic_metadata(resp.topic)
Example #7
 def mock_offset_commit_request(group, payloads, **kwargs):
     raise FailedPayloadsError(payloads[0])
Example #8
 def failed_payloads(payload):
     return FailedPayloadsError(payload)
Example #9
    def _send_broker_aware_request(self, payloads, encoder_fn, decoder_fn):
        """
        Group a list of request payloads by topic+partition and send them to
        the leader broker for that partition using the supplied encode/decode
        functions

        Params
        ======
        payloads: list of object-like entities with a topic (str) and
                  partition (int) attribute
        encode_fn: a method to encode the list of payloads to a request body,
                   must accept client_id, correlation_id, and payloads as
                   keyword arguments
        decode_fn: a method to decode a response body into response objects.
                   The response objects must be object-like and have topic
                   and partition attributes

        Return
        ======
        List of response objects in the same order as the supplied payloads
        """

        # Group the requests by topic+partition
        original_keys = []
        payloads_by_broker = collections.defaultdict(list)

        for payload in payloads:
            leader = self._get_leader_for_partition(payload.topic,
                                                    payload.partition)

            payloads_by_broker[leader].append(payload)
            original_keys.append((payload.topic, payload.partition))

        # Accumulate the responses in a dictionary
        acc = {}

        # keep a list of payloads that were failed to be sent to brokers
        failed_payloads = []

        # For each broker, send the list of request payloads
        for broker, payloads in payloads_by_broker.items():
            conn = self._get_conn(broker.host.decode('utf-8'), broker.port)
            requestId = self._next_id()
            request = encoder_fn(client_id=self.client_id,
                                 correlation_id=requestId, payloads=payloads)

            failed = False
            # Send the request, recv the response
            try:
                conn.send(requestId, request)
                if decoder_fn is None:
                    continue
                try:
                    response = conn.recv(requestId)
                except ConnectionError as e:
                    log.warning("Could not receive response to request [%s] "
                                "from server %s: %s", binascii.b2a_hex(request), conn, e)
                    failed = True
            except ConnectionError as e:
                log.warning("Could not send request [%s] to server %s: %s",
                            binascii.b2a_hex(request), conn, e)
                failed = True

            if failed:
                failed_payloads += payloads
                self.reset_all_metadata()
                continue

            for response in decoder_fn(response):
                acc[(response.topic, response.partition)] = response

        if failed_payloads:
            raise FailedPayloadsError(failed_payloads)

        # Order the accumulated responses by the original key order
        return (acc[k] for k in original_keys) if acc else ()
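
In this variant the client raises instead of returning error objects, and the failed payloads travel in the exception itself. A hedged caller-side sketch; the send_produce_request call and the single-retry policy are assumptions about how surrounding code might use this method:

from kafka.common import FailedPayloadsError

def produce_with_one_retry(client, payloads):
    # The raising variant of _send_broker_aware_request passes the list of
    # failed payloads as the exception's first argument.
    try:
        return client.send_produce_request(payloads)
    except FailedPayloadsError as e:
        failed_payloads = e.args[0]
        client.load_metadata_for_topics()  # metadata was reset on failure; refresh it
        return client.send_produce_request(failed_payloads)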
Example #10
 def send_side_effect(reqs, *args, **kwargs):
     return [FailedPayloadsError(req) for req in reqs]
Example #11
    def _send_consumer_aware_request(self, group, payloads, encoder_fn,
                                     decoder_fn):
        """
        Send a list of requests to the consumer coordinator for the group
        specified using the supplied encode/decode functions. As the payloads
        that use consumer-aware requests do not contain the group (e.g.
        OffsetFetchRequest), all payloads must be for a single group.

        Arguments:

        group: the name of the consumer group (str) the payloads are for
        payloads: list of object-like entities with topic (str) and
            partition (int) attributes; payloads with duplicate
            topic+partition are not supported.

        encode_fn: a method to encode the list of payloads to a request body,
            must accept client_id, correlation_id, and payloads as
            keyword arguments

        decode_fn: a method to decode a response body into response objects.
            The response objects must be object-like and have topic
            and partition attributes

        Returns:

        List of response objects in the same order as the supplied payloads
        """
        # encoders / decoders do not maintain ordering currently
        # so we need to keep this so we can rebuild order before returning
        original_ordering = [(p.topic, p.partition) for p in payloads]

        broker = self._get_coordinator_for_group(group)

        # Send the list of request payloads and collect the responses and
        # errors
        responses = {}
        requestId = self._next_id()
        log.debug('Request %s to %s: %s', requestId, broker, payloads)
        request = encoder_fn(client_id=self.client_id,
                             correlation_id=requestId,
                             payloads=payloads)

        # Send the request, recv the response
        try:
            host, port, afi = get_ip_port_afi(broker.host)
            conn = self._get_conn(host, broker.port, afi)
            conn.send(requestId, request)

        except ConnectionError as e:
            log.warning(
                'ConnectionError attempting to send request %s '
                'to server %s: %s', requestId, broker, e)

            for payload in payloads:
                topic_partition = (payload.topic, payload.partition)
                responses[topic_partition] = FailedPayloadsError(payload)

        # No exception, try to get response
        else:

            # decoder_fn=None signals that the server is not expected to
            # send a response.  This probably only applies to
            # ProduceRequest w/ acks = 0
            if decoder_fn is None:
                log.debug(
                    'Request %s does not expect a response '
                    '(skipping conn.recv)', requestId)
                for payload in payloads:
                    topic_partition = (payload.topic, payload.partition)
                    responses[topic_partition] = None
                return []

            try:
                response = conn.recv(requestId)
            except ConnectionError as e:
                log.warning(
                    'ConnectionError attempting to receive a '
                    'response to request %s from server %s: %s', requestId,
                    broker, e)

                for payload in payloads:
                    topic_partition = (payload.topic, payload.partition)
                    responses[topic_partition] = FailedPayloadsError(payload)

            else:
                _resps = []
                for payload_response in decoder_fn(response):
                    topic_partition = (payload_response.topic,
                                       payload_response.partition)
                    responses[topic_partition] = payload_response
                    _resps.append(payload_response)
                log.debug('Response %s: %s', requestId, _resps)

        # Return responses in the same order as provided
        return [responses[tp] for tp in original_ordering]
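
Because failures are returned in-line as FailedPayloadsError objects rather than raised, callers of the consumer-aware path have to scan the response list themselves. A minimal self-contained sketch of that check:

from kafka.common import FailedPayloadsError

def split_consumer_responses(responses):
    # Separate successful responses from FailedPayloadsError entries; each
    # error instance carries the payload it was created from.
    failures = [r for r in responses if isinstance(r, FailedPayloadsError)]
    successes = [r for r in responses if not isinstance(r, FailedPayloadsError)]
    return successes, failures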
Example #12
 def failed_payloads(payloads):
     for payload in payloads:
         topic_partition = (str(payload.topic), payload.partition)
         responses[topic_partition] = FailedPayloadsError(payload)
Example #13
    def _send_broker_aware_request(self, payloads, encoder_fn, decoder_fn):
        """
        Group a list of request payloads by topic+partition and send them to
        the leader broker for that partition using the supplied encode/decode
        functions

        Arguments:

        payloads: list of object-like entities with a topic (str) and
            partition (int) attribute; payloads with duplicate topic-partitions
            are not supported.

        encode_fn: a method to encode the list of payloads to a request body,
            must accept client_id, correlation_id, and payloads as
            keyword arguments

        decode_fn: a method to decode a response body into response objects.
            The response objects must be object-like and have topic
            and partition attributes

        Returns:

        List of response objects in the same order as the supplied payloads
        """
        # encoders / decoders do not maintain ordering currently
        # so we need to keep this so we can rebuild order before returning
        original_ordering = [(p.topic, p.partition) for p in payloads]

        # Group the requests by topic+partition
        brokers_for_payloads = []
        payloads_by_broker = collections.defaultdict(list)

        responses = {}
        for payload in payloads:
            try:
                leader = self._get_leader_for_partition(
                    payload.topic, payload.partition)
                payloads_by_broker[leader].append(payload)
                brokers_for_payloads.append(leader)
            except KafkaUnavailableError as e:
                log.warning(
                    'KafkaUnavailableError attempting to send request '
                    'on topic %s partition %d', payload.topic,
                    payload.partition)
                topic_partition = (payload.topic, payload.partition)
                responses[topic_partition] = FailedPayloadsError(payload)

        # For each broker, send the list of request payloads
        # and collect the responses and errors
        broker_failures = []

        # For each KafkaConnection keep the real socket so that we can use
        # a select to perform unblocking I/O
        connections_by_socket = {}
        for broker, payloads in payloads_by_broker.items():
            requestId = self._next_id()
            log.debug('Request %s to %s: %s', requestId, broker, payloads)
            request = encoder_fn(client_id=self.client_id,
                                 correlation_id=requestId,
                                 payloads=payloads)

            # Send the request, recv the response
            try:
                conn = self._get_conn(broker.host.decode('utf-8'), broker.port)
                conn.send(requestId, request)

            except ConnectionError as e:
                broker_failures.append(broker)
                log.warning(
                    'ConnectionError attempting to send request %s '
                    'to server %s: %s', requestId, broker, e)

                for payload in payloads:
                    topic_partition = (payload.topic, payload.partition)
                    responses[topic_partition] = FailedPayloadsError(payload)

            # No exception, try to get response
            else:

                # decoder_fn=None signals that the server is not expected to
                # send a response.  This probably only applies to
                # ProduceRequest w/ acks = 0
                if decoder_fn is None:
                    log.debug(
                        'Request %s does not expect a response '
                        '(skipping conn.recv)', requestId)
                    for payload in payloads:
                        topic_partition = (payload.topic, payload.partition)
                        responses[topic_partition] = None
                    continue
                else:
                    connections_by_socket[conn.get_connected_socket()] = (
                        conn, broker, requestId)

        conn = None
        while connections_by_socket:
            sockets = connections_by_socket.keys()
            rlist, _, _ = select.select(sockets, [], [], self.timeout)
            if rlist:
                conn, broker, requestId = connections_by_socket.pop(rlist[0])
                try:
                    response = conn.recv(requestId)
                except ConnectionError as e:
                    broker_failures.append(broker)
                    log.warning(
                        'ConnectionError attempting to receive a '
                        'response to request %s from server %s: %s', requestId,
                        broker, e)

                    for payload in payloads_by_broker[broker]:
                        topic_partition = (payload.topic, payload.partition)
                        responses[topic_partition] = FailedPayloadsError(
                            payload)

                else:
                    _resps = []
                    for payload_response in decoder_fn(response):
                        topic_partition = (payload_response.topic,
                                           payload_response.partition)
                        responses[topic_partition] = payload_response
                        _resps.append(payload_response)
                    log.debug('Response %s: %s', requestId, _resps)
            # If the timeout expires rlist is empty and
            # all pending requests are considered failed
            else:
                for conn, broker, requestId in connections_by_socket.values():
                    conn.close()
                    broker_failures.append(broker)
                    log.warning(
                        'Socket timeout error attempting to receive a '
                        'response to request %s from server %s', requestId,
                        broker)
                    for payload in payloads_by_broker[broker]:
                        topic_partition = (payload.topic, payload.partition)
                        responses[topic_partition] = FailedPayloadsError(
                            payload)

        # Connection errors generally mean stale metadata
        # although sometimes it means incorrect api request
        # Unfortunately there is no good way to tell the difference
        # so we'll just reset metadata on all errors to be safe
        if broker_failures:
            self.reset_all_metadata()

        # Return responses in the same order as provided
        return [responses[tp] for tp in original_ordering]
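
A select timeout converts every still-pending request into FailedPayloadsError results, so the client's timeout setting bounds how long a broker-aware request can block. A minimal configuration sketch, assuming the legacy KafkaClient constructor; the broker address and timeout value are placeholders:

from kafka import KafkaClient

# With a 10-second socket/select timeout, any broker that has not answered
# in time has its payloads reported as FailedPayloadsError instead of hanging.
client = KafkaClient('localhost:9092', timeout=10)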
Example #14
    def _send_broker_aware_request(self, payloads, encoder_fn, decoder_fn):
        """
        Group a list of request payloads by topic+partition and send them to
        the leader broker for that partition using the supplied encode/decode
        functions

        Arguments:

        payloads: list of object-like entities with a topic (str) and
            partition (int) attribute

        encode_fn: a method to encode the list of payloads to a request body,
            must accept client_id, correlation_id, and payloads as
            keyword arguments

        decode_fn: a method to decode a response body into response objects.
            The response objects must be object-like and have topic
            and partition attributes

        Returns:

        List of response objects in the same order as the supplied payloads
        """

        log.debug("Sending Payloads: %s" % payloads)

        # Group the requests by topic+partition
        brokers_for_payloads = []
        payloads_by_broker = collections.defaultdict(list)

        for payload in payloads:
            leader = self._get_leader_for_partition(payload.topic,
                                                    payload.partition)

            payloads_by_broker[leader].append(payload)
            brokers_for_payloads.append(leader)

        # For each broker, send the list of request payloads
        # and collect the responses and errors
        responses_by_broker = collections.defaultdict(list)
        broker_failures = []
        for broker, payloads in payloads_by_broker.items():
            conn = self._get_conn(broker.host.decode('utf-8'), broker.port)
            requestId = self._next_id()
            request = encoder_fn(client_id=self.client_id,
                                 correlation_id=requestId,
                                 payloads=payloads)

            # Send the request, recv the response
            try:
                conn.send(requestId, request)

            except ConnectionError as e:
                broker_failures.append(broker)
                log.warning("Could not send request [%s] to server %s: %s",
                            binascii.b2a_hex(request), conn, e)

                for payload in payloads:
                    responses_by_broker[broker].append(
                        FailedPayloadsError(payload))

            # No exception, try to get response
            else:

                # decoder_fn=None signals that the server is not expected to
                # send a response.  This probably only applies to
                # ProduceRequest w/ acks = 0
                if decoder_fn is None:
                    for payload in payloads:
                        responses_by_broker[broker].append(None)
                    continue

                try:
                    response = conn.recv(requestId)
                except ConnectionError as e:
                    broker_failures.append(broker)
                    log.warning(
                        "Could not receive response to request [%s] "
                        "from server %s: %s", binascii.b2a_hex(request), conn,
                        e)

                    for payload in payloads:
                        responses_by_broker[broker].append(
                            FailedPayloadsError(payload))

                else:

                    for payload_response in decoder_fn(response):
                        responses_by_broker[broker].append(payload_response)

        # Connection errors generally mean stale metadata
        # although sometimes it means incorrect api request
        # Unfortunately there is no good way to tell the difference
        # so we'll just reset metadata on all errors to be safe
        if broker_failures:
            self.reset_all_metadata()

        # Return responses in the same order as provided
        responses_by_payload = [
            responses_by_broker[broker].pop(0)
            for broker in brokers_for_payloads
        ]
        log.debug('Responses: %s', responses_by_payload)
        return responses_by_payload
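
Since responses come back in the same order as the supplied payloads, a caller can pair the two lists and retry only the payloads that actually failed. A hedged sketch of that pairing (the retry itself is left to the surrounding code):

from kafka.common import FailedPayloadsError

def failed_payloads_only(payloads, responses):
    # Responses are ordered to match the payloads, so zip() pairs each payload
    # with its own result; keep the ones whose result is a FailedPayloadsError.
    return [payload for payload, response in zip(payloads, responses)
            if isinstance(response, FailedPayloadsError)]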