Example #1
import time

from kafka import KafkaProducer
from kafka.structs import TopicPartition

from test.fixtures import random_string, version


def test_kafka_producer_proper_record_metadata(kafka_broker, compression):
    connect_str = ':'.join([kafka_broker.host, str(kafka_broker.port)])
    producer = KafkaProducer(bootstrap_servers=connect_str,
                             retries=5,
                             max_block_ms=30000,
                             compression_type=compression)
    # Highest message-format version (magic byte) usable with this broker:
    # 0 = legacy, 1 = adds timestamps (0.10+), 2 = adds headers (0.11+)
    magic = producer._max_usable_produce_magic()

    # record headers are supported in 0.11.0
    if version() < (0, 11, 0):
        headers = None
    else:
        headers = [("Header Key", b"Header Value")]

    topic = random_string(5)
    future = producer.send(
        topic,
        value=b"Simple value", key=b"Simple key", headers=headers, timestamp_ms=9999999,
        partition=0)
    record = future.get(timeout=5)
    assert record is not None
    assert record.topic == topic
    assert record.partition == 0
    assert record.topic_partition == TopicPartition(topic, 0)
    assert record.offset == 0
    if magic >= 1:
        assert record.timestamp == 9999999
    else:
        assert record.timestamp == -1  # NO_TIMESTAMP

    if magic >= 2:
        # v2 record batches no longer carry a per-record checksum
        assert record.checksum is None
    elif magic == 1:
        assert record.checksum == 1370034956
    else:
        assert record.checksum == 3296137851

    assert record.serialized_key_size == 10    # len(b"Simple key")
    assert record.serialized_value_size == 12  # len(b"Simple value")
    if headers:
        # len("Header Key") + len(b"Header Value") == 10 + 12
        assert record.serialized_header_size == 22

    # generated timestamp case is skipped for broker 0.9 and below
    if magic == 0:
        return

    send_time = time.time() * 1000
    future = producer.send(
        topic,
        value=b"Simple value", key=b"Simple key", timestamp_ms=None,
        partition=0)
    record = future.get(timeout=5)
    assert abs(record.timestamp - send_time) <= 1000  # Allow 1s deviation
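
Both tests on this page receive `compression` as a test argument; one way to supply it is pytest parametrization, sketched below. This is a minimal sketch, and the codec list is an assumption based on the codecs kafka-python supports, not necessarily the repository's exact decorator:

import pytest

# Hypothetical parametrization supplying `compression` to the tests above.
@pytest.mark.parametrize("compression", [None, 'gzip', 'snappy', 'lz4'])
def test_kafka_producer_proper_record_metadata(kafka_broker, compression):
    ...  # body as in Example #1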
Example #2
import platform

from kafka import KafkaConsumer, KafkaProducer

from test.fixtures import random_string, version


def test_end_to_end(kafka_broker, compression):

    if compression == 'lz4':
        # LZ4 requires 0.8.2
        if version() < (0, 8, 2):
            return
        # python-lz4 crashes on older versions of pypy
        elif platform.python_implementation() == 'PyPy':
            return

    connect_str = ':'.join([kafka_broker.host, str(kafka_broker.port)])
    producer = KafkaProducer(bootstrap_servers=connect_str,
                             retries=5,
                             max_block_ms=30000,
                             compression_type=compression,
                             value_serializer=str.encode)
    consumer = KafkaConsumer(bootstrap_servers=connect_str,
                             group_id=None,
                             consumer_timeout_ms=30000,
                             auto_offset_reset='earliest',
                             value_deserializer=bytes.decode)

    topic = random_string(5)

    messages = 100
    futures = []
    for i in range(messages):
        futures.append(producer.send(topic, 'msg %d' % i))
    ret = [f.get(timeout=30) for f in futures]
    assert len(ret) == messages
    producer.close()

    consumer.subscribe([topic])
    msgs = set()
    for i in range(messages):
        try:
            msgs.add(next(consumer).value)
        except StopIteration:
            break

    assert msgs == set(['msg %d' % (i,) for i in range(messages)])
    consumer.close()
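
The same round trip can be sketched without the test fixtures. This is a minimal sketch, assuming a broker reachable at localhost:9092 and an arbitrary topic name:

from kafka import KafkaConsumer, KafkaProducer

# Minimal round-trip sketch (assumes a local broker at localhost:9092).
producer = KafkaProducer(bootstrap_servers='localhost:9092',
                         value_serializer=str.encode)
consumer = KafkaConsumer('demo-topic',
                         bootstrap_servers='localhost:9092',
                         group_id=None,
                         auto_offset_reset='earliest',
                         consumer_timeout_ms=10000,
                         value_deserializer=bytes.decode)

for i in range(3):
    producer.send('demo-topic', 'msg %d' % i).get(timeout=10)
producer.close()

received = [msg.value for msg in consumer]  # stops after consumer_timeout_ms
assert received[:3] == ['msg 0', 'msg 1', 'msg 2']
consumer.close()

The `consumer_timeout_ms` setting makes iteration raise StopIteration instead of blocking forever once the broker has no more messages, which is also why Example #2 can iterate the consumer directly.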
Example #3
import pytest
from kafka.vendor import six

from kafka.conn import ConnectionStates
from kafka.consumer.group import KafkaConsumer
from kafka.coordinator.base import MemberState
from kafka.structs import TopicPartition

from test.fixtures import random_string, version


def get_connect_str(kafka_broker):
    return kafka_broker.host + ':' + str(kafka_broker.port)


@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_consumer(kafka_broker, topic, version):
    # The `topic` fixture is included because
    # 0.8.2 brokers need a topic to function well
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    consumer.poll(500)
    assert len(consumer._client._conns) > 0
    node_id = list(consumer._client._conns.keys())[0]
    assert consumer._client._conns[node_id].state is ConnectionStates.CONNECTED
    consumer.close()


@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_consumer_topics(kafka_broker, topic, version):
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    # Necessary to drive the IO
    consumer.poll(500)
    assert topic in consumer.topics()
    assert len(consumer.partitions_for_topic(topic)) > 0
    consumer.close()
Example #4
import pytest
from kafka.vendor import six

from kafka.conn import ConnectionStates
from kafka.consumer.group import KafkaConsumer
from kafka.coordinator.base import MemberState, Generation
from kafka.structs import TopicPartition

from test.fixtures import random_string, version


def get_connect_str(kafka_broker):
    return kafka_broker.host + ':' + str(kafka_broker.port)


@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_consumer(kafka_broker, topic, version):
    # The `topic` fixture is included because
    # 0.8.2 brokers need a topic to function well
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    consumer.poll(500)
    assert len(consumer._client._conns) > 0
    node_id = list(consumer._client._conns.keys())[0]
    assert consumer._client._conns[node_id].state is ConnectionStates.CONNECTED
    consumer.close()


@pytest.mark.skipif(version() < (0, 9), reason='Unsupported Kafka Version')
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_group(kafka_broker, topic):
    num_partitions = 4
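
test_group exercises consumer-group balancing: several KafkaConsumer instances sharing a group_id should have the topic's partitions split between them by the group coordinator. A rough sketch of that behavior, assuming a local broker at localhost:9092 and a multi-partition topic named 'group-demo' (both placeholders):

from kafka import KafkaConsumer

# Two consumers in one group share partitions (sketch, not the test itself).
c1 = KafkaConsumer('group-demo', bootstrap_servers='localhost:9092',
                   group_id='demo-group')
c2 = KafkaConsumer('group-demo', bootstrap_servers='localhost:9092',
                   group_id='demo-group')

# poll() drives the join/rebalance protocol; a real test loops until
# both consumers report a non-empty assignment.
c1.poll(timeout_ms=5000)
c2.poll(timeout_ms=5000)
print(c1.assignment(), c2.assignment())  # disjoint sets covering the topic
c1.close()
c2.close()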
Example #6
import pytest

from . import unittest
from kafka import (KafkaConsumer, MultiProcessConsumer, SimpleConsumer,
                   create_message, create_gzip_message, KafkaProducer)
from kafka.consumer.base import MAX_FETCH_BUFFER_SIZE_BYTES
from kafka.errors import (ConsumerFetchSizeTooSmall, OffsetOutOfRangeError,
                          UnsupportedVersionError, KafkaTimeoutError,
                          UnsupportedCodecError)
from kafka.structs import (ProduceRequestPayload, TopicPartition,
                           OffsetAndTimestamp)

from test.fixtures import ZookeeperFixture, KafkaFixture, random_string, version
from test.testutil import KafkaIntegrationTestCase, kafka_versions, Timer


@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_kafka_consumer(kafka_producer, topic, kafka_consumer_factory):
    """Test KafkaConsumer"""
    kafka_consumer = kafka_consumer_factory(auto_offset_reset='earliest')

    # TODO replace this with a `send_messages()` pytest fixture
    # as we will likely need this elsewhere
    for i in range(0, 100):
        kafka_producer.send(topic, partition=0, value=str(i).encode())
    for i in range(100, 200):
        kafka_producer.send(topic, partition=1, value=str(i).encode())
    kafka_producer.flush()

    cnt = 0
    messages = {0: set(), 1: set()}
    for message in kafka_consumer:
        cnt += 1
        messages[message.partition].add(message.offset)
        if cnt >= 200:
            break

    assert len(messages[0]) == 100
    assert len(messages[1]) == 100
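
The TODO above asks for a reusable `send_messages()` pytest fixture. One possible shape, sketched here with an assumed name and signature (any fixture kafka-python eventually ships may look different):

import pytest

@pytest.fixture
def send_messages(kafka_producer):
    # Hypothetical fixture: send str(i) payloads for each i in value_range
    # to one partition of a topic, then flush.
    def _send(topic, partition, value_range):
        for i in value_range:
            kafka_producer.send(topic, partition=partition,
                                value=str(i).encode())
        kafka_producer.flush()
    return _send

With such a fixture, the two send loops above collapse to send_messages(topic, 0, range(0, 100)) and send_messages(topic, 1, range(100, 200)).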