Example #1
def test_1x10_partition_split():
    """Attempt to recreate bug encountered where the first consumer does not 
    give up the partitions it no longer has a right to keep after a rebalance."""
    def bp_part_nums(broker_partitions):
        return sorted(bp.partition for bp in broker_partitions)

    fake_data = ["Hello World" for i in range(10)]
    send_to_all_partitions(10, "topic_1x10_partition_split", fake_data)
    delay()
    c1 = ZKConsumer(ZK_CONNECT_STR, 
                    "group_1x10_partition_split", 
                    "topic_1x10_partition_split",
                    autocommit=True)
    r1 = c1.fetch() 
    delay()
    assert_equal(len(r1.broker_partitions), 10, "c1 should have all 10 partitions")
    c2 = ZKConsumer(ZK_CONNECT_STR, 
                    "group_1x10_partition_split", 
                    "topic_1x10_partition_split",
                    autocommit=True)
    send_to_all_partitions(10, "topic_1x10_partition_split", fake_data)
    delay()
    # The next time c1's fetch() is called, it should rebalance and reduce itself
    # to only five partitions. We need to check what's returned in the ResultSet,
    # not just what our ZKConsumer thinks its partitions are.
    r1_a = c1.fetch()
    assert_equal(len(r1_a.broker_partitions), 5, "c1 should have reduced its partitions")
    assert_equal(bp_part_nums(r1_a.broker_partitions), [0, 1, 2, 3, 4])
    r2_a = c2.fetch()
    assert_equal(len(r2_a.broker_partitions), 5, "c2 should have reduced its partitions")
    assert_equal(bp_part_nums(r2_a.broker_partitions), [5, 6, 7, 8, 9])
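
These examples call two helpers, delay() and send_to_all_partitions(), whose definitions are not shown in this listing. The following is a hypothetical reconstruction inferred only from how the tests use them; the real helpers in brod's test_zk.py, and the producer interface used inside send_to_all_partitions, may well differ.

import time

import brod  # client under test; the producer usage below is an assumption

KAFKA_HOST = "localhost"  # placeholder broker address for this sketch
KAFKA_PORT = 9092         # placeholder broker port for this sketch


def delay(seconds=2):
    """Give Kafka and ZooKeeper a moment to propagate sends and rebalances.

    The settling time is an assumption, not a value taken from the suite.
    """
    time.sleep(seconds)


def send_to_all_partitions(num_partitions, topic, messages):
    """Send the same payload to every partition of `topic`.

    Assumes brod exposes a Kafka(host, port) connection object with a
    produce(topic, messages, partition) method; test_zk.py may build its
    producers differently.
    """
    kafka = brod.Kafka(host=KAFKA_HOST, port=KAFKA_PORT)
    for partition in range(num_partitions):
        kafka.produce(topic, messages, partition)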
Example #2
File: test_zk.py Project: amorton/brod
def test_3x5_consumers():
    """Multi-broker/partition fetches"""
    log_break("test_3x5_consumers")
    c1 = ZKConsumer(ZK_CONNECT_STR, "group_3x5_consumers", "topic_3x5_consumers")
    
    result = c1.fetch()
    assert_equals(len(result), 0, "This shouldn't error, but it should be empty")

    send_to_all_partitions("topic_3x5_consumers", ["hello"])
    time.sleep(MESSAGE_DELAY_SECS)

    # This should grab "hello" from every partition and every topic
    # c1.rebalance()
    result = c1.fetch()

    assert_equals(len(set(result.broker_partitions)), topology_3x5.total_partitions)
    for msg_set in result:
        assert_equals(msg_set.messages, ["hello"])
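
Several of these examples also assume a module-level log_break() helper, and the older-style ones call time.sleep with a MESSAGE_DELAY_SECS constant instead of delay(). A purely illustrative reconstruction, with a made-up delay value:

# Assumed settling time between producing and fetching; the value used by
# brod's real test suite is not shown in these examples.
MESSAGE_DELAY_SECS = 2


def log_break(test_name):
    """Print a visual separator so each test's output is easy to find.

    Hypothetical implementation; the real helper may format or log
    its output differently.
    """
    print("\n===== %s =====" % test_name)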
Example #3
File: test_zk.py Project: amorton/brod
def test_3x5_zookeeper_invalid_offset():
    """Test recovery from bad ZK offset value

    If ZooKeeper stored an invalid start offset, we should print an ERROR
    and start from the latest."""
    log_break("test_3x5_zookeeper_invalid_offset")

    c1 = ZKConsumer(ZK_CONNECT_STR, 
                    "group_3x5_zookeeper_invalid_offset", 
                    "topic_3x5_zookeeper_invalid_offset",
                    autocommit=True)
    
    send_to_all_partitions("topic_3x5_zookeeper_invalid_offset", ["hello"])
    time.sleep(MESSAGE_DELAY_SECS)

    # The following fetch will also save the ZK offset (autocommit=True)
    result = c1.fetch()

    # Now let's reach into ZooKeeper and manually set the offset to something 
    # out of range.
    z1 = c1._zk_util
    bps_to_fake_offsets = dict((bp, 1000) for bp in c1.broker_partitions)
    z1.save_offsets_for(c1.consumer_group, bps_to_fake_offsets)
    c1.close()
    time.sleep(MESSAGE_DELAY_SECS)

    # Now that c1 is closed, create c2, which will take over all of its partitions
    c2 = ZKConsumer(ZK_CONNECT_STR, 
                    "group_3x5_zookeeper_invalid_offset", 
                    "topic_3x5_zookeeper_invalid_offset",
                    autocommit=True)
    # This should detect that the values in ZK are bad, and put us at the real
    # end offset.
    c2.fetch()
               
    send_to_all_partitions("topic_3x5_zookeeper_invalid_offset", ["world"])
    time.sleep(MESSAGE_DELAY_SECS)

    result = c2.fetch()
    assert result
    for msg_set in result:
        assert_equals(msg_set.messages, ["world"])
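
The reach-into-ZooKeeper trick above can be reused for other kinds of offset surgery. The sketch below is built only from the attributes and calls the test already touches (_zk_util, broker_partitions, consumer_group, save_offsets_for); whether 0 is always a valid rewind target, and how safe it is to rely on the private _zk_util attribute outside a test, are assumptions.

def rewind_to_start(consumer):
    """Rewind `consumer`'s group to offset 0 on every partition it owns.

    Sketch only: uses the same private ZK utility the test pokes at above,
    and assumes offset 0 is accepted for every partition.
    """
    zk_util = consumer._zk_util
    zero_offsets = dict((bp, 0) for bp in consumer.broker_partitions)
    zk_util.save_offsets_for(consumer.consumer_group, zero_offsets)

As in the test, a consumer created afterwards in the same group would pick up the rewritten offsets from ZooKeeper.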
Example #4
def test_3x5_zookeeper_invalid_offset():
    """Test recovery from bad ZK offset value

    If ZooKeeper stored an invalid start offset, we should print an ERROR
    and start from the latest."""
    log_break("test_3x5_zookeeper_invalid_offset")

    c1 = ZKConsumer(ZK_CONNECT_STR,
                    "group_3x5_zookeeper_invalid_offset",
                    "topic_3x5_zookeeper_invalid_offset",
                    autocommit=True)

    send_to_all_partitions(5, "topic_3x5_zookeeper_invalid_offset", ["hello"])
    delay()

    # The following fetch will also save the ZK offset (autocommit=True)
    result = c1.fetch()

    # Now let's reach into ZooKeeper and manually set the offset to something
    # out of range.
    z1 = c1._zk_util
    bps_to_fake_offsets = dict((bp, 1000) for bp in c1.broker_partitions)
    z1.save_offsets_for(c1.consumer_group, bps_to_fake_offsets)
    c1.close()
    delay()

    # Now that c1 is closed, create c2, which will take over all of its partitions
    c2 = ZKConsumer(ZK_CONNECT_STR,
                    "group_3x5_zookeeper_invalid_offset",
                    "topic_3x5_zookeeper_invalid_offset",
                    autocommit=True)
    # This should detect that the values in ZK are bad, and put us at the real
    # end offset.
    c2.fetch()

    send_to_all_partitions(5, "topic_3x5_zookeeper_invalid_offset", ["world"])
    delay()

    result = c2.fetch()
    assert result
    for msg_set in result:
        assert_equals(msg_set.messages, ["world"])
Example #5
def test_3x5_consumers():
    """Multi-broker/partition fetches"""
    log_break("test_3x5_consumers")
    c1 = ZKConsumer(ZK_CONNECT_STR, "group_3x5_consumers",
                    "topic_3x5_consumers")

    result = c1.fetch()
    assert_equals(len(result), 0,
                  "This shouldn't error, but it should be empty")

    send_to_all_partitions(5, "topic_3x5_consumers", ["hello"])
    delay()

    # This should grab "hello" from every partition and every topic
    # c1.rebalance()
    result = c1.fetch()

    assert_equals(len(set(result.broker_partitions)),
                  topology_3x5.total_partitions)
    for msg_set in result:
        assert_equals(msg_set.messages, ["hello"])
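
Outside of a test, the fetch-and-iterate pattern these examples exercise boils down to a polling loop. Here is a minimal sketch using only the API shown above (ZKConsumer construction, fetch(), and MessageSet.messages); the handle callback and the poll interval are placeholders, and error handling is deliberately omitted.

import time


def consume_forever(consumer, handle, poll_interval=1.0):
    """Poll a ZKConsumer and hand every message to `handle`.

    `handle` and `poll_interval` are placeholders for application logic.
    """
    while True:
        result = consumer.fetch()      # rebalances if the group has changed
        for msg_set in result:         # one MessageSet per broker partition
            for message in msg_set.messages:
                handle(message)
        time.sleep(poll_interval)

# Usage, mirroring how the tests build their consumers:
#   consumer = ZKConsumer(ZK_CONNECT_STR, "group_example", "topic_example",
#                         autocommit=True)
#   consume_forever(consumer, handle=print)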
Example #6
File: test_zk.py Project: amorton/brod
def test_3x5_reconnects():
    """Test that we keep trying to read, even if our brokers go down.

    We're going to:

    1. Send messages to all partitions in a topic, across all brokers
    2. Do a fetch (this will cause the Consumer to rebalance itself and find
       everything).
    3. Set the Consumer to disable rebalancing.
    4. Shut down one of the brokers
    5. Assert that nothing blows up
    6. Restart the broker and assert that it continues to run.

    Note that the partition split is always based on what's in ZooKeeper. So 
    even if the broker is dead or unreachable, we still keep its partitions and 
    try to contact it. Maybe there's a firewall issue preventing our server from
    hitting it. We don't want to risk messing up other consumers by grabbing
    partitions that might belong to them.
    """
    send_to_all_partitions("topic_3x5_reconnects", ["Rusty"])
    time.sleep(MESSAGE_DELAY_SECS)

    c1 = ZKConsumer(ZK_CONNECT_STR, "group_3x5_reconnects", "topic_3x5_reconnects")
    result = c1.fetch()
    assert_equal(topology_3x5.total_partitions, len(result))
    for msg_set in result:
        assert_equal(msg_set.messages, ["Rusty"])

    # Now send another round of messages to our broker partitions
    send_to_all_partitions("topic_3x5_reconnects", ["Jack"])
    time.sleep(MESSAGE_DELAY_SECS)

    # Disable rebalancing to force the consumer to read from the broker we're 
    # going to kill, and then kill it.
    c1.disable_rebalance()
    fail_server = RunConfig.kafka_servers[0]
    fail_server.stop()
    time.sleep(MESSAGE_DELAY_SECS)

    # A straight fetch will give us a connection failure because it couldn't
    # reach the first broker. It won't advance the offsets of any of the other
    # partitions -- the whole fetch should fail without any side effects.
    assert_raises(ConnectionFailure, c1.fetch)

    # But a fetch told to ignore failures will return the results from the 
    # brokers that are still up
    result = c1.fetch(ignore_failures=True)
    assert_equal(topology_3x5.total_partitions - topology_3x5.partitions_per_broker,
                 len(result))
    for msg_set in result:
        assert_equal(msg_set.messages, ["Jack"])

    # Now we restart the failed Kafka broker, and do another fetch...
    fail_server.start()
    time.sleep(MESSAGE_DELAY_SECS)

    result = c1.fetch()
    # This should have MessageSets from all brokers (they're all reachable)
    assert_equal(topology_3x5.total_partitions, len(result))
    # But the only MessageSets that have messages in them should be from our
    # fail_server (the others were already read in a previous fetch, so will be
    # empty on this fetch).
    assert_equal(topology_3x5.total_partitions - topology_3x5.partitions_per_broker,
                 len([msg_set for msg_set in result if not msg_set]))
    # The messages from our resurrected fail_server will be "Jack"s
    assert_equal(topology_3x5.partitions_per_broker,
                 len([msg_set for msg_set in result
                      if msg_set.messages == ["Jack"]]))
Example #7
def test_3x5_reconnects():
    """Test that we keep trying to read, even if our brokers go down.

    We're going to:

    1. Send messages to all partitions in a topic, across all brokers
    2. Do a fetch (this will cause the Consumer to rebalance itself and find
       everything).
    3. Set the Consumer to disable rebalancing.
    4. Shut down one of the brokers
    5. Assert that nothing blows up
    6. Restart the broker and assert that it continues to run.

    Note that the partition split is always based on what's in ZooKeeper. So 
    even if the broker is dead or unreachable, we still keep its partitions and 
    try to contact it. Maybe there's a firewall issue preventing our server from
    hitting it. We don't want to risk messing up other consumers by grabbing
    partitions that might belong to them.
    """
    send_to_all_partitions(5, "topic_3x5_reconnects", ["Rusty"])
    delay()

    c1 = ZKConsumer(ZK_CONNECT_STR, "group_3x5_reconnects",
                    "topic_3x5_reconnects")
    result = c1.fetch()
    assert_equal(topology_3x5.total_partitions, len(result))
    for msg_set in result:
        assert_equal(msg_set.messages, ["Rusty"])

    # Now send another round of messages to our broker partitions
    send_to_all_partitions(5, "topic_3x5_reconnects", ["Jack"])
    delay()

    # Disable rebalancing to force the consumer to read from the broker we're
    # going to kill, and then kill it.
    c1.disable_rebalance()
    fail_server = RunConfig.kafka_servers[0]
    fail_server.stop()
    delay()

    # A straight fetch will give us a connection failure because it couldn't
    # reach the first broker. It won't advance the offsets of any of the other
    # partitions -- the whole fetch should fail without any side effects.
    assert_raises(ConnectionFailure, c1.fetch)

    # But a fetch told to ignore failures will return the results from the
    # brokers that are still up
    result = c1.fetch(ignore_failures=True)
    assert_equal(
        topology_3x5.total_partitions - topology_3x5.partitions_per_broker,
        len(result))
    for msg_set in result:
        assert_equal(msg_set.messages, ["Jack"])

    # Now we restart the failed Kafka broker, and do another fetch...
    fail_server.start()
    delay()

    result = c1.fetch()
    # This should have MessageSets from all brokers (they're all reachable)
    assert_equal(topology_3x5.total_partitions, len(result))
    # But the only MessageSets that have messages in them should be from our
    # fail_server (the others were already read in a previous fetch, so will be
    # empty on this fetch).
    assert_equal(
        topology_3x5.total_partitions - topology_3x5.partitions_per_broker,
        len([msg_set for msg_set in result if not msg_set]))
    # The messages from our resurrected fail_server will be "Jack"s
    assert_equal(
        topology_3x5.partitions_per_broker,
        len([msg_set for msg_set in result if msg_set.messages == ["Jack"]]))
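
Finally, the failure handling exercised by the last two examples can be wrapped up for application code: a plain fetch() raises ConnectionFailure when a broker that still owns partitions is unreachable, while fetch(ignore_failures=True) returns results from the brokers that are still up. The fallback policy and logging in this sketch are assumptions, not part of brod, and ConnectionFailure is the same exception the tests use (its import path is not shown in this listing).

import logging

log = logging.getLogger(__name__)


def fetch_with_fallback(consumer):
    """Fetch from every reachable broker, tolerating unreachable ones.

    Sketch only: messages on the dead broker's partitions are simply skipped
    until it comes back, which may or may not be acceptable for a given
    application.
    """
    try:
        return consumer.fetch()
    except ConnectionFailure:  # exception the tests above assert_raises on
        log.warning("Broker unreachable; fetching from the remaining brokers")
        return consumer.fetch(ignore_failures=True)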