def test_check_mode():
    """
    Check that check mode performs no changes.
    """
    # Given: an existing topic with the default configuration
    topic_name = get_topic_name()
    ensure_kafka_topic(localhost, topic_defaut_configuration, topic_name)
    time.sleep(0.5)

    # When: a deletion is requested in check mode
    check_configuration = topic_defaut_configuration.copy()
    check_configuration['state'] = 'absent'
    ensure_kafka_topic_with_zk(
        localhost, check_configuration, topic_name, check=True)
    time.sleep(0.5)

    # When: partitions/replicas/options are updated in check mode
    check_configuration.update({
        'state': 'present',
        'partitions': topic_defaut_configuration['partitions'] + 1,
        'replica_factor': topic_defaut_configuration['replica_factor'] + 1,
        'options': {
            'retention.ms': 1000
        }
    })
    ensure_kafka_topic_with_zk(
        localhost, check_configuration, topic_name, check=True)
    time.sleep(0.5)

    # When: a brand new topic is "created" in check mode
    new_topic_name = get_topic_name()
    ensure_kafka_topic_with_zk(
        localhost, check_configuration, new_topic_name, check=True)
    time.sleep(0.5)

    # Then: the original topic is untouched and the new topic absent
    expected_topic_configuration = topic_defaut_configuration.copy()
    for kafka_host, host_vars in kafka_hosts.items():
        kfk_addr = "%s:9092" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        check_configured_topic(kafka_host, expected_topic_configuration,
                               topic_name, kfk_addr)
        check_configuration['state'] = 'absent'
        check_configured_topic(kafka_host, check_configuration,
                               new_topic_name, kfk_addr)
# Example 2
def test_kafka_info_topic(host):
    """
    Check that topic information can be retrieved.
    """
    # Given: one topic with defaults and one with a custom min ISR
    topic_name = get_topic_name()
    ensure_topic(host, topic_defaut_configuration, topic_name)
    time.sleep(0.3)

    topic_test_name = get_topic_name()
    topic_test_configuration = topic_defaut_configuration.copy()
    topic_test_configuration['options'] = {'min.insync.replicas': 2}
    ensure_topic(host, topic_test_configuration, topic_test_name)
    time.sleep(0.3)
    produce_and_consume_topic(topic_name, 10, get_consumer_group())
    time.sleep(0.3)
    # When
    results = call_kafka_info(host, {'resource': 'topic'})
    # Then
    for result in results:
        topics = result['ansible_module_results']
        assert topic_name in topics
        # internal topics must not be listed by default
        assert '__consumer_offsets' not in topics
        for name, topic_info in topics.items():
            for partition_info in topic_info.values():
                assert 'earliest_offset' in partition_info
                assert 'latest_offset' in partition_info
                assert partition_info['earliest_offset'] >= 0
                if name == topic_name:
                    # exactly the 10 produced messages are visible
                    assert partition_info['latest_offset'] == 10
                    assert partition_info['at_min_isr']
                    assert not partition_info['under_replicated']
                    assert not partition_info['under_min_isr']
                    assert not partition_info['unavailable_partition']
                else:
                    assert partition_info['latest_offset'] >= 0
                    if name == topic_test_name:
                        # min.insync.replicas=2 cannot be met here
                        assert not partition_info['at_min_isr']
                        assert not partition_info['under_replicated']
                        assert partition_info['under_min_isr']
                        assert not partition_info['unavailable_partition']
def test_update_replica_factor():
    """
    Check that the replication factor can be increased.
    """
    # Given: a topic created with the default configuration
    topic_name = get_topic_name()
    ensure_kafka_topic(localhost, topic_defaut_configuration, topic_name)
    time.sleep(0.5)
    # When: replica_factor is bumped to 2 (applied twice for idempotency)
    updated_configuration = dict(topic_defaut_configuration)
    updated_configuration['replica_factor'] = 2
    ensure_idempotency(ensure_kafka_topic_with_zk, localhost,
                       updated_configuration, topic_name)
    time.sleep(0.5)
    # Then: every broker reports the new replication factor
    for kafka_host, host_vars in kafka_hosts.items():
        broker_addr = "%s:9092" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        check_configured_topic(kafka_host, updated_configuration,
                               topic_name, broker_addr)
def test_delete_topic():
    """
    Check that an existing topic can be deleted.
    """
    # Given: a topic created with the default configuration
    topic_name = get_topic_name()
    ensure_kafka_topic(localhost, topic_defaut_configuration, topic_name)
    time.sleep(0.5)
    # When: state 'absent' is applied (twice, to verify idempotency)
    deletion_configuration = dict(topic_defaut_configuration)
    deletion_configuration['state'] = 'absent'
    ensure_idempotency(ensure_kafka_topic, localhost,
                       deletion_configuration, topic_name)
    time.sleep(0.5)
    # Then: every broker reports the topic as absent
    for kafka_host, host_vars in kafka_hosts.items():
        broker_addr = "%s:9092" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        check_configured_topic(kafka_host, deletion_configuration,
                               topic_name, broker_addr)
# Example 5
def test_consumer_lag(host):
    """
    Check that the global consumer lag can be computed.
    """
    # Given: a topic and a fresh consumer group
    topic_name = get_topic_name()
    ensure_topic(host, topic_defaut_configuration, topic_name)
    time.sleep(0.3)
    consumer_group = get_consumer_group()
    total_msg = 42
    # When: messages are produced and partially consumed
    produce_and_consume_topic(topic_name, total_msg, consumer_group)
    time.sleep(0.3)
    # Then: the reported lag is the produced count minus one consumed
    lags = call_kafka_stat_lag(host, {'consummer_group': consumer_group})
    for lag_result in lags:
        payload = json.loads(lag_result['msg'])
        assert payload['global_lag_count'] == total_msg - 1
# Example 6
def test_kafka_info_topics_config_not_include_defaults(host):
    """
    Check topic config retrieval when broker defaults are excluded.
    """
    # Given: a topic with an explicit retention.ms override
    topic_name = get_topic_name()
    topic_configuration = topic_defaut_configuration.copy()
    topic_configuration['options'] = {'retention.ms': 66574936}
    ensure_topic(host, topic_configuration, topic_name)
    time.sleep(0.3)
    # When: topic-config is fetched without defaults
    results = call_kafka_info(
        host,
        {'resource': 'topic-config', 'include_defaults': False},
    )
    # Then: only the explicitly-set option is reported
    for result in results:
        configs = result['ansible_module_results']
        assert topic_name in configs
        for name, topic_config in configs.items():
            if name == topic_name:
                assert int(topic_config['retention.ms']) == 66574936
                # defaults such as min.insync.replicas must be omitted
                assert 'min.insync.replicas' not in topic_config
def test_update_partitions_without_zk(host):
    """
    Check that the partition count can be updated without zookeeper
    (supported only on brokers >= 1.0.0).

    :param host: ansible host fixture the module runs against
    """
    # Given
    topic_name = get_topic_name()
    ensure_kafka_topic(host, topic_defaut_configuration, topic_name)
    time.sleep(0.3)
    # When
    test_topic_configuration = topic_defaut_configuration.copy()
    test_topic_configuration.update({'partitions': 2})
    ensure_idempotency(ensure_kafka_topic,
                       host,
                       test_topic_configuration,
                       topic_name,
                       minimal_api_version="1.0.0")
    time.sleep(0.3)
    # Then
    # Use a dedicated loop name instead of shadowing the `host` fixture
    # parameter (consistent with test_update_partitions).
    for kafka_host, host_vars in kafka_hosts.items():
        # skip brokers too old to support the zookeeper-less update
        if (parse_version(
                host_protocol_version[host_vars['inventory_hostname']]) <
                parse_version("1.0.0")):
            continue
        kfk_addr = "%s:9092" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        check_configured_topic(kafka_host, test_topic_configuration,
                               topic_name, kfk_addr)
# Example 8
def test_update_partitions(host):
    """
    Check that the partition count can be increased.
    """
    # Given: a topic created with the default configuration
    topic_name = get_topic_name()
    ensure_topic(host, topic_defaut_configuration, topic_name)
    time.sleep(0.3)
    # When: partitions bumped to 2 (applied twice for idempotency)
    updated_configuration = dict(topic_defaut_configuration, partitions=2)
    ensure_idempotency(ensure_topic, host, updated_configuration, topic_name)
    time.sleep(0.3)
    # Then: every broker reports the new partition count
    for kafka_host, host_vars in kafka_hosts.items():
        broker_addr = "%s:9092" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        check_configured_topic(kafka_host, updated_configuration,
                               topic_name, broker_addr)
def test_duplicated_topics(host):
    """
    Check that duplicated topic entries are rejected without any change.
    """
    # Given
    duplicated_topic_name = get_topic_name()

    def get_topic_config():
        # Build the very same topic definition on each call so the
        # 'topics' list below contains an exact duplicate.
        topic_configuration = topic_defaut_configuration.copy()
        topic_configuration.update({
            'name': duplicated_topic_name,
            'options': {
                'retention.ms': 66574936,
                'flush.ms': 564939
            }
        })
        return topic_configuration

    topic_configuration = {'topics': [get_topic_config(), get_topic_config()]}
    # When
    results = ensure_kafka_topics(host, topic_configuration)
    time.sleep(0.3)
    # Then: the module must refuse to act and report the duplication
    for result in results:
        assert not result['changed']
        assert 'duplicated topics' in result['msg']
def test_delete_options(host):
    """
    Check that a topic option can be removed.

    :param host: ansible host fixture the module runs against
    """
    # Given: a topic created with two explicit options
    init_topic_configuration = topic_defaut_configuration.copy()
    init_topic_configuration.update(
        {'options': {
            'retention.ms': 66574936,
            'flush.ms': 564939
        }})
    topic_name = get_topic_name()
    ensure_kafka_topic(host, init_topic_configuration, topic_name)
    time.sleep(0.3)
    # When: retention.ms is dropped from the desired configuration
    test_topic_configuration = topic_defaut_configuration.copy()
    test_topic_configuration.update({'options': {'flush.ms': 564939}})
    ensure_idempotency(ensure_kafka_topic, host, test_topic_configuration,
                       topic_name)
    time.sleep(0.3)
    # Then: the dropped option must no longer be set on any broker
    deleted_options = {
        'retention.ms': 66574936,
    }
    # Use a dedicated loop name instead of shadowing the `host` fixture
    # parameter (consistent with test_update_partitions).
    for kafka_host, host_vars in kafka_hosts.items():
        kfk_addr = "%s:9092" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        check_configured_topic(kafka_host,
                               test_topic_configuration,
                               topic_name,
                               kfk_addr,
                               deleted_options=deleted_options)
def test_update_replica_factor_force_reassign(host):
    """
    Check that a forced reassignment updates the replication factor.
    """
    # Given: a topic created with the default configuration
    topic_name = get_topic_name()
    ensure_kafka_topic(host, topic_defaut_configuration, topic_name)
    time.sleep(0.3)
    # When: first raise the replication factor with a forced reassignment
    reassign_configuration = topic_defaut_configuration.copy()
    reassign_configuration.update({
        'kafka_sleep_time': 10,
        'kafka_max_retries': 10,
        'replica_factor': 2,
        'force_reassign': True,
        'preserve_leader': True
    })
    ensure_kafka_topic_with_zk(host, reassign_configuration, topic_name)
    time.sleep(0.3)
    # When: force a second reassignment with the same replica factor
    reassign_configuration.update({
        'replica_factor': 2,
        'force_reassign': True,
        'preserve_leader': True
    })
    results = ensure_kafka_topic_with_zk(host, reassign_configuration,
                                         topic_name)
    time.sleep(0.3)
    # Then: the forced run must still report the topic as updated
    for result in results:
        assert result['changed'], str(result)
        assert topic_name in result['changes']['topic_updated'], str(result)
# Example 12
def test_kafka_info_consumer_group(host):
    """
    Check that consumer group information can be retrieved.
    """
    # Given: a topic and a consumer group that consumed from it
    topic_name = get_topic_name()
    ensure_topic(host, topic_defaut_configuration, topic_name)
    time.sleep(0.3)
    consumer_group = get_consumer_group()
    total_msg = 42
    # When
    produce_and_consume_topic(topic_name, total_msg, consumer_group)
    time.sleep(0.3)
    # Then: the group must appear in the consumer_group resource
    results = call_kafka_info(host, {'resource': 'consumer_group'})
    for result in results:
        assert consumer_group in result['ansible_module_results']
 def get_topic_config():
     topic_configuration = topic_defaut_configuration.copy()
     topic_configuration.update({
         'name': get_topic_name(),
         'options': {
             'retention.ms': 66574936,
             'flush.ms': 564939
         }
     })
     return topic_configuration
# Example 14
def test_kafka_info_topic_include_internal(host):
    """
    Check that internal topics are listed when explicitly requested.
    """
    # Given: a default topic plus one with a custom min ISR
    topic_name = get_topic_name()
    ensure_topic(host, topic_defaut_configuration, topic_name)
    time.sleep(0.3)

    topic_test_name = get_topic_name()
    topic_test_configuration = topic_defaut_configuration.copy()
    topic_test_configuration['options'] = {'min.insync.replicas': 2}
    ensure_topic(host, topic_test_configuration, topic_test_name)
    time.sleep(0.3)
    produce_and_consume_topic(topic_name, 10, get_consumer_group())
    time.sleep(0.3)
    # When: topic info is fetched including internal topics
    results = call_kafka_info(
        host,
        {'resource': 'topic', 'include_internal': True}
    )
    # Then: both the user topic and __consumer_offsets are present
    for result in results:
        topics = result['ansible_module_results']
        assert topic_name in topics
        assert '__consumer_offsets' in topics
# Example 15
def test_kafka_info_topic(host):
    """
    Check that topic information can be retrieved.
    """
    # NOTE(review): a function with this exact name is defined earlier
    # in the file; under pytest the later definition shadows it — confirm.
    # Given
    topic_name = get_topic_name()
    ensure_topic(host, topic_defaut_configuration, topic_name)
    time.sleep(0.3)
    # When
    results = call_kafka_info(host, {'resource': 'topic'})
    # Then
    for result in results:
        assert topic_name in result['ansible_module_results']
def test_kafka_info_brokers():
    """
    Check that broker information can be retrieved.
    """
    # Given
    topic_name = get_topic_name()
    ensure_topic(localhost, topic_defaut_configuration, topic_name)
    time.sleep(0.5)
    # When
    results = call_kafka_info(localhost, {'resource': 'broker'})
    # Then: the test cluster is expected to expose exactly two brokers
    for result in results:
        assert len(result['ansible_module_results']) == 2
def delete_consumer_offset(host, partitions=None):
    """
    Verify a consumer group's offsets can be deleted for a topic.

    :param host: ansible host the module runs against
    :param partitions: optional list of partitions to restrict the
        deletion to; all partitions of the topic when None
    """
    # Given
    consumer_group = get_consumer_group()
    topic_name1 = get_topic_name()

    ensure_kafka_topic(host,
                       topic_defaut_configuration,
                       topic_name1,
                       minimal_api_version="2.4.0")
    time.sleep(0.3)

    produce_and_consume_topic(topic_name1, 1, consumer_group, True, "2.4.0")
    time.sleep(0.3)

    # When
    test_cg_configuration = cg_defaut_configuration.copy()

    test_cg_configuration.update({
        'consumer_group': consumer_group,
        'action': 'delete',
        'api_version': '2.4.0'
    })

    if partitions is None:
        test_cg_configuration.update({'topics': [{'name': topic_name1}]})
    else:
        test_cg_configuration.update(
            {'topics': [{
                'name': topic_name1,
                'partitions': partitions
            }]})

    test_cg_configuration.update(sasl_default_configuration)
    ensure_idempotency(ensure_kafka_consumer_group,
                       host,
                       test_cg_configuration,
                       minimal_api_version="2.4.0")
    time.sleep(0.3)
    # Then
    # Use a dedicated loop name instead of shadowing the `host` parameter.
    for kafka_host, host_vars in kafka_hosts.items():
        # offset deletion requires brokers >= 2.4.0
        if (parse_version(
                host_protocol_version[host_vars['inventory_hostname']]) <
                parse_version("2.4.0")):
            continue

        kfk_addr = "%s:9092" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        check_unconsumed_topic(consumer_group, topic_name1, kfk_addr)
def test_consumer_lag():
    """
    Check that the global consumer lag can be computed.
    """
    # Given
    topic_name = get_topic_name()
    ensure_topic(localhost, topic_defaut_configuration, topic_name)
    time.sleep(0.5)
    consumer_group = get_consumer_group()
    total_msg = 42
    # When
    produce_and_consume_topic(topic_name, total_msg, consumer_group)
    time.sleep(0.5)
    # Then
    # FIX: the previous loop over kafka_hosts never used its loop
    # variables and repeated the identical `localhost` call and assert
    # on every iteration; a single call is sufficient.
    lag = call_kafka_stat_lag(localhost,
                              {'consummer_group': consumer_group})
    msg = json.loads(lag[0]['msg'])
    global_lag_count = msg['global_lag_count']
    assert global_lag_count == (total_msg - 1)
def test_update_partitions_and_replica_factor_default_value(host):
    """
    Check that -1 partitions/replica_factor values are accepted:
    the update step is skipped (with a warning) and the current
    values are kept.

    :param host: ansible host fixture the module runs against
    """
    # Given
    topic_name = get_topic_name()
    ensure_kafka_topic(host, topic_defaut_configuration, topic_name)
    time.sleep(0.3)
    # When: -1 means "leave as is" for both settings
    test_topic_configuration = topic_defaut_configuration.copy()
    test_topic_configuration.update({'partitions': -1, 'replica_factor': -1})
    ensure_kafka_topic(host, test_topic_configuration, topic_name)
    time.sleep(0.3)
    # Then: the topic keeps 1 partition and replication factor 1
    expected_topic_configuration = topic_defaut_configuration.copy()
    expected_topic_configuration.update({'partitions': 1, 'replica_factor': 1})
    # Use a dedicated loop name instead of shadowing the `host` parameter.
    for kafka_host, host_vars in kafka_hosts.items():
        kfk_addr = "%s:9092" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        check_configured_topic(kafka_host, expected_topic_configuration,
                               topic_name, kfk_addr)
def test_rebalance_all_partitions_preserve_leader(host):
    """
    Check if can reassign all topics-partition with
    leader conservation.
    """
    # Given: a topic with many partitions spread over 2 replicas
    test_topic_name = get_topic_name()
    test_topic_configuration = topic_defaut_configuration.copy()
    test_topic_configuration.update({
        'partitions': 50,
        'replica_factor': 2,
        'preserve_leader': True
    })
    ensure_kafka_topic(host, test_topic_configuration, test_topic_name)
    time.sleep(0.3)
    # Snapshot the current non-default options and topic layouts
    topics_config = call_kafka_info(
        host, {
            'resource': 'topic-config',
            'include_internal': True,
            'include_defaults': False
        })
    topics = call_kafka_info(host, {
        'resource': 'topic',
        'include_internal': True
    })

    all_topics = topics[0]['ansible_module_results']
    all_topics_config = topics_config[0]['ansible_module_results']
    # Build a forced-reassignment spec for the test topic and all
    # internal topics (names starting with '__'), keeping their
    # current partition count, replica factor and options.
    test_topics = []
    for topic_name, partitions in all_topics.items():
        if topic_name == test_topic_name or topic_name.startswith('__'):
            test_topics.append({
                'name':
                topic_name,
                'partitions':
                len(partitions),
                'replica_factor':
                len(partitions['0']['replicas']),
                'options':
                all_topics_config[topic_name],
                'force_reassign':
                True,
                'preserve_leader':
                True,
                'state':
                'present'
            })
    # Record, per broker response, the current leader of every
    # (topic, partition) pair so leader conservation can be asserted.
    existing_leaders = []
    for i, broker_topics in enumerate(topics):
        existing_leaders.append({})
        for topic_name, partitions in (
                broker_topics['ansible_module_results'].items()):
            if topic_name == test_topic_name or topic_name.startswith('__'):
                for partition, config in partitions.items():
                    existing_leaders[i][(topic_name, partition)] = \
                        config['leader']
    # When: force the rebalance of all selected topics
    test_topics_configuration = {
        'topics': test_topics,
        'kafka_sleep_time': 5,
        'kafka_max_retries': 60
    }
    ensure_kafka_topics(host, test_topics_configuration)
    time.sleep(10)
    # Then
    topics = call_kafka_info(host, {
        'resource': 'topic',
        'include_internal': True
    })
    for i, broker_topics in enumerate(topics):
        # Every previous leader must still be among the new replicas.
        resulting_replicas = {}
        for topic_name, partitions in (
                topics[i]['ansible_module_results'].items()):
            if topic_name == test_topic_name or topic_name.startswith('__'):
                for partition, config in partitions.items():
                    resulting_replicas[(topic_name, partition)] = \
                        config['replicas']
        for topic_partition, leader in existing_leaders[i].items():
            assert leader in resulting_replicas[topic_partition]
        # Replicas must end up evenly balanced: each broker holds
        # either floor(mean) or floor(mean) + 1 partition replicas.
        total_partitions = 0
        partitions_per_broker = {}
        for topic_name, partitions in (
                topics[i]['ansible_module_results'].items()):
            if topic_name == test_topic_name or topic_name.startswith('__'):
                for partition, config in partitions.items():
                    for replica in config['replicas']:
                        if replica not in partitions_per_broker:
                            partitions_per_broker[replica] = 0
                        partitions_per_broker[replica] += 1
                        total_partitions += 1
        mean_partitions = int(total_partitions * 1.0 /
                              len(partitions_per_broker))
        for node_id, partitions in partitions_per_broker.items():
            assert partitions >= mean_partitions, partitions_per_broker
            assert partitions <= (mean_partitions + 1), partitions_per_broker
# Example 21
def test_check_mode(host):
    """
    Check that check mode performs no changes on topics and ACLs.

    :param host: ansible host fixture the modules run against
    """
    # Given: an existing topic and an existing ACL
    topic_name = get_topic_name()
    ensure_topic(
        host,
        topic_defaut_configuration,
        topic_name
    )
    time.sleep(0.3)
    test_acl_configuration = acl_defaut_configuration.copy()
    test_acl_configuration.update({
        'name': get_acl_name(),
        'state': 'present',
        **sasl_default_configuration
    })
    ensure_acl(
        host,
        test_acl_configuration
    )
    time.sleep(0.3)
    # When: topic deletion requested in check mode
    test_topic_configuration = topic_defaut_configuration.copy()
    test_topic_configuration.update({
        'state': 'absent'
    })
    ensure_topic(
        host,
        test_topic_configuration,
        topic_name,
        check=True
    )
    time.sleep(0.3)
    # When: topic update (partitions/replicas/options) in check mode
    test_topic_configuration.update({
        'state': 'present',
        'partitions': topic_defaut_configuration['partitions'] + 1,
        'replica_factor': topic_defaut_configuration['replica_factor'] + 1,
        'options': {
            'retention.ms': 1000
        }
    })
    ensure_topic(
        host,
        test_topic_configuration,
        topic_name,
        check=True
    )
    time.sleep(0.3)
    # When: creation of a brand new topic in check mode
    new_topic_name = get_topic_name()
    ensure_topic(
        host,
        test_topic_configuration,
        new_topic_name,
        check=True
    )
    time.sleep(0.3)
    # When: ACL deletion in check mode
    check_acl_configuration = test_acl_configuration.copy()
    check_acl_configuration.update({
        'state': 'absent'
    })
    ensure_acl(
        host,
        check_acl_configuration,
        check=True
    )
    time.sleep(0.3)
    # When: creation of a new ACL in check mode
    check_acl_configuration.update({
        'state': 'present',
        'name': get_topic_name()
    })
    ensure_acl(
        host,
        check_acl_configuration,
        check=True
    )
    time.sleep(0.3)
    # Then: nothing must have changed on any broker
    expected_topic_configuration = topic_defaut_configuration.copy()
    for kafka_host, host_vars in kafka_hosts.items():
        kfk_addr = "%s:9092" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        kfk_sasl_addr = "%s:9094" % \
            host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe']
        # the pre-existing topic must keep its original configuration
        check_configured_topic(kafka_host, expected_topic_configuration,
                               topic_name, kfk_addr)
        # the pre-existing ACL must still be present
        check_configured_acl(kafka_host, test_acl_configuration, kfk_sasl_addr)
        test_topic_configuration.update({
            'state': 'absent'
        })
        # the topic "created" in check mode must not exist
        check_configured_topic(kafka_host, test_topic_configuration,
                               new_topic_name, kfk_addr)
        check_acl_configuration.update({
            'state': 'absent'
        })
        # BUG FIX: verify the ACL "created" in check mode is absent;
        # the previous code re-checked test_acl_configuration here,
        # so the new ACL's absence was never actually asserted.
        check_configured_acl(kafka_host, check_acl_configuration,
                             kfk_sasl_addr)