def test_master_selection(kafka_servers: KafkaServers, strategy: str) -> None:
    # Use random ports so parallel test runs don't collide.
    port1 = get_random_port(port_range=TESTS_PORT_RANGE, blacklist=[])
    port2 = get_random_port(port_range=TESTS_PORT_RANGE, blacklist=[port1])
    port_aa, port_bb = sorted((port1, port2))
    client_id_aa = new_random_name("master_selection_aa_")
    client_id_bb = new_random_name("master_selection_bb_")
    group_id = new_random_name("group_id")

    config_aa = set_config_defaults({
        "advertised_hostname": "127.0.0.1",
        "bootstrap_uri": kafka_servers.bootstrap_servers,
        "client_id": client_id_aa,
        "group_id": group_id,
        "port": port_aa,
        "master_election_strategy": strategy,
    })
    config_bb = set_config_defaults({
        "advertised_hostname": "127.0.0.1",
        "bootstrap_uri": kafka_servers.bootstrap_servers,
        "client_id": client_id_bb,
        "group_id": group_id,
        "port": port_bb,
        "master_election_strategy": strategy,
    })

    with closing(init_admin(config_aa)) as mc_aa, closing(init_admin(config_bb)) as mc_bb:
        if strategy == "lowest":
            master = mc_aa
            slave = mc_bb
        else:
            master = mc_bb
            slave = mc_aa

        # Wait for the election to happen
        while not is_master(master):
            time.sleep(0.3)

        while not has_master(slave):
            time.sleep(0.3)

        # Make sure the end configuration is as expected
        master_url = f'http://{master.config["host"]}:{master.config["port"]}'
        assert master.sc.election_strategy == strategy
        assert slave.sc.election_strategy == strategy
        assert master.sc.master_url == master_url
        assert slave.sc.master_url == master_url
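

# ``is_master`` and ``has_master`` are polled above but not defined in this
# snippet. A minimal sketch of what they plausibly check, assuming the
# coordinator exposes its schema coordinator as ``sc`` (a hypothetical
# reconstruction, not the project's verbatim helpers):
def is_master(mc) -> bool:
    """True while ``mc`` currently holds the master role."""
    return bool(mc.sc and mc.sc.are_we_master)


def has_master(mc) -> bool:
    """True once ``mc`` has observed an elected master other than itself."""
    return bool(mc.sc and not mc.sc.are_we_master and mc.sc.master_url is not None)

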
def fixture_registry_async_pair(tmp_path: Path, kafka_servers: KafkaServers) -> Iterator[Tuple[str, str]]:
    # Generator fixture: yields the master and slave registry base URLs.
    master_config_path = tmp_path / "karapace_config_master.json"
    slave_config_path = tmp_path / "karapace_config_slave.json"
    master_port = get_random_port(port_range=REGISTRY_PORT_RANGE, blacklist=[])
    slave_port = get_random_port(port_range=REGISTRY_PORT_RANGE, blacklist=[master_port])
    topic_name = new_random_name("schema_pairs")
    group_id = new_random_name("schema_pairs")
    write_config(
        master_config_path,
        {
            "bootstrap_uri": kafka_servers.bootstrap_servers,
            "topic_name": topic_name,
            "group_id": group_id,
            "advertised_hostname": "127.0.0.1",
            "karapace_registry": True,
            "port": master_port,
        },
    )
    write_config(
        slave_config_path,
        {
            "bootstrap_uri": kafka_servers.bootstrap_servers,
            "topic_name": topic_name,
            "group_id": group_id,
            "advertised_hostname": "127.0.0.1",
            "karapace_registry": True,
            "port": slave_port,
        },
    )
    master_process = Popen(["python", "-m", "karapace.karapace_all", str(master_config_path)])
    slave_process = Popen(["python", "-m", "karapace.karapace_all", str(slave_config_path)])
    try:
        wait_for_port(master_port)
        wait_for_port(slave_port)
        yield f"http://127.0.0.1:{master_port}", f"http://127.0.0.1:{slave_port}"
    finally:
        master_process.kill()
        slave_process.kill()
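

# A hypothetical test built on the fixture above: register a schema via the
# master and read it back through the slave. The REST paths are the
# Confluent-compatible endpoints Karapace serves; the test name, the
# ``requests`` dependency, and the lack of retries are illustrative
# assumptions, not project code.
def test_schema_visible_on_both_nodes(registry_async_pair) -> None:
    import requests

    master_url, slave_url = registry_async_pair
    subject = new_random_name("subject")
    headers = {"Content-Type": "application/vnd.schemaregistry.v1+json"}
    res = requests.post(
        f"{master_url}/subjects/{subject}/versions",
        json={"schema": '{"type": "string"}'},
        headers=headers,
    )
    assert res.ok
    schema_id = res.json()["id"]
    # Replication between the nodes is asynchronous; a real test would poll
    # here instead of reading back immediately.
    res = requests.get(f"{slave_url}/schemas/ids/{schema_id}", headers=headers)
    assert res.ok

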
def test_no_eligible_master(kafka_servers: KafkaServers) -> None:
    client_id = new_random_name("master_selection_")
    group_id = new_random_name("group_id")

    config_aa = set_config_defaults({
        "advertised_hostname": "127.0.0.1",
        "bootstrap_uri": kafka_servers.bootstrap_servers,
        "client_id": client_id,
        "group_id": group_id,
        "port": get_random_port(port_range=TESTS_PORT_RANGE, blacklist=[]),
        "master_eligibility": False,
    })

    with closing(init_admin(config_aa)) as mc:
        # Wait for the election to happen, i.e. until are_we_master is no longer None
        while not mc.sc or mc.sc.are_we_master is None:
            time.sleep(0.3)

        # Make sure the end configuration is as expected
        assert mc.sc.are_we_master is False
        assert mc.sc.master_url is None
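

# ``init_admin`` is assumed to build and start the coordinator whose ``sc``
# attribute the tests above inspect. A plausible sketch, assuming the class
# is Karapace's ``MasterCoordinator`` (the real helper may differ):
def init_admin(config):
    mc = MasterCoordinator(config=config)
    mc.start()
    return mc

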
def configure_and_start_zk(zk_dir: Path) -> Tuple[ZKConfig, Popen]:
    cfg_path = zk_dir / "zoo.cfg"
    logs_dir = zk_dir / "logs"
    logs_dir.mkdir(parents=True)

    client_port = get_random_port(port_range=ZK_PORT_RANGE, blacklist=[])
    admin_port = get_random_port(port_range=ZK_PORT_RANGE, blacklist=[client_port])
    config = ZKConfig(
        client_port=client_port,
        admin_port=admin_port,
        path=str(zk_dir),
    )
    zoo_cfg = """
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage; /tmp here is just
# for example's sake.
dataDir={path}
# the port at which the clients will connect
clientPort={client_port}
#clientPortAddress=127.0.0.1
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
# admin server
admin.serverPort={admin_port}
admin.enableServer=false
# Allow reconfig calls to be made to add/remove nodes to the cluster on the fly
reconfigEnabled=true
# Don't require authentication for reconfig
skipACL=yes
""".format(
        client_port=config.client_port,
        admin_port=config.admin_port,
        path=config.path,
    )
    cfg_path.write_text(zoo_cfg)
    env = {
        "CLASSPATH": "/usr/share/java/slf4j/slf4j-simple.jar",
        "ZOO_LOG_DIR": str(logs_dir),
    }
    java_args = get_java_process_configuration(java_args=zk_java_args(cfg_path))
    proc = Popen(java_args, env=env)
    return config, proc
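

# ``configure_and_start_zk`` returns as soon as the JVM is spawned, so callers
# need a readiness check before pointing Kafka at it. A minimal sketch of such
# a check, assuming plain TCP reachability is a good enough signal (the
# ``wait_for_port`` helper used by fixture_registry_async_pair above is
# presumably similar):
def wait_for_port(port: int, *, hostname: str = "127.0.0.1", wait_time: float = 0.5) -> None:
    import socket

    # Block until something accepts TCP connections on ``port``.
    while True:
        try:
            with socket.create_connection((hostname, port), timeout=wait_time):
                return
        except OSError:
            time.sleep(wait_time)

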
def configure_and_start_kafka(kafka_dir: Path, zk: ZKConfig) -> Tuple[KafkaConfig, Popen]:
    # setup filesystem
    data_dir = kafka_dir / "data"
    config_dir = kafka_dir / "config"
    config_path = config_dir / "server.properties"
    data_dir.mkdir(parents=True)
    config_dir.mkdir(parents=True)

    plaintext_port = get_random_port(port_range=KAFKA_PORT_RANGE, blacklist=[])

    config = KafkaConfig(
        datadir=str(data_dir),
        kafka_keystore_password="******",
        kafka_port=plaintext_port,
        zookeeper_port=zk.client_port,
    )

    advertised_listeners = ",".join([
        "PLAINTEXT://127.0.0.1:{}".format(plaintext_port),
    ])
    listeners = ",".join([
        "PLAINTEXT://:{}".format(plaintext_port),
    ])

    # Keep in sync with containers/docker-compose.yml
    kafka_config = {
        "broker.id": 1,
        "broker.rack": "local",
        "advertised.listeners": advertised_listeners,
        "auto.create.topics.enable": False,
        "default.replication.factor": 1,
        "delete.topic.enable": "true",
        "inter.broker.listener.name": "PLAINTEXT",
        "inter.broker.protocol.version": KAFKA_CURRENT_VERSION,
        "listeners": listeners,
        "log.cleaner.enable": "true",
        "log.dirs": config.datadir,
        "log.message.format.version": KAFKA_CURRENT_VERSION,
        "log.retention.check.interval.ms": 300000,
        "log.segment.bytes": 200 * 1024 * 1024,  # 200 MiB
        "num.io.threads": 8,
        "num.network.threads": 112,
        "num.partitions": 1,
        "num.replica.fetchers": 4,
        "num.recovery.threads.per.data.dir": 1,
        "offsets.topic.replication.factor": 1,
        "socket.receive.buffer.bytes": 100 * 1024,
        "socket.request.max.bytes": 100 * 1024 * 1024,
        "socket.send.buffer.bytes": 100 * 1024,
        "transaction.state.log.min.isr": 1,
        "transaction.state.log.num.partitions": 16,
        "transaction.state.log.replication.factor": 1,
        "zookeeper.connection.timeout.ms": 6000,
        "zookeeper.connect": f"127.0.0.1:{zk.client_port}",
    }

    with config_path.open("w") as fp:
        for key, value in kafka_config.items():
            fp.write("{}={}\n".format(key, value))

    log4j_properties_path = os.path.join(BASEDIR, "config/log4j.properties")

    kafka_cmd = get_java_process_configuration(
        java_args=kafka_java_args(
            heap_mb=256,
            logs_dir=str(kafka_dir),
            log4j_properties_path=log4j_properties_path,
            kafka_config_path=str(config_path),
        )
    )
    env: Dict[str, str] = {}  # start the broker with an empty environment
    proc = Popen(kafka_cmd, env=env)
    return config, proc
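

# Hypothetical wiring of the two helpers: boot ZooKeeper, wait until it
# accepts connections, then start Kafka against it, killing both JVMs on
# failure. The function name, directory layout, and cleanup style are
# illustrative assumptions.
def start_local_kafka(base_dir: Path) -> Tuple[KafkaConfig, List[Popen]]:
    procs: List[Popen] = []
    try:
        zk_config, zk_proc = configure_and_start_zk(base_dir / "zk")
        procs.append(zk_proc)
        wait_for_port(zk_config.client_port)
        kafka_config, kafka_proc = configure_and_start_kafka(base_dir / "kafka", zk_config)
        procs.append(kafka_proc)
        wait_for_port(kafka_config.kafka_port)
        return kafka_config, procs
    except Exception:
        for proc in procs:
            proc.kill()
        raise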