def wait_for_bootstrap_completion(context, timeout):
    """Block until the peer (and, if present, Kafka) containers report ready.

    Waits on container logs for known startup messages under a timeout, then
    re-asserts in the ``finally`` clause so that a timeout surfaces as a
    descriptive assertion failure rather than a bare Timeout error.

    Args:
        context: behave-style test context exposing ``interface`` and
            ``composition`` (project type -- not constructible here).
        timeout: seconds allotted for the containers to become ready.

    Raises:
        AssertionError: if the expected log lines never appeared.
    """
    peers = context.interface.get_peers(context)
    brokers = []
    try:
        with common_util.Timeout(timeout):
            # Peers emit this line once their profiling endpoint is listening.
            common_util.wait_until_in_log(
                peers,
                "Starting profiling server with listenAddress = 0.0.0.0:6060")

            # Check Kafka logs
            if "kafka0" in context.composition.collectServiceNames():
                kafkas = orderer_util.getKafkaBrokerList(
                    context, "orderer0.example.com")
                # Remove the ports from the list
                for kafka in kafkas:
                    broker = kafka.split(":")
                    brokers.append(broker[0])
                common_util.wait_until_in_log(brokers, " Startup complete. ")
    finally:
        # Runs even when Timeout fires, converting a timeout into a clear
        # message about which containers were not ready.
        assert common_util.is_in_log(
            peers,
            "Starting profiling server with listenAddress = 0.0.0.0:6060"
        ), "The peer containers are not ready in the allotted time ({} seconds)".format(
            timeout)
        # NOTE(review): this runs even when no kafka0 service exists, in which
        # case ``brokers`` is [] -- presumably is_in_log tolerates an empty
        # container list; confirm against common_util.
        assert common_util.is_in_log(
            brokers, " Startup complete. "
        ), "The kafka containers are not ready in the allotted time ({} seconds)".format(
            timeout)

    # A 5-second additional delay ensures ready state
    time.sleep(5)
# Example #2
def step_impl(context, count):
    """Assert that the Kafka in-sync-replica set contains *count* members."""
    broker_names = orderer_util.getKafkaBrokerList(context,
                                                   "orderer0.example.com")
    broker_ips = orderer_util.getKafkaIPs(context, broker_names)
    _, isr_list = orderer_util.getKafkaTopic(kafkaBrokers=broker_ips)
    num_isr = len(isr_list)
    assert num_isr == count, "len of isr_list: {0} does not match expected number of brokers: {1}".format(
        num_isr, count)
# Example #3
def stop_non_isr_impl(context, orderer):
    """Stop a Kafka broker that is currently outside the in-sync-replica set.

    The stopped broker is recorded on ``context.stopped_non_isr`` so later
    steps can verify it stays out of the ISR.
    """
    broker_names = orderer_util.getKafkaBrokerList(context, orderer)
    broker_ips = orderer_util.getKafkaIPs(context, broker_names)
    target = orderer_util.getNonISRKafkaBroker(kafkaBrokers=broker_ips)

    # Lazily create the bookkeeping list on the shared context.
    if not hasattr(context, "stopped_non_isr"):
        context.stopped_non_isr = []
    context.stopped_non_isr.append(target)
    context.composition.stop([target])
def step_impl(context):
    """Verify no stopped broker remains in the Kafka in-sync-replica set.

    Fetches the current ISR list, checks that every broker recorded in
    ``context.stopped_brokers`` has been evicted from it, and then confirms
    via the surviving brokers' logs that the previous leader shut down.

    Raises:
        AssertionError: if a stopped broker is still in the ISR, or a
            surviving broker's log lacks the shutdown message.
    """
    brokers = orderer_util.getKafkaBrokerList(context, "orderer0.example.com")
    kafkas = orderer_util.getKafkaIPs(context, brokers)
    topic, isr_list = orderer_util.getKafkaTopic(kafkaBrokers=kafkas)
    # As long as we have at least 1 broker in isr_list, check that none from
    # the stopped_brokers list exist in isr_list.
    # BUG FIX: the original wrote ``if isr_list >= 1:`` -- comparing the list
    # itself to an int, which raises TypeError on Python 3. The length was
    # clearly intended (see the comment above).
    if len(isr_list) >= 1:
        for kafka in context.stopped_brokers:
            assert kafka not in isr_list, "stopped broker still exists in isr_set and is not removed"

    # For each broker in isr_list, check its logs.
    for kafka in isr_list:
        assert common_util.is_in_log(kafka, "Shutdown completed (kafka.server.ReplicaFetcherThread)"), "could not verify in the remaining broker logs that prevLeader is down"
# Example #5
def stop_leader_impl(context, orderer):
    """Stop the current Kafka partition leader and remember it.

    The leader is appended to ``context.stopped_brokers`` so later steps can
    assert it was removed from the in-sync-replica set.

    Args:
        context: behave-style test context (project type).
        orderer: orderer service name used to discover the broker list.
    """
    broker_names = orderer_util.getKafkaBrokerList(context, orderer)
    broker_ips = orderer_util.getKafkaIPs(context, broker_names)
    leader = orderer_util.getKafkaPartitionLeader(kafkaBrokers=broker_ips)
    # FIX: dropped a leftover debug ``print(leader)`` -- the sibling
    # takeDownType variant of this step does not print, and the value is
    # already recorded on the context below.

    # Save stopped broker
    if not hasattr(context, "stopped_brokers"):
        context.stopped_brokers = []
    context.stopped_brokers.append(leader)
    # Now that we know the kafka leader, stop it
    context.composition.stop([leader])
# Example #6
def stop_leader_impl(context, orderer, takeDownType):
    """Take down the current Kafka partition leader via *takeDownType*.

    The leader is appended to ``context.stopped_brokers``. The first leader
    taken down is also recorded as ``context.prevLeader``; the hasattr guard
    keeps subsequent calls from overwriting it -- presumably so later steps
    can reference the original leader (confirm against callers).
    """
    broker_names = orderer_util.getKafkaBrokerList(context, orderer)
    broker_ips = orderer_util.getKafkaIPs(context, broker_names)
    leader = orderer_util.getKafkaPartitionLeader(kafkaBrokers=broker_ips)

    # Remember every broker we have taken down for later ISR checks.
    if not hasattr(context, "stopped_brokers"):
        context.stopped_brokers = []
    context.stopped_brokers.append(leader)
    # Bring the leader down using the requested mechanism.
    basic_impl.bringdown_impl(context, leader, takeDownType)

    if not hasattr(context, "prevLeader"):
        context.prevLeader = leader