Example #1
def generate_some(rnd: SimRandom):
    values = [rnd.integer(-37, 7342) for _ in range(1000)]
    strings = [rnd.string(10) for _ in range(10)]
    choice = rnd.choice(*values)
    sample = rnd.sample(values, 10)
    shuffled = rnd.shuffle(values)
    return values, strings, choice, sample, shuffled
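These examples exercise a SimRandom helper with integer, float, string, choice, sample and shuffle methods. For readers without the project sources at hand, a rough, hypothetical stand-in (not the project's actual SimRandom implementation) that is deterministic for a given seed could look like this:

import random
import string


class DeterministicRandomSketch:
    """Hypothetical seeded stand-in exposing the methods used in the examples."""

    def __init__(self, seed: int = 0):
        self._rng = random.Random(seed)

    def integer(self, low: int, high: int) -> int:
        return self._rng.randint(low, high)

    def float(self, low: float, high: float) -> float:
        return self._rng.uniform(low, high)

    def string(self, length: int) -> str:
        return ''.join(self._rng.choice(string.ascii_letters) for _ in range(length))

    def choice(self, *args):
        return self._rng.choice(args)

    def sample(self, items, count: int):
        return self._rng.sample(items, count)

    def shuffle(self, items):
        result = list(items)
        self._rng.shuffle(result)
        return result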
Example #2
def some_pool(random: SimRandom) -> (SimPool, List):
    pool_size = random.integer(4, 8)
    pool = SimPool(pool_size, random)

    # Create simulated history
    # TODO: Move into helper?
    faulty = (pool_size - 1) // 3
    seq_no_per_cp = 10
    max_batches = 50
    batches = [
        some_random_preprepare(random, 0, n) for n in range(1, max_batches)
    ]
    checkpoints = [
        some_checkpoint(random, 0, n)
        for n in range(0, max_batches, seq_no_per_cp)
    ]

    # Preprepares
    pp_count = [random.integer(0, len(batches)) for _ in range(pool_size)]
    max_pp = sorted(pp_count)[faulty]
    # Prepares
    p_count = [random.integer(0, min(max_pp, pp)) for pp in pp_count]
    max_p = sorted(p_count)[faulty]
    # Checkpoints
    cp_count = [
        1 + random.integer(0, min(max_p, p)) // seq_no_per_cp for p in pp_count
    ]
    max_stable_cp_indx = sorted(cp_count)[faulty] - 1
    stable_cp = [
        checkpoints[random.integer(0, min(max_stable_cp_indx, cp))].seqNoEnd
        for cp in cp_count
    ]

    # Initialize consensus data
    for i, node in enumerate(pool.nodes):
        node._data.preprepared = batches[:pp_count[i]]
        node._data.prepared = batches[:p_count[i]]
        node._data.checkpoints = checkpoints[:cp_count[i]]
        node._data.stable_checkpoint = stable_cp[i]

    committed = []
    for i in range(1, max_batches):
        prepare_count = sum(1 for node in pool.nodes
                            if i <= len(node._data.prepared))
        has_prepared_cert = prepare_count >= pool_size - faulty
        if has_prepared_cert:
            committed.append(ViewChangeService.batch_id(batches[i - 1]))

    return pool, committed
Example #3
def check_view_change_completes_under_normal_conditions(random: SimRandom):
    # Create random pool with random initial state
    pool, committed = some_pool(random)

    # Schedule view change at different time on all nodes
    for node in pool.nodes:
        pool.timer.schedule(
            random.integer(0, 10000),
            partial(node._view_changer.process_need_view_change,
                    NeedViewChange()))

    # Make sure all nodes complete view change
    pool.timer.wait_for(lambda: all(not node._data.waiting_for_new_view and
                                    node._data.view_no > 0
                                    for node in pool.nodes))

    # Make sure all nodes end up in same state
    for node_a, node_b in zip(pool.nodes, pool.nodes[1:]):
        assert node_a._data.view_no == node_b._data.view_no
        assert node_a._data.primary_name == node_b._data.primary_name
        assert node_a._data.stable_checkpoint == node_b._data.stable_checkpoint
        assert node_a._data.preprepared == node_b._data.preprepared

    # Make sure that all committed reqs are ordered with the same ppSeqNo in the new view:
    stable_checkpoint = pool.nodes[0]._data.stable_checkpoint
    committed = [c for c in committed if c.pp_seq_no > stable_checkpoint]
    for n in pool.nodes:
        assert committed == n._data.preprepared[:len(committed)]
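Check functions like the one above are usually invoked from seeded test wrappers so that any failing run can be replayed. A hypothetical pytest driver (the seed range and the DefaultSimRandom name are assumptions, not taken from the examples above) might look like:

import pytest


@pytest.mark.parametrize("seed", range(200))  # assumed seed range, purely illustrative
def test_view_change_completes_under_normal_conditions(seed):
    # DefaultSimRandom is assumed to be a seeded SimRandom implementation
    random = DefaultSimRandom(seed)
    check_view_change_completes_under_normal_conditions(random)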
Example #4
def some_checkpoint(random: SimRandom, view_no: int,
                    pp_seq_no: int) -> Checkpoint:
    return Checkpoint(instId=0,
                      viewNo=view_no,
                      seqNoStart=pp_seq_no,
                      seqNoEnd=pp_seq_no,
                      digest=base58.b58encode(random.string(32)).decode())
Example #5
def some_checkpoint(random: SimRandom, view_no: int,
                    pp_seq_no: int) -> Checkpoint:
    return Checkpoint(instId=0,
                      viewNo=view_no,
                      seqNoStart=pp_seq_no,
                      seqNoEnd=pp_seq_no,
                      digest=random.string(40))
Example #6
def check_view_change_completes_under_normal_conditions(
        random: SimRandom, min_latency, max_latency, filtered_msg_types,
        filter_probability):
    # PREPARE

    # 1. Create random pool with random initial state
    pool, committed = some_pool(random)
    N = pool.size
    F = (N - 1) // 3

    # 2. set latency
    pool.network.set_latency(min_latency, max_latency)

    # 3. set filter
    pool.network.set_filter([getNodeName(pool.nodes[-1].name)],
                            filtered_msg_types, filter_probability)

    # EXECUTE

    # Schedule view change at different time on all nodes
    for node in pool.nodes:
        pool.timer.schedule(
            random.integer(0, 10000),
            partial(node._view_changer.process_need_view_change,
                    NeedViewChange()))

    # CHECK

    # 1. Make sure all nodes complete view change
    pool.timer.wait_for(lambda: all(not node._data.waiting_for_new_view and
                                    node._data.view_no > 0
                                    for node in pool.nodes))

    # 2. Check that the same stable checkpoint is set on at least N-F nodes
    #    (up to F nodes may lag behind and will catch up later)
    stable_checkpoints = [n._data.stable_checkpoint for n in pool.nodes]
    most_freq_stable_checkpoint = Counter(stable_checkpoints).most_common(1)
    stable_checkpoint = most_freq_stable_checkpoint[0][0]
    assert most_freq_stable_checkpoint[0][1] >= N - F

    # 3. Check that the same preprepares are set on all nodes that have the found stable checkpoint
    preprepares = set()
    for n in pool.nodes:
        if n._data.stable_checkpoint >= stable_checkpoint:
            preprepares.add(tuple(n._data.preprepared))
    assert len(preprepares) == 1

    # 4. Make sure all nodes end up in same view
    for node_a, node_b in zip(pool.nodes, pool.nodes[1:]):
        assert node_a._data.view_no == node_b._data.view_no
        assert node_a._data.primary_name == node_b._data.primary_name

    # 5. Make sure that all committed reqs are ordered with the same ppSeqNo in the new view:
    committed_above_cp = [
        c for c in committed if c.pp_seq_no > stable_checkpoint
    ]
    for n in pool.nodes:
        if n._data.stable_checkpoint >= stable_checkpoint:
            assert committed_above_cp == n._data.preprepared[:len(committed_above_cp)]
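For reference, the quorum used in step 2 follows the usual BFT bound: with N nodes the pool tolerates F = (N - 1) // 3 faulty ones, so agreement is required from N - F nodes. A quick sanity check over the pool sizes that some_pool generates (4 to 8 nodes):

# Quorum sizes for the pool sizes produced by some_pool
for n in range(4, 9):
    f = (n - 1) // 3
    print(f"N={n}  F={f}  quorum N-F={n - f}")
# N=4 -> F=1, quorum 3; N=7 -> F=2, quorum 5; N=8 -> F=2, quorum 6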
Example #7
def generate_some(rnd: SimRandom):
    integers = [rnd.integer(-37, 7342) for _ in range(1000)]
    floats = [rnd.float(0.1, 5.5) for _ in range(1000)]
    strings = [rnd.string(10) for _ in range(10)]
    choice = rnd.choice(*integers)
    sample = rnd.sample(integers, 10)
    shuffled = rnd.shuffle(integers)
    return integers, floats, strings, choice, sample, shuffled
Example #8
def check_view_change_completes_under_normal_conditions(random: SimRandom):
    pool_size = random.integer(4, 8)
    pool = SimPool(pool_size, random)

    view_nos = {node._data.view_no for node in pool.nodes}
    assert len(view_nos) == 1
    initial_view_no = view_nos.pop()

    # Schedule view change at different time on all nodes
    for node in pool.nodes:
        pool.timer.schedule(random.integer(0, 10000),
                            node._view_changer.start_view_change)

    # Make sure all nodes complete view change
    pool.timer.wait_for(lambda: all(not node._data.waiting_for_new_view and
                                    node._data.view_no > initial_view_no
                                    for node in pool.nodes))

    # Make sure all nodes end up in same state
    for node_a, node_b in zip(pool.nodes, pool.nodes[1:]):
        assert node_a._data.view_no == node_b._data.view_no
        assert node_a._data.primary_name == node_b._data.primary_name
        assert node_a._data.preprepared == node_b._data.preprepared
Example #9
def check_view_change_completes_under_normal_conditions(random: SimRandom):
    # Create random pool with random initial state
    pool = some_pool(random)

    # Schedule view change at different time on all nodes
    for node in pool.nodes:
        pool.timer.schedule(random.integer(0, 10000),
                            node._view_changer.start_view_change)

    # Make sure all nodes complete view change
    pool.timer.wait_for(lambda: all(not node._data.waiting_for_new_view and
                                    node._data.view_no > 0
                                    for node in pool.nodes))

    # Make sure all nodes end up in same state
    for node_a, node_b in zip(pool.nodes, pool.nodes[1:]):
        assert node_a._data.view_no == node_b._data.view_no
        assert node_a._data.primary_name == node_b._data.primary_name
        assert node_a._data.stable_checkpoint == node_b._data.stable_checkpoint
        assert node_a._data.preprepared == node_b._data.preprepared
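All of these checks rely on a deterministic simulated timer: schedule(delay, callback) queues an event and wait_for(predicate) advances simulated time, firing callbacks in order until the predicate holds. A rough, hypothetical sketch of that mechanism (not the project's actual timer) is:

import heapq
from itertools import count


class SimTimerSketch:
    """Hypothetical deterministic timer that fires queued callbacks in time order."""

    def __init__(self):
        self._now = 0
        self._events = []        # heap of (fire_time, tie_breaker, callback)
        self._counter = count()  # tie-breaker keeps equal-time events deterministic

    def schedule(self, delay, callback):
        heapq.heappush(self._events,
                       (self._now + delay, next(self._counter), callback))

    def wait_for(self, predicate):
        # Advance simulated time event by event until the condition holds
        while self._events and not predicate():
            self._now, _, callback = heapq.heappop(self._events)
            callback()
        assert predicate(), "condition not reached before the event queue drained"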
Example #10
def some_pool(random: SimRandom) -> (SimPool, List):
    pool_size = random.integer(4, 8)
    pool = SimPool(pool_size, random)
    view_no = pool._initial_view_no
    log_size = pool.nodes[0].config.LOG_SIZE

    # Create simulated history
    # TODO: Move into helper?
    faulty = (pool_size - 1) // 3
    seq_no_per_cp = 10
    max_batches = 50
    batches = [
        BatchID(view_no, view_no, n, random.string(40))
        for n in range(1, max_batches)
    ]
    checkpoints = [
        some_checkpoint(random, view_no, n)
        for n in range(0, max_batches, seq_no_per_cp)
    ]

    # Preprepares
    pp_count = [random.integer(0, len(batches)) for _ in range(pool_size)]
    max_pp = sorted(pp_count)[faulty]
    # Prepares
    p_count = [random.integer(0, min(max_pp, pp)) for pp in pp_count]
    max_p = sorted(p_count)[faulty]
    # Checkpoints
    cp_count = [
        1 + random.integer(0, min(max_p, p)) // seq_no_per_cp for p in pp_count
    ]
    max_stable_cp_indx = sorted(cp_count)[faulty]
    stable_cp = [
        checkpoints[random.integer(0,
                                   min(max_stable_cp_indx, cp) - 1)].seqNoEnd
        for cp in cp_count
    ]

    # Initialize consensus data
    for i, node in enumerate(pool.nodes):
        high_watermark = stable_cp[i] + log_size
        node._data.preprepared = batches[:min(high_watermark, pp_count[i])]
        node._data.prepared = batches[:min(high_watermark, p_count[i])]
        node._data.checkpoints.clear()
        node._data.checkpoints.update(checkpoints[:cp_count[i]])
        node._data.stable_checkpoint = stable_cp[i]

    # Mock Ordering service to update preprepares for new view
    for node in pool.nodes:

        def update_shared_data(node, msg: NewViewCheckpointsApplied):
            x = [
                BatchID(view_no=msg.view_no,
                        pp_view_no=batch_id.pp_view_no,
                        pp_seq_no=batch_id.pp_seq_no,
                        pp_digest=batch_id.pp_digest)
                for batch_id in msg.batches
            ]
            node._orderer._data.preprepared = x

        node._orderer._subscription.subscribe(
            node._orderer._stasher, NewViewCheckpointsApplied,
            partial(update_shared_data, node))

    committed = []
    for i in range(1, max_batches):
        prepare_count = sum(1 for node in pool.nodes
                            if i <= len(node._data.prepared))
        has_prepared_cert = prepare_count >= pool_size - faulty
        if has_prepared_cert:
            batch_id = batches[i - 1]
            committed.append(
                BatchID(batch_id.view_no + 1, batch_id.pp_view_no,
                        batch_id.pp_seq_no, batch_id.pp_digest))

    return pool, committed
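The code above treats BatchID as a four-field record (view_no, pp_view_no, pp_seq_no, pp_digest), matching the keyword arguments used in update_shared_data. If it were not provided by the consensus package, a minimal stand-in consistent with that usage (illustrative only, not the library definition) would be:

from typing import NamedTuple


class BatchID(NamedTuple):
    view_no: int     # view in which the batch is (re)ordered
    pp_view_no: int  # view of the original PrePrepare
    pp_seq_no: int   # sequence number of the PrePrepare
    pp_digest: str   # digest of the batch contents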
Example #11
def some_random_preprepare(random: SimRandom, view_no: int,
                           pp_seq_no: int) -> PrePrepare:
    return some_preprepare(view_no, pp_seq_no, random.string(40))