Example 1
def some_checkpoint(random: SimRandom, view_no: int,
                    pp_seq_no: int) -> Checkpoint:
    # Checkpoint covering exactly pp_seq_no in the given view, with a random base58-encoded digest
    return Checkpoint(instId=0,
                      viewNo=view_no,
                      seqNoStart=pp_seq_no,
                      seqNoEnd=pp_seq_no,
                      digest=base58.b58encode(random.string(32)).decode())
Example 2
def some_checkpoint(random: SimRandom, view_no: int,
                    pp_seq_no: int) -> Checkpoint:
    # Variant of the helper above: the digest is a plain 40-character random string
    return Checkpoint(instId=0,
                      viewNo=view_no,
                      seqNoStart=pp_seq_no,
                      seqNoEnd=pp_seq_no,
                      digest=random.string(40))
Example 3
def generate_some(rnd: SimRandom):
    # Exercise the SimRandom API: bounded integers, fixed-length strings, choice, sample and shuffle
    values = [rnd.integer(-37, 7342) for _ in range(1000)]
    strings = [rnd.string(10) for _ in range(10)]
    choice = rnd.choice(*values)
    sample = rnd.sample(values, 10)
    shuffled = rnd.shuffle(values)
    return values, strings, choice, sample, shuffled
Example 4
def generate_some(rnd: SimRandom):
    # Same as the previous example, but also draws floats from a bounded range
    integers = [rnd.integer(-37, 7342) for _ in range(1000)]
    floats = [rnd.float(0.1, 5.5) for _ in range(1000)]
    strings = [rnd.string(10) for _ in range(10)]
    choice = rnd.choice(*integers)
    sample = rnd.sample(integers, 10)
    shuffled = rnd.shuffle(integers)
    return integers, floats, strings, choice, sample, shuffled
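A minimal sketch of how the helper above might be exercised, assuming a deterministic, seed-based SimRandom implementation such as DefaultSimRandom (an assumption of this sketch): reusing the same seed should reproduce exactly the same draws, which is what makes simulation tests built on SimRandom replayable.

def test_generate_some_is_reproducible():
    # DefaultSimRandom is assumed to be a deterministic SimRandom seeded by an integer
    first = generate_some(DefaultSimRandom(0))
    second = generate_some(DefaultSimRandom(0))
    assert first == second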
Example 5
def some_pool(random: SimRandom) -> (SimPool, List):
    pool_size = random.integer(4, 8)
    pool = SimPool(pool_size, random)

    # Create simulated history
    # TODO: Move into helper?
    faulty = (pool_size - 1) // 3
    seq_no_per_cp = 10
    max_batches = 50
    batches = [BatchID(0, n, random.string(40)) for n in range(1, max_batches)]
    checkpoints = [
        some_checkpoint(random, 0, n)
        for n in range(0, max_batches, seq_no_per_cp)
    ]

    # Preprepares
    pp_count = [random.integer(0, len(batches)) for _ in range(pool_size)]
    max_pp = sorted(pp_count)[faulty]
    # Prepares
    p_count = [random.integer(0, min(max_pp, pp)) for pp in pp_count]
    max_p = sorted(p_count)[faulty]
    # Checkpoints
    cp_count = [
        1 + random.integer(0, min(max_p, p)) // seq_no_per_cp for p in pp_count
    ]
    max_stable_cp_indx = sorted(cp_count)[faulty] - 1
    stable_cp = [
        checkpoints[random.integer(0, min(max_stable_cp_indx, cp))].seqNoEnd
        for cp in cp_count
    ]

    # Initialize consensus data
    for i, node in enumerate(pool.nodes):
        node._data.preprepared = batches[:pp_count[i]]
        node._data.prepared = batches[:p_count[i]]
        node._data.checkpoints = checkpoints[:cp_count[i]]
        node._data.stable_checkpoint = stable_cp[i]

    committed = []
    for i in range(1, max_batches):
        prepare_count = sum(1 for node in pool.nodes
                            if i <= len(node._data.prepared))
        has_prepared_cert = prepare_count >= pool_size - faulty
        if has_prepared_cert:
            committed.append(batches[i - 1])

    return pool, committed
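A short, hypothetical usage sketch for the version of some_pool shown above: the generated history is internally consistent, so a test can check that every prepared batch was also preprepared and that every batch reported as committed has a prepared certificate on at least N - f nodes (DefaultSimRandom, a seed-based SimRandom, is assumed here).

def test_some_pool_history_is_consistent():
    pool, committed = some_pool(DefaultSimRandom(0))
    faulty = (len(pool.nodes) - 1) // 3
    for node in pool.nodes:
        # prepared batches are always a prefix of the preprepared ones
        assert len(node._data.prepared) <= len(node._data.preprepared)
    for batch in committed:
        # a committed batch was prepared by a quorum of N - f nodes
        prepares = sum(1 for node in pool.nodes if batch in node._data.prepared)
        assert prepares >= len(pool.nodes) - faulty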
Example 6
def some_pool(random: SimRandom) -> (SimPool, List):
    pool_size = random.integer(4, 8)
    pool = SimPool(pool_size, random)
    view_no = pool._initial_view_no
    log_size = pool.nodes[0].config.LOG_SIZE

    # Create simulated history
    # TODO: Move into helper?
    faulty = (pool_size - 1) // 3
    seq_no_per_cp = 10
    max_batches = 50
    batches = [
        BatchID(view_no, view_no, n, random.string(40))
        for n in range(1, max_batches)
    ]
    checkpoints = [
        some_checkpoint(random, view_no, n)
        for n in range(0, max_batches, seq_no_per_cp)
    ]

    # Preprepares
    pp_count = [random.integer(0, len(batches)) for _ in range(pool_size)]
    max_pp = sorted(pp_count)[faulty]
    # Prepares
    p_count = [random.integer(0, min(max_pp, pp)) for pp in pp_count]
    max_p = sorted(p_count)[faulty]
    # Checkpoints
    cp_count = [
        1 + random.integer(0, min(max_p, p)) // seq_no_per_cp for p in pp_count
    ]
    max_stable_cp_indx = sorted(cp_count)[faulty]
    stable_cp = [
        checkpoints[random.integer(0,
                                   min(max_stable_cp_indx, cp) - 1)].seqNoEnd
        for cp in cp_count
    ]

    # Initialize consensus data
    for i, node in enumerate(pool.nodes):
        high_watermark = stable_cp[i] + log_size
        node._data.preprepared = batches[:min(high_watermark, pp_count[i])]
        node._data.prepared = batches[:min(high_watermark, p_count[i])]
        node._data.checkpoints.clear()
        node._data.checkpoints.update(checkpoints[:cp_count[i]])
        node._data.stable_checkpoint = stable_cp[i]

    # Mock Ordering service to update preprepares for new view
    for node in pool.nodes:

        def update_shared_data(node, msg: NewViewCheckpointsApplied):
            x = [
                BatchID(view_no=msg.view_no,
                        pp_view_no=batch_id.pp_view_no,
                        pp_seq_no=batch_id.pp_seq_no,
                        pp_digest=batch_id.pp_digest)
                for batch_id in msg.batches
            ]
            node._orderer._data.preprepared = x

        node._orderer._subscription.subscribe(
            node._orderer._stasher, NewViewCheckpointsApplied,
            partial(update_shared_data, node))

    committed = []
    for i in range(1, max_batches):
        prepare_count = sum(1 for node in pool.nodes
                            if i <= len(node._data.prepared))
        has_prepared_cert = prepare_count >= pool_size - faulty
        if has_prepared_cert:
            batch_id = batches[i - 1]
            committed.append(
                BatchID(batch_id.view_no + 1, batch_id.pp_view_no,
                        batch_id.pp_seq_no, batch_id.pp_digest))

    return pool, committed
Example 7
def some_random_preprepare(random: SimRandom, view_no: int,
                           pp_seq_no: int) -> PrePrepare:
    # Delegate to some_preprepare, supplying a random 40-character digest
    return some_preprepare(view_no, pp_seq_no, random.string(40))
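A last, purely hypothetical sketch: the per-position helpers above can be combined to produce a matching PrePrepare and Checkpoint for the same (view_no, pp_seq_no), for example when seeding a node's message history by hand.

def some_position_state(random: SimRandom, view_no: int, pp_seq_no: int):
    # Hypothetical helper (not from the listing): pair a random PrePrepare
    # with a checkpoint at the same position
    pp = some_random_preprepare(random, view_no, pp_seq_no)
    cp = some_checkpoint(random, view_no, pp_seq_no)
    return pp, cp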