def _modexp(data):
    """Compute the result of the EIP-198 modular-exponentiation precompile.

    ``data`` is the raw call data: three 32-byte big-endian length words
    (base length, exponent length, modulus length) followed by the three
    operands themselves.  Returns ``base ** exponent % modulus`` as an
    int, or 0 for the degenerate zero-length-base / zero-modulus cases.
    """
    base_length, exponent_length, modulus_length = _extract_lengths(data)

    # Degenerate cases: an empty base or an empty modulus yields 0.
    if base_length == 0:
        return 0
    elif modulus_length == 0:
        return 0

    # compute start:end indexes of each operand within the call data
    # (operands start at byte 96, right after the three length words)
    base_end_idx = 96 + base_length
    exponent_end_idx = base_end_idx + exponent_length
    modulus_end_idx = exponent_end_idx + modulus_length

    # extract arguments, zero-padding on the right in case the supplied
    # call data is shorter than the declared lengths
    modulus_bytes = zpad_right(
        data[exponent_end_idx:modulus_end_idx],
        to_size=modulus_length,
    )
    modulus = big_endian_to_int(modulus_bytes)
    # pow(..., 0) would raise ValueError; a zero modulus yields 0 instead.
    if modulus == 0:
        return 0

    base_bytes = zpad_right(data[96:base_end_idx], to_size=base_length)
    base = big_endian_to_int(base_bytes)

    exponent_bytes = zpad_right(
        data[base_end_idx:exponent_end_idx],
        to_size=exponent_length,
    )
    exponent = big_endian_to_int(exponent_bytes)

    # NOTE: removed a leftover debug print of base/exponent/modulus that
    # wrote to stdout on every invocation.
    return pow(base, exponent, modulus)
def _compute_modexp_gas_fee(data):
    """Return the gas fee charged for an EIP-198 modexp precompile call."""
    base_length, exponent_length, modulus_length = _extract_lengths(data)

    # The first (up to) 32 bytes of the exponent drive the length
    # adjustment; pad out short exponents before truncating.
    exponent_start = 96 + base_length
    leading_exponent_bytes = zpad_right(
        data[exponent_start:exponent_start + exponent_length],
        to_size=min(exponent_length, 32),
    )[:32]

    adjusted_exponent_length = _compute_adjusted_exponent_length(
        exponent_length,
        leading_exponent_bytes,
    )
    complexity = _compute_complexity(max(modulus_length, base_length))

    return (
        complexity
        * max(adjusted_exponent_length, 1)
        // constants.GAS_MOD_EXP_QUADRATIC_DENOMINATOR
    )
def test_blob_serialization(blobs, unpadded_body):
    # Serializing the blobs must reproduce the body, zero-padded on the
    # right up to the full collation size.
    expected_body = zpad_right(unpadded_body, COLLATION_SIZE)
    assert serialize_blobs(blobs) == expected_body
def test_blob_iteration(blobs, unpadded_body):
    # Deserializing a padded collation body must yield the original blobs
    # in order (round trip of serialization).
    padded_body = zpad_right(unpadded_body, COLLATION_SIZE)
    recovered_blobs = list(deserialize_blobs(padded_body))
    assert recovered_blobs == blobs
async def test_collation_requests(request, event_loop):
    # Exercises the collation-request wire protocol between two directly
    # linked sharding peers: the receiver serves collations out of its
    # shard DB via a ShardSyncer; the sender requests them by hash.

    # setup two peers
    sender, receiver = await get_directly_linked_sharding_peers(
        request, event_loop)
    receiver_peer_pool = MockPeerPoolWithConnectedPeers([receiver])

    # setup shard db for request receiving node
    receiver_db = ShardDB(MemoryDB())
    receiver_shard = Shard(receiver_db, 0)

    # create three collations and add two to the shard of the receiver
    # body is shared to avoid unnecessary chunk root calculation
    body = zpad_right(b"body", COLLATION_SIZE)
    chunk_root = calc_chunk_root(body)
    c1 = Collation(
        CollationHeader(0, chunk_root, 0, zpad_right(b"proposer1", 20)), body)
    c2 = Collation(
        CollationHeader(0, chunk_root, 1, zpad_right(b"proposer2", 20)), body)
    c3 = Collation(
        CollationHeader(0, chunk_root, 2, zpad_right(b"proposer3", 20)), body)
    # c3 is deliberately NOT added, so requests for it must come back empty
    for collation in [c1, c2]:
        receiver_shard.add_collation(collation)

    # start shard syncer so the receiver answers incoming requests
    receiver_syncer = ShardSyncer(receiver_shard, receiver_peer_pool)
    asyncio.ensure_future(receiver_syncer.run())

    # ensure the syncer task is torn down when the test ends
    def finalizer():
        event_loop.run_until_complete(receiver_syncer.cancel())
    request.addfinalizer(finalizer)

    cancel_token = CancelToken("test")

    # request single collation
    received_collations = await asyncio.wait_for(
        sender.get_collations([c1.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set([c1])

    # request multiple collations
    received_collations = await asyncio.wait_for(
        sender.get_collations([c1.hash, c2.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set([c1, c2])

    # request no collations
    received_collations = await asyncio.wait_for(
        sender.get_collations([], cancel_token),
        timeout=1,
    )
    assert received_collations == set()

    # request unknown collation
    received_collations = await asyncio.wait_for(
        sender.get_collations([c3.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set()

    # request multiple collations, including unknown one
    # (known ones are returned, the unknown one is silently omitted)
    received_collations = await asyncio.wait_for(
        sender.get_collations([c1.hash, c2.hash, c3.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set([c1, c2])
def calc_chunk_root(collation_body: bytes) -> Hash32: check_body_size(collation_body) chunks = iterate_chunks(collation_body) return calc_merkle_root(chunks) def check_body_size(body): if len(body) != COLLATION_SIZE: raise ValidationError( "{} byte collation body exceeds maximum allowed size".format( len(body))) return body @apply_to_return_value(check_body_size) @apply_to_return_value(lambda v: zpad_right(v, COLLATION_SIZE)) @apply_to_return_value(b"".join) def serialize_blobs(blobs: Iterable[bytes]) -> Iterator[bytes]: """Serialize a sequence of blobs and return a collation body.""" for i, blob in enumerate(blobs): if len(blob) == 0: raise ValidationError( "Cannot serialize blob {} of length 0".format(i)) if len(blob) > MAX_BLOB_SIZE: raise ValidationError("Cannot serialize blob {} of size {}".format( i, len(blob))) for blob_index in range(0, len(blob), CHUNK_DATA_SIZE): remaining_blob_bytes = len(blob) - blob_index if remaining_blob_bytes <= CHUNK_DATA_SIZE: