def _modexp(data):
    """Compute the result of the EIP-198 modular exponentiation precompile.

    ``data`` is the raw call data: three 32-byte big-endian lengths (base,
    exponent, modulus) followed by the base, exponent, and modulus bytes
    themselves starting at offset 96.

    Returns ``base ** exponent % modulus`` as an int, or 0 for the
    degenerate cases (zero-length base, zero-length modulus, or modulus 0).
    """
    base_length, exponent_length, modulus_length = _extract_lengths(data)

    if base_length == 0:
        return 0
    elif modulus_length == 0:
        return 0

    # compute start:end indexes of each argument within the call data
    base_end_idx = 96 + base_length
    exponent_end_idx = base_end_idx + exponent_length
    modulus_end_idx = exponent_end_idx + modulus_length

    # Extract the modulus first: a zero modulus short-circuits to 0 without
    # touching base or exponent.  Call data may be shorter than the declared
    # lengths, so each slice is right-padded with zero bytes.
    modulus_bytes = zpad_right(
        data[exponent_end_idx:modulus_end_idx],
        to_size=modulus_length,
    )
    modulus = big_endian_to_int(modulus_bytes)
    if modulus == 0:
        return 0

    base_bytes = zpad_right(data[96:base_end_idx], to_size=base_length)
    base = big_endian_to_int(base_bytes)

    exponent_bytes = zpad_right(
        data[base_end_idx:exponent_end_idx],
        to_size=exponent_length,
    )
    exponent = big_endian_to_int(exponent_bytes)

    # Debug print removed: precompiles must not write to stdout.
    return pow(base, exponent, modulus)
def random_collation(shard_id, period):
    """Build a collation whose body starts with 32 random bytes, zero-padded
    to COLLATION_SIZE, for the given shard and period."""
    entropy = random.getrandbits(8 * 32)
    padded_body = zpad_right(int_to_big_endian(entropy), COLLATION_SIZE)
    return Collation(
        CollationHeader(
            shard_id=shard_id,
            period=period,
            chunk_root=calc_chunk_root(padded_body),
            proposer_address=b"\xff" * 20,
        ),
        padded_body,
    )
def generate_collations():
    """Yield an endless stream of collations, one per period.

    Each collation is built from period-derived defaults.  The consumer may
    ``send()`` a dict of overrides that is merged over the defaults of the
    *next* collation; sending a falsy value keeps the plain defaults.
    """
    overrides = {}
    for current_period in itertools.count():
        body = zpad_right(b"body%d" % current_period, COLLATION_SIZE)
        defaults = {
            "shard_id": 0,
            "period": current_period,
            "body": body,
            "proposer_address": zpad_right(b"proposer%d" % current_period, 20),
        }
        # Skip the chunk-root computation when an override would replace it anyway.
        if "chunk_root" not in overrides:
            defaults["chunk_root"] = calc_chunk_root(body)
        params = merge(defaults, overrides)
        header = CollationHeader(
            shard_id=params["shard_id"],
            chunk_root=params["chunk_root"],
            period=params["period"],
            proposer_address=params["proposer_address"],
        )
        sent = yield Collation(header, params["body"])
        overrides = sent or {}
def _compute_modexp_gas_fee(data):
    """Compute the gas fee for a call to the modexp precompile.

    Uses the first 32 exponent bytes from the call data to derive the
    adjusted exponent length, and the larger of base/modulus length for the
    complexity term.
    """
    base_length, exponent_length, modulus_length = _extract_lengths(data)

    exponent_start = 96 + base_length
    raw_exponent_head = data[exponent_start:exponent_start + exponent_length]
    # Right-pad short call data, then cap at the first 32 bytes.
    first_32_exponent_bytes = zpad_right(
        raw_exponent_head,
        to_size=min(exponent_length, 32),
    )[:32]

    adjusted_exponent_length = _compute_adjusted_exponent_length(
        exponent_length,
        first_32_exponent_bytes,
    )
    complexity = _compute_complexity(max(modulus_length, base_length))
    return (
        complexity * max(adjusted_exponent_length, 1)
        // constants.GAS_MOD_EXP_QUADRATIC_DENOMINATOR
    )
async def propose(self) -> Collation:
    """Broadcast a new collation to the network, add it to the local shard, and
    return it."""
    # Build a collation for the current period; the body is just this
    # object's string representation, zero-padded to the full size.
    current_period = self.get_current_period()
    padded_body = zpad_right(str(self).encode("utf-8"), COLLATION_SIZE)
    collation = Collation(
        CollationHeader(
            self.shard.shard_id,
            calc_chunk_root(padded_body),
            current_period,
            b"\x11" * 20,
        ),
        padded_body,
    )

    self.logger.debug("Proposing collation {}".format(collation))

    # Record the collation locally before announcing it to peers.
    self.shard.add_collation(collation)

    # Announce the new collation hash to every connected peer.
    async for peer in self.peer_pool:
        cast(ShardingProtocol, peer.sub_proto).send_new_collation_hashes(
            [(collation.hash, collation.period)]
        )

    return collation
def test_blob_iteration(blobs, unpadded_body):
    """Deserializing a padded body must yield the original blobs."""
    padded_body = zpad_right(unpadded_body, COLLATION_SIZE)
    recovered = list(deserialize_blobs(padded_body))
    assert recovered == blobs
def test_blob_serialization(blobs, unpadded_body):
    """Serializing the blobs must produce the padded collation body."""
    expected_body = zpad_right(unpadded_body, COLLATION_SIZE)
    serialized = serialize_blobs(blobs)
    assert serialized == expected_body
async def test_collation_requests(request, event_loop):
    """End-to-end test of collation requests between two linked peers.

    The receiver holds two of three collations in its shard; the sender
    requests various combinations of hashes and must receive exactly the
    collations the receiver actually has.
    """
    # setup two peers
    sender, receiver = await get_directly_linked_sharding_peers(request, event_loop)
    receiver_peer_pool = MockPeerPoolWithConnectedPeers([receiver])

    # setup shard db for request receiving node
    receiver_db = ShardDB(MemoryDB())
    receiver_shard = Shard(receiver_db, 0)

    # create three collations and add two to the shard of the receiver
    # body is shared to avoid unnecessary chunk root calculation
    body = zpad_right(b"body", COLLATION_SIZE)
    chunk_root = calc_chunk_root(body)
    c1 = Collation(CollationHeader(0, chunk_root, 0, zpad_right(b"proposer1", 20)), body)
    c2 = Collation(CollationHeader(0, chunk_root, 1, zpad_right(b"proposer2", 20)), body)
    c3 = Collation(CollationHeader(0, chunk_root, 2, zpad_right(b"proposer3", 20)), body)
    # c3 is deliberately NOT added: it is the "unknown" collation below.
    for collation in [c1, c2]:
        receiver_shard.add_collation(collation)

    # start shard syncer
    receiver_syncer = ShardSyncer(receiver_shard, receiver_peer_pool)
    asyncio.ensure_future(receiver_syncer.run())

    # ensure the syncer task is cancelled when the test finishes
    def finalizer():
        event_loop.run_until_complete(receiver_syncer.cancel())
    request.addfinalizer(finalizer)

    cancel_token = CancelToken("test")

    # request single collation
    received_collations = await asyncio.wait_for(
        sender.get_collations([c1.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set([c1])

    # request multiple collations
    received_collations = await asyncio.wait_for(
        sender.get_collations([c1.hash, c2.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set([c1, c2])

    # request no collations
    received_collations = await asyncio.wait_for(
        sender.get_collations([], cancel_token),
        timeout=1,
    )
    assert received_collations == set()

    # request unknown collation
    received_collations = await asyncio.wait_for(
        sender.get_collations([c3.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set()

    # request multiple collations, including unknown one
    received_collations = await asyncio.wait_for(
        sender.get_collations([c1.hash, c2.hash, c3.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set([c1, c2])
def calc_chunk_root(collation_body: bytes) -> Hash32:
    """Return the Merkle root over the chunks of *collation_body*.

    Validates the body size first (raises ValidationError via
    ``check_body_size`` when it is not exactly COLLATION_SIZE bytes).
    """
    check_body_size(collation_body)
    chunks = list(iterate_chunks(collation_body))
    return calc_merkle_root(chunks)


def check_body_size(body):
    """Validate that *body* is exactly COLLATION_SIZE bytes and return it."""
    # NOTE(review): the message says "exceeds maximum allowed size", but the
    # `!=` check also rejects bodies that are too SMALL — the wording is
    # misleading for that case; consider rephrasing.
    if len(body) != COLLATION_SIZE:
        raise ValidationError("{} byte collation body exceeds maximum allowed size".format(
            len(body)
        ))
    return body


# The decorators apply bottom-up to the generator's output: join the yielded
# chunks into one byte string, right-pad it to COLLATION_SIZE, then validate
# the final body size.
@apply_to_return_value(check_body_size)
@apply_to_return_value(lambda v: zpad_right(v, COLLATION_SIZE))
@apply_to_return_value(b"".join)
def serialize_blobs(blobs: Iterable[bytes]) -> Iterator[bytes]:
    """Serialize a sequence of blobs and return a collation body."""
    for i, blob in enumerate(blobs):
        if len(blob) == 0:
            raise ValidationError("Cannot serialize blob {} of length 0".format(i))
        if len(blob) > MAX_BLOB_SIZE:
            raise ValidationError("Cannot serialize blob {} of size {}".format(i, len(blob)))
        # Emit the blob in CHUNK_DATA_SIZE-sized chunks; each chunk carries a
        # length indicator for the final (possibly partial) chunk.
        for blob_index in range(0, len(blob), CHUNK_DATA_SIZE):
            remaining_blob_bytes = len(blob) - blob_index
            if remaining_blob_bytes <= CHUNK_DATA_SIZE:
                length_bits = remaining_blob_bytes
            else:
            # NOTE(review): source is truncated here — the else branch and the
            # rest of this function are not visible in this view.