Example #1
0
 def hash(self):
     """
     Compute the object identifier: double SHA-256 over the unsigned
     serialized form.

     Returns:
         UInt256: hash of the serialized (unsigned) object data.
     """
     writer = BinaryWriter(stream=bytearray())
     try:
         self.serialize_unsigned(writer)
         hash_data = writer._stream.getvalue()
         # double SHA-256, renamed from `hash` to avoid shadowing the builtin
         digest = hashlib.sha256(hashlib.sha256(hash_data).digest()).digest()
     finally:
         # release the writer's stream even if serialization raises
         writer.cleanup()
     return UInt256(data=digest)
Example #2
0
 async def header_hash_by_height(self, height: int) -> 'UInt256':
     """
     Look up the header hash stored at the given block height.

     Args:
         height: block height to query.

     Returns:
         UInt256: the header hash in internal byte order, or a value of
         32 zero bytes when no header exists at that height.
     """
     header_hash = self.ledger.GetHeaderHash(height)
     if header_hash is None:
         # sentinel: 32 zero bytes signals "header not available"
         data = bytearray(32)
     else:
         # the ledger returns a hex string; convert to raw bytes and
         # reverse (presumably display order -> internal order — see callers)
         data = bytearray(binascii.unhexlify(header_hash))
         data.reverse()
     return UInt256(data=data)
Example #3
0
    def deserialize(self, reader: 'BinaryReader') -> None:
        """ Deserialize object.

        Args:
            reader: instance to deserialize from.

        Raises:
            ValueError: if the hash list cannot be read in full.
        """
        self.type = InventoryType(reader.read_uint8())
        self.hashes = []
        hash_list_count = reader.read_var_int()

        try:
            for _ in range(hash_list_count):
                self.hashes.append(UInt256(data=reader.read_bytes(32)))
        except ValueError as e:
            # re-raise with a clearer message, explicitly chaining the cause
            raise ValueError("Invalid hashes data") from e
Example #4
0
    async def sync_block(self) -> None:
        """Request the next range of missing blocks from the best available node.

        Exits early when requests are still outstanding, when nothing is
        missing, when the block cache is full, or when no connected node
        has the required height.
        """
        # to simplify syncing, don't ask for more data if we still have requests in flight
        if len(self.block_requests) > 0:
            return

        # the block cache might not have been fully processed, so we want to avoid asking for data we actually already have
        best_block_height = await self.get_best_stored_block_height()
        cur_header_height = await self.ledger.cur_header_height()
        blocks_to_fetch = cur_header_height - best_block_height
        if blocks_to_fetch <= 0:
            # blocks are already in sync with the headers we know about
            return

        block_cache_space = self.BLOCK_MAX_CACHE_SIZE - len(self.block_cache)
        if block_cache_space <= 0:
            return

        # clamp the request size to both the remaining cache space and the network limit
        if blocks_to_fetch > block_cache_space or blocks_to_fetch > self.BLOCK_NETWORK_REQ_LIMIT:
            blocks_to_fetch = min(block_cache_space, self.BLOCK_NETWORK_REQ_LIMIT)

        try:
            best_node_height = max(map(lambda node: node.best_height, self.nodemgr.nodes))
        except ValueError:
            # if the node list is empty max() fails on an empty list
            return

        node = self.nodemgr.get_next_node(best_node_height)
        if not node:
            # no nodes with our desired height. We'll wait for node manager to resolve this
            # or for the nodes to increase their height on the next produced block
            return

        # collect the header hashes for each height we still need
        hashes = []
        endheight = None
        for i in range(1, blocks_to_fetch + 1):
            next_block_height = best_block_height + i
            if self.is_in_blockcache(next_block_height):
                continue

            if next_block_height > best_node_height:
                break

            next_header_hash = await self.ledger.header_hash_by_height(next_block_height)
            if next_header_hash == UInt256.zero():
                # we do not have enough headers to fill the block cache. That's fine, just return
                break

            endheight = next_block_height
            hashes.append(next_header_hash)
            # record the in-flight request so the same height isn't requested twice
            self.add_block_flight_info(node.nodeid, next_block_height, next_header_hash)

        if len(hashes) > 0:
            logger.debug(f"Asking for blocks {best_block_height + 1} - {endheight} from {node.nodeid_human}")
            await node.get_data(InventoryType.block, hashes)
            # record the request time on the node's weight stats
            node.nodeweight.append_new_request_time()
Example #5
0
    def __init__(self, prev_hash, merkle_root, timestamp, index, consensus_data, next_consensus, witness):
        """
        Args:
            prev_hash: hash of the previous header.
            merkle_root: merkle root of the block's transactions.
            timestamp: block creation time.
            index: block height.
            consensus_data: consensus nonce.
            next_consensus: script hash of the next consensus node set.
            witness: verification script data (currently discarded, see below).
        """
        version = 0
        # the base class only needs a placeholder; the real merkle root is
        # stored on self below (typo fixed: was `temp_merkeroot`)
        temp_merkleroot = UInt256.zero()
        super(Header, self).__init__(version, prev_hash, temp_merkleroot, timestamp, index, consensus_data, next_consensus, witness)

        self.prev_hash = prev_hash
        self.merkle_root = merkle_root
        self.timestamp = timestamp
        self.index = index
        self.consensus_data = consensus_data
        self.next_consensus = next_consensus
        # NOTE(review): the `witness` argument is discarded and an empty
        # bytearray stored instead — looks deliberate, but confirm intent
        self.witness = bytearray()  # witness
Example #6
0
    def __init__(self, prev_hash, timestamp, index, consensus_data, next_consensus, witness):
        """Create a block with an always-empty transaction list.

        The base class receives version 0 and a zero merkle root as a
        placeholder.
        """
        placeholder_root = UInt256.zero()
        super(Block, self).__init__(0, prev_hash, placeholder_root, timestamp, index, consensus_data, next_consensus, witness)

        (self.prev_hash, self.timestamp, self.index,
         self.consensus_data, self.next_consensus, self.witness) = (
            prev_hash, timestamp, index, consensus_data, next_consensus, witness)

        # transactions are never deserialized, so the list stays empty
        self.transactions = []

        # bookkeeping fields for internal use only; not part of the official
        # Block implementation
        self._tx_count = 0
        self._size = 0
Example #7
0
    def test_simultaneous_same_header_received(self):
        """
        test ensures that we do not waste computing sources processing the same headers multiple times
        expected result is 1 "processed" event (return value 1) and 4 early exit events (return value -4)
        """
        # use a dedicated event loop so concurrent task scheduling is isolated
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.syncmgr.ledger = Ledger()
        self.syncmgr.nodemgr.add_node_error_count = asynctest.CoroutineMock()

        height = 12357
        node_id = 123

        # pre-register an outstanding header request at this height
        self.syncmgr.header_request = RequestInfo(height)
        self.syncmgr.header_request.add_new_flight(FlightInfo(node_id, height))

        # placeholder hash values; contents are irrelevant to the dedup logic
        fake_uint256 = UInt256(data=bytearray(32))
        fake_uint160 = UInt160(data=bytearray(20))
        not_used = object()

        # create 2000 headers that can be persisted
        headers = []
        for i in range(2000):
            headers.append(
                Header(fake_uint256, fake_uint256, 0, height + i, 0,
                       fake_uint160, not_used))

        # create 5 tasks to schedule incoming headers
        tasks = []
        for i in range(5):
            tasks.append(
                loop.create_task(self.syncmgr.on_headers_received(i, headers)))

        # run all tasks
        try:
            results = loop.run_until_complete(asyncio.gather(*tasks))
        finally:
            # always close the loop, even if gather raises
            loop.close()

        # assert that only the first one gets fully processed, the rest not
        success = 1
        already_exist = -4
        expected_results = [
            success, already_exist, already_exist, already_exist, already_exist
        ]
        self.assertEqual(results, expected_results)
Example #8
0
    async def relay(self, inventory) -> bool:
        """
        Relay the inventory to the network by broadcasting an ``inv`` message.

        Args:
            inventory: should be of type Block, Transaction or ConsensusPayload (see: InventoryType)

        Returns:
            bool: always True in the current implementation.
            NOTE(review): an earlier docstring promised False when the
            inventory is already in the mempool or relaying fails; no such
            path exists in this code — confirm the intended contract.
        """
        # TODO: this is based on the current/old neo-python Block, Transaction and ConsensusPlayload classes
        #  meaning attribute naming will change (no longer camelCase) once we move to python naming convention
        #  for now we need to convert them to our new types or calls will fail
        new_inventorytype = InventoryType(inventory.InventoryType)
        new_hash = UInt256(data=inventory.Hash.ToArray())
        inv = InventoryPayload(type=new_inventorytype, hashes=[new_hash])
        m = Message(command='inv', payload=inv)
        await self.send_message(m)

        return True
Example #9
0
 def __init__(self, start: UInt256, stop: UInt256 = None):
     """
     Args:
         start: first hash in the requested range.
         stop: (optional) last hash of the range; defaults to
             UInt256.zero() when not supplied.
     """
     self.hash_start = [start]
     # compare explicitly against None so a caller-supplied value that
     # happens to be falsy (e.g. a zero hash) is preserved as-is
     self.hash_stop = stop if stop is not None else UInt256.zero()