async def farm_new_block(self, request: FarmNewBlockProtocol):
    """Farm one new block on the simulated full node, paying all rewards to
    ``request.puzzle_hash``.

    Creates the genesis block first if the chain is empty, bundles whatever is
    currently in the mempool into the new block, and feeds the result back to
    the full node as if it arrived from the network.
    """
    # Hold the blockchain lock for the whole operation so the peak cannot move
    # underneath us between reading it and submitting the new block.
    async with self.full_node.blockchain.lock:
        self.log.info("Farming new block!")
        current_blocks = await self.get_all_full_blocks()
        if len(current_blocks) == 0:
            # Empty chain: create and insert the genesis block before farming.
            genesis = self.bt.get_consecutive_blocks(uint8(1))[0]
            await self.full_node.blockchain.receive_block(genesis)
        peak = self.full_node.blockchain.get_peak()
        assert peak is not None
        # Walk back from the peak to the most recent transaction block; the
        # mempool bundle must be built against a transaction block's hash.
        curr: BlockRecord = peak
        while not curr.is_transaction_block:
            curr = self.full_node.blockchain.block_record(curr.prev_hash)
        mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(curr.header_hash)
        if mempool_bundle is None:
            spend_bundle = None
        else:
            spend_bundle = mempool_bundle[0]
        # Re-read the chain: it may now include the genesis block added above.
        current_blocks = await self.get_all_full_blocks()
        target = request.puzzle_hash
        more = self.bt.get_consecutive_blocks(
            1,
            transaction_data=spend_bundle,
            farmer_reward_puzzle_hash=target,
            pool_reward_puzzle_hash=target,
            block_list_input=current_blocks,
            current_time=self.use_current_time,
        )
        # Submit the newly farmed block through the normal ingestion path.
        rr: RespondBlock = RespondBlock(more[-1])
        await self.full_node.respond_block(rr)
async def test_farmer_signage_point_endpoints(environment):
    """Exercise the farmer RPC signage-point endpoints end to end:
    empty state, injection of one signage point, and retrieval by hash."""
    (
        farmer_service,
        farmer_rpc_api,
        farmer_rpc_client,
        harvester_service,
        harvester_rpc_api,
        harvester_rpc_client,
    ) = environment
    farmer_api = farmer_service._api

    # Nothing has been injected yet, so both endpoints report empty state.
    assert (await farmer_rpc_client.get_signage_point(std_hash(b"2"))) is None
    assert len(await farmer_rpc_client.get_signage_points()) == 0

    async def signage_points_present():
        return len(await farmer_rpc_client.get_signage_points()) > 0

    new_sp = farmer_protocol.NewSignagePoint(
        std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
    )
    await farmer_api.new_signage_point(new_sp)

    # The farmer processes the signage point asynchronously; poll until it shows up.
    await time_out_assert(5, signage_points_present, True)
    assert (await farmer_rpc_client.get_signage_point(std_hash(b"2"))) is not None
async def farm_new_transaction_block(self, request: FarmNewBlockProtocol):
    """Farm one new guaranteed transaction block on the simulated full node,
    paying all rewards to ``request.puzzle_hash``.

    Creates the genesis block first if the chain is empty and bundles the
    current mempool contents into the new block. Any failure is logged and
    swallowed (best-effort, as before).
    """
    # `async with` releases the lock even if this task is cancelled between
    # acquisition and the body — the previous manual acquire()/release()
    # pattern could leak the lock on cancellation. Also matches the sibling
    # farm_new_block, which already uses `async with`.
    async with self.lock:
        try:
            self.log.info("Farming new block!")
            current_blocks = await self.get_all_full_blocks()
            if len(current_blocks) == 0:
                # Empty chain: create and insert the genesis block first.
                genesis = self.bt.get_consecutive_blocks(uint8(1))[0]
                await self.full_node.blockchain.receive_block(genesis)
            peak = self.full_node.blockchain.get_peak()
            assert peak is not None
            mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(peak.header_hash)
            spend_bundle = None if mempool_bundle is None else mempool_bundle[0]
            # Re-read the chain: it may now include the genesis block added above.
            current_blocks = await self.get_all_full_blocks()
            target = request.puzzle_hash
            more = self.bt.get_consecutive_blocks(
                1,
                transaction_data=spend_bundle,
                farmer_reward_puzzle_hash=target,
                pool_reward_puzzle_hash=target,
                block_list_input=current_blocks,
                guarantee_transaction_block=True,
                current_time=self.use_current_time,
            )
            rr = RespondBlock(more[-1])
            await self.full_node.respond_block(rr)
        except Exception as e:
            # Preserved best-effort behavior: a failed farm attempt is logged
            # but does not propagate to the caller.
            self.log.error(f"Error while farming block: {e}")
def make_sub_epoch_summary(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    blocks_included_height: uint32,
    prev_prev_block: BlockRecord,
    new_difficulty: Optional[uint64],
    new_sub_slot_iters: Optional[uint64],
) -> SubEpochSummary:
    """
    Creates a sub-epoch-summary object, assuming that the first block in the new sub-epoch is at height
    "blocks_included_height". Prev_prev_b is the second to last block in the previous sub-epoch. On a new epoch,
    new_difficulty and new_sub_slot_iters are also added.

    Args:
        constants: consensus constants being used for this chain
        blocks: dictionary from header hash to SBR of all included SBR
        blocks_included_height: block height in which the SES will be included
        prev_prev_block: second to last block in epoch
        new_difficulty: difficulty in new epoch
        new_sub_slot_iters: sub slot iters in new epoch

    Returns:
        the SubEpochSummary for the sub-epoch ending just before blocks_included_height.
    """
    assert prev_prev_block.height == blocks_included_height - 2

    # First sub_epoch.
    # This is not technically correct, because more blocks can potentially be included than
    # 2*MAX_SUB_SLOT_BLOCKS, but assuming less than 128 overflow blocks get infused in the
    # first 2 slots, it's not an issue.
    if (blocks_included_height + constants.MAX_SUB_SLOT_BLOCKS) // constants.SUB_EPOCH_BLOCKS <= 1:
        return SubEpochSummary(
            constants.GENESIS_CHALLENGE,
            constants.GENESIS_CHALLENGE,
            uint8(0),
            None,
            None,
        )
    # Walk back from prev_prev_block to the most recent block that embedded a
    # sub-epoch summary; that block anchors the previous sub-epoch.
    curr: BlockRecord = prev_prev_block
    while curr.sub_epoch_summary_included is None:
        curr = blocks.block_record(curr.prev_hash)
    assert curr is not None
    assert curr.finished_reward_slot_hashes is not None
    prev_ses = curr.sub_epoch_summary_included.get_hash()
    return SubEpochSummary(
        prev_ses,
        curr.finished_reward_slot_hashes[-1],
        # Number of blocks the previous SES block sits past its sub-epoch boundary.
        uint8(curr.height % constants.SUB_EPOCH_BLOCKS),
        new_difficulty,
        new_sub_slot_iters,
    )
def test_roundtrip(self):
    """Stream each fixed-width integer type to bytes and parse it back,
    asserting the value survives the round trip at both range extremes."""

    def assert_roundtrip(value):
        buffer = io.BytesIO()
        value.stream(buffer)
        buffer.seek(0)
        assert type(value).parse(buffer) == value

    # int512 is special. it uses 65 bytes to allow positive and negative
    # "uint512"
    max_uint512 = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF  # noqa: E501
    assert_roundtrip(int512(max_uint512))
    assert_roundtrip(int512(-max_uint512))

    # Every remaining type is checked at its minimum and maximum value.
    for int_type, minimum, maximum in [
        (uint128, 0, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF),
        (uint64, 0, 0xFFFFFFFFFFFFFFFF),
        (int64, -0x8000000000000000, 0x7FFFFFFFFFFFFFFF),
        (uint32, 0, 0xFFFFFFFF),
        (int32, -0x80000000, 0x7FFFFFFF),
        (uint16, 0, 0xFFFF),
        (int16, -0x8000, 0x7FFF),
        (uint8, 0, 0xFF),
        (int8, -0x80, 0x7F),
    ]:
        assert_roundtrip(int_type(maximum))
        assert_roundtrip(int_type(minimum))
async def check_node_alive(node: Node) -> Tuple[bool, Node]:
    """Probe ``node`` over websocket with a protocol handshake.

    Returns:
        (True, node) when the connection succeeds and the peer reports the
        'mainnet' network id; (False, node) on any failure.
    """
    ssl_context = ssl_context_for_server(
        'keys/chia_ca.crt', 'keys/chia_ca.key', 'keys/public_full_node.crt', 'keys/public_full_node.key'
    )
    try:
        # Connect exactly once, with a 10s timeout on connection establishment.
        # The previous version opened a throwaway probe connection first (which
        # was never closed — a leak) and then connected a second time.
        websocket = await asyncio.wait_for(
            websockets.connect(node.get_websocket_url(), ssl=ssl_context), timeout=10
        )
        try:
            handshake = make_msg(
                ProtocolMessageTypes.handshake,
                Handshake(
                    'mainnet',
                    protocol_version,
                    '0.0.0',
                    uint16(8884),
                    uint8(NodeType.INTRODUCER),
                    [(uint16(Capability.BASE.value), '1')],
                ),
            )
            encoded_handshake = bytes(handshake)
            await websocket.send(encoded_handshake)
            message = await websocket.recv()
            if message is None:
                logging.warning('Node ' + node.ip + ' did not return anything')
                return False, node
            full_message_loaded = Message.from_bytes(message)
            inbound_handshake = Handshake.from_bytes(full_message_loaded.data)
        finally:
            # Always close the connection, whatever happens after connecting.
            await websocket.close()
        if inbound_handshake.network_id != 'mainnet':
            logging.warning('Node ' + node.ip + ' is not on main net but is on mainnet port!')
            return False, node
        logging.info('Node ' + node.ip + ' is up.')
        return True, node
    except websockets.exceptions.ConnectionClosed:
        logging.warning('Node closed the connection')
        return False, node
    except asyncio.exceptions.TimeoutError:
        logging.warning('Node timeout : ' + node.ip)
        return False, node
    except websockets.exceptions.InvalidMessage:
        return False, node
    except OSError:
        return False, node
    except Exception as e:
        # Unexpected failure: log with traceback, but still report "not alive".
        logging.error(e)
        traceback.print_exc()
        return False, node
def calculate_deficit(
    constants: ConsensusConstants,
    height: uint32,
    prev_b: Optional[BlockRecord],
    overflow: bool,
    num_finished_sub_slots: int,
) -> uint8:
    """
    Returns the deficit of the block to be created at height.

    Args:
        constants: consensus constants being used for this chain
        height: block height of the block that we care about
        prev_b: previous block
        overflow: whether or not this is an overflow block
        num_finished_sub_slots: the number of finished slots between infusion points of prev and current
    """
    # Genesis starts one below the maximum deficit.
    if height == 0:
        return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)

    assert prev_b is not None
    prev_deficit: uint8 = prev_b.deficit

    if prev_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
        # The previous block reset the deficit, so it must have been an
        # overflow block — though possibly in a different sub-slot.
        if overflow and num_finished_sub_slots == 0:
            # Still an overflow block in the same sub-slot: cannot decrease.
            return uint8(prev_deficit)
        # Either no longer overflow, or overflow but in a new sub-slot: decrease.
        return uint8(prev_deficit - 1)

    if prev_deficit == 0:
        if num_finished_sub_slots == 0:
            # Same sub-slot as the challenge block: deficit stays at zero.
            return uint8(0)
        if num_finished_sub_slots == 1:
            # One slot boundary crossed: overflow blocks reset to the maximum,
            # non-overflow blocks start one below it.
            if overflow:
                return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK)
            return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)
        # More than one finished sub slot, we can decrease deficit
        return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)

    # Normal case: one more block infused, deficit shrinks by one.
    return uint8(prev_deficit - 1)
async def increment_sent(
    self,
    tx_id: bytes32,
    name: str,
    send_status: MempoolInclusionStatus,
    err: Optional[Err],
) -> bool:
    """
    Updates transaction sent count (Full Node has received spend_bundle and sent ack).
    Returns False when no record exists for tx_id, True after storing the update.
    """
    current: Optional[TransactionRecord] = await self.get_transaction_record(tx_id)
    if current is None:
        return False

    updated_sent_to = current.sent_to.copy()
    # Only bump the sent counter the first time this peer acknowledges the tx.
    peers_already_recorded = {peer_id for peer_id, _, _ in updated_sent_to}
    sent_count = uint32(current.sent if name in peers_already_recorded else current.sent + 1)

    err_str = err.name if err is not None else None
    updated_sent_to.append((name, uint8(send_status.value), err_str))

    # TransactionRecord is immutable, so rebuild it with the updated
    # sent count and sent_to list and store the replacement.
    tx: TransactionRecord = TransactionRecord(
        confirmed_at_height=current.confirmed_at_height,
        created_at_time=current.created_at_time,
        to_puzzle_hash=current.to_puzzle_hash,
        amount=current.amount,
        fee_amount=current.fee_amount,
        confirmed=current.confirmed,
        sent=sent_count,
        spend_bundle=current.spend_bundle,
        additions=current.additions,
        removals=current.removals,
        wallet_id=current.wallet_id,
        sent_to=updated_sent_to,
        trade_id=current.trade_id,
        type=current.type,
        name=current.name,
        memos=current.memos,
    )
    await self.add_transaction_record(tx, False)
    return True
def create_pool_state(
    state: "PoolSingletonState",
    target_puzzle_hash: "bytes32",
    owner_pubkey: "G1Element",
    pool_url: Optional[str],
    relative_lock_height: "uint32",
) -> "PoolState":
    """Build a PoolState for the current protocol version.

    Raises:
        AssertionError: if ``state`` is not one of the PoolSingletonState values.
    """
    if state not in {s.value for s in PoolSingletonState}:
        # Bug fix: the message was missing the f-prefix, so "{state}" was
        # emitted literally instead of the offending value.
        raise AssertionError(f"state {state} is not a valid PoolSingletonState")
    ps = PoolState(
        POOL_PROTOCOL_VERSION, uint8(state), target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height
    )
    # TODO Move verify here
    return ps
def test_StrictDataClassLists(self):
    """Nested List fields accept correctly-typed values and reject wrong shapes."""

    @dataclass(frozen=True)
    @strictdataclass
    class TestClass:
        a: List[int]
        b: List[List[uint8]]

    # Well-typed construction succeeds.
    assert TestClass([1, 2, 3], [[uint8(200), uint8(25)], [uint8(25)]])
    # Bug fix: this case previously constructed the same VALID input and its
    # `assert False` sentinel was swallowed by `except AssertionError`, so the
    # case could never fail. A dict is not a List[int] and must be rejected.
    try:
        TestClass({"1": 1}, [[uint8(200), uint8(25)], [uint8(25)]])  # type: ignore
        assert False
    except ValueError:
        pass
    # A flat list is not a List[List[uint8]].
    try:
        TestClass([1, 2, 3], [uint8(200), uint8(25)])  # type: ignore
        assert False
    except ValueError:
        pass
async def invoke(*args, **kwargs):
    """Send the request named by ``attr_name`` to the peer and decode the reply.

    Raises:
        AttributeError: when the peer's node type has no such method.
        ProtocolError: when the peer replies with a message type that is not a
            valid response to what we sent (the peer is also banned).
    """
    timeout = kwargs.get("timeout", 60)
    attribute = getattr(class_for_type(self.connection_type), attr_name, None)
    if attribute is None:
        raise AttributeError(f"Node type {self.connection_type} does not have method {attr_name}")

    msg: Message = Message(uint8(getattr(ProtocolMessageTypes, attr_name).value), None, args[0])
    request_start_t = time.time()
    result = await self.send_request(msg, timeout)
    self.log.debug(
        f"Time for request {attr_name}: {self.get_peer_logging()} = {time.time() - request_start_t}, "
        f"None? {result is None}"
    )
    if result is not None:
        sent_message_type = ProtocolMessageTypes(msg.type)
        recv_message_type = ProtocolMessageTypes(result.type)
        if not message_response_ok(sent_message_type, recv_message_type):
            # peer protocol violation
            error_message = (
                f"WSConnection.invoke sent message {sent_message_type.name} "
                f"but received {recv_message_type.name}"
            )
            # Bug fix: this previously passed `self.error_message`, an attribute
            # that does not exist, so the ban path raised AttributeError instead.
            await self.ban_peer_bad_protocol(error_message)
            raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [error_message])
        # Find the expected response payload type from the handler's annotations
        # (skipping the return annotation and the injected `peer` argument).
        ret_attr = getattr(class_for_type(self.local_type), ProtocolMessageTypes(result.type).name, None)
        req_annotations = ret_attr.__annotations__
        req = None
        for key in req_annotations:
            if key == "return" or key == "peer":
                continue
            else:
                req = req_annotations[key]
        assert req is not None
        result = req.from_bytes(result.data)
    return result
def get_vdf_info_and_proof(
    constants: ConsensusConstants,
    vdf_input: ClassgroupElement,
    challenge_hash: bytes32,
    number_iters: uint64,
    normalized_to_identity: bool = False,
) -> Tuple[VDFInfo, VDFProof]:
    """Run the VDF prover for ``number_iters`` iterations over ``vdf_input``
    and split its raw output into an info/proof pair."""
    form_size = ClassgroupElement.get_size(constants)
    # The prover returns the output form followed by the proof form,
    # each form_size bytes long.
    prover_output: bytes = prove(
        bytes(challenge_hash),
        vdf_input.data,
        constants.DISCRIMINANT_SIZE_BITS,
        number_iters,
    )
    output_form = ClassgroupElement.from_bytes(prover_output[:form_size])
    proof_form_bytes = prover_output[form_size : 2 * form_size]
    info = VDFInfo(challenge_hash, number_iters, output_form)
    proof = VDFProof(uint8(0), proof_form_bytes, normalized_to_identity)
    return info, proof