async def test_too_many_messages(self):
    # Too many messages
    r = RateLimiter()
    new_tx_message = make_msg(ProtocolMessageTypes.new_transaction, bytes([1] * 40))
    for i in range(3000):
        assert r.process_msg_and_check(new_tx_message)

    saw_disconnect = False
    for i in range(3000):
        response = r.process_msg_and_check(new_tx_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect

    # Non-tx message
    r = RateLimiter()
    new_peak_message = make_msg(ProtocolMessageTypes.new_peak, bytes([1] * 40))
    for i in range(20):
        assert r.process_msg_and_check(new_peak_message)

    saw_disconnect = False
    for i in range(200):
        response = r.process_msg_and_check(new_peak_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect
async def test_too_much_data(self):
    # Too much data
    r = RateLimiter()
    tx_message = make_msg(ProtocolMessageTypes.respond_transaction, bytes([1] * 500 * 1024))
    for i in range(10):
        assert r.process_msg_and_check(tx_message)

    saw_disconnect = False
    for i in range(300):
        response = r.process_msg_and_check(tx_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect

    r = RateLimiter()
    block_message = make_msg(ProtocolMessageTypes.respond_block, bytes([1] * 1024 * 1024))
    for i in range(10):
        assert r.process_msg_and_check(block_message)

    saw_disconnect = False
    for i in range(40):
        response = r.process_msg_and_check(block_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect
async def test_periodic_reset(self):
    r = RateLimiter(5)
    tx_message = make_msg(ProtocolMessageTypes.respond_transaction, bytes([1] * 500 * 1024))
    for i in range(10):
        assert r.process_msg_and_check(tx_message)

    saw_disconnect = False
    for i in range(300):
        response = r.process_msg_and_check(tx_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect
    assert not r.process_msg_and_check(tx_message)
    await asyncio.sleep(6)
    assert r.process_msg_and_check(tx_message)

    # Counts reset also
    r = RateLimiter(5)
    new_tx_message = make_msg(ProtocolMessageTypes.new_transaction, bytes([1] * 40))
    for i in range(3000):
        assert r.process_msg_and_check(new_tx_message)

    saw_disconnect = False
    for i in range(3000):
        response = r.process_msg_and_check(new_tx_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect
    assert not r.process_msg_and_check(new_tx_message)
    await asyncio.sleep(6)
    assert r.process_msg_and_check(new_tx_message)
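# NOTE (added sketch): the tests above only assume a RateLimiter whose
# constructor takes an optional reset period in seconds and a percentage of
# the standard limits, and whose process_msg_and_check(message) returns a
# falsy value once a peer exceeds its budget for the current period. A toy
# stand-in with that shape is sketched below; the flat 3000-message cap and
# the Counter-based accounting are illustrative assumptions, not the real
# implementation, which keeps per-message-type frequency and size tables.
import time
from collections import Counter


class ToyRateLimiter:
    def __init__(self, reset_seconds: int = 60, percentage_of_limit: int = 100):
        self.reset_seconds = reset_seconds
        self.percentage_of_limit = percentage_of_limit
        self.period_start = time.monotonic()
        self.message_counts: Counter = Counter()

    def process_msg_and_check(self, message) -> bool:
        # Start a fresh accounting period once reset_seconds have elapsed.
        if time.monotonic() - self.period_start >= self.reset_seconds:
            self.period_start = time.monotonic()
            self.message_counts.clear()
        self.message_counts[message.type] += 1
        # Hypothetical flat per-type limit, scaled by percentage_of_limit.
        limit = 3000 * self.percentage_of_limit // 100
        return self.message_counts[message.type] <= limit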
async def perform_handshake(
    self, network_id: bytes32, protocol_version: str, server_port: int, local_type: NodeType
):
    if self.is_outbound:
        outbound_handshake = make_msg(
            ProtocolMessageTypes.handshake,
            Handshake(
                network_id,
                protocol_version,
                chia_full_version_str(),
                uint16(server_port),
                uint8(local_type.value),
            ),
        )
        payload: Optional[Payload] = Payload(outbound_handshake, None)
        assert payload is not None
        await self._send_message(payload)
        payload = await self._read_one_message()
        if payload is None:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        inbound_handshake = Handshake.from_bytes(payload.msg.data)
        if ProtocolMessageTypes(payload.msg.type) != ProtocolMessageTypes.handshake:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        if inbound_handshake.protocol_version != protocol_version:
            raise ProtocolError(Err.INCOMPATIBLE_PROTOCOL_VERSION)
        self.peer_server_port = inbound_handshake.server_port
        self.connection_type = NodeType(inbound_handshake.node_type)
    else:
        try:
            payload = await self._read_one_message()
        except Exception:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        if payload is None:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        inbound_handshake = Handshake.from_bytes(payload.msg.data)
        if ProtocolMessageTypes(payload.msg.type) != ProtocolMessageTypes.handshake:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        if inbound_handshake.protocol_version != protocol_version:
            raise ProtocolError(Err.INCOMPATIBLE_PROTOCOL_VERSION)
        outbound_handshake = make_msg(
            ProtocolMessageTypes.handshake,
            Handshake(
                network_id,
                protocol_version,
                chia_full_version_str(),
                uint16(server_port),
                uint8(local_type.value),
            ),
        )
        payload = Payload(outbound_handshake, None)
        await self._send_message(payload)
        self.peer_server_port = inbound_handshake.server_port
        self.connection_type = NodeType(inbound_handshake.node_type)

    self.outbound_task = asyncio.create_task(self.outbound_handler())
    self.inbound_task = asyncio.create_task(self.inbound_handler())
    return True
async def request_peers_introducer(
    self,
    request: RequestPeersIntroducer,
    peer: WSChiaConnection,
) -> Optional[Message]:
    max_peers = self.introducer.max_peers_to_send
    if self.introducer.server is None or self.introducer.server.introducer_peers is None:
        return None
    raw_peers = self.introducer.server.introducer_peers.get_peers(
        max_peers * 5, True, self.introducer.recent_peer_threshold
    )

    peers = []
    for r_peer in raw_peers:
        if r_peer.get_hash() not in self.introducer.vetted:
            continue
        if self.introducer.vetted[r_peer.get_hash()]:
            if r_peer.host == peer.peer_host and r_peer.port == peer.peer_server_port:
                continue
            peer_without_timestamp = TimestampedPeerInfo(
                r_peer.host,
                r_peer.port,
                uint64(0),
            )
            peers.append(peer_without_timestamp)
        if len(peers) >= max_peers:
            break

    self.introducer.log.info(f"Sending vetted {peers}")
    msg = make_msg(ProtocolMessageTypes.respond_peers_introducer, RespondPeersIntroducer(peers))
    return msg
async def request_signatures(self, request: harvester_protocol.RequestSignatures):
    """
    The farmer requests a signature on the header hash, for one of the proofs that we found.
    A signature is created on the header hash using the harvester private key. This can also
    be used for pooling.
    """
    plot_filename = Path(request.plot_identifier[64:]).resolve()
    try:
        plot_info = self.harvester.provers[plot_filename]
    except KeyError:
        self.harvester.log.warning(f"KeyError plot {plot_filename} does not exist.")
        return

    local_sk = plot_info.local_sk
    agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), plot_info.farmer_public_key)

    # This is only a partial signature. When combined with the farmer's half, it will
    # form a complete PrependSignature.
    message_signatures: List[Tuple[bytes32, G2Element]] = []
    for message in request.messages:
        signature: G2Element = AugSchemeMPL.sign(local_sk, message, agg_pk)
        message_signatures.append((message, signature))

    response: harvester_protocol.RespondSignatures = harvester_protocol.RespondSignatures(
        request.plot_identifier,
        request.challenge_hash,
        request.sp_hash,
        local_sk.get_g1(),
        plot_info.farmer_public_key,
        message_signatures,
    )

    return make_msg(ProtocolMessageTypes.respond_signatures, response)
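# NOTE (added sketch): the two-party signing pattern above (completed by the
# farmer in respond_signatures further down) can be demonstrated with blspy
# alone. Each party signs the same message with the *aggregate* public key as
# the AugScheme prepend key, so the two partial signatures aggregate into a
# single signature that verifies under agg_pk. The fixed seeds and the direct
# public-key addition (standing in for ProofOfSpace.generate_plot_public_key)
# are illustrative assumptions.
from blspy import AugSchemeMPL

harvester_sk = AugSchemeMPL.key_gen(bytes([1] * 32))  # hypothetical local key
farmer_sk = AugSchemeMPL.key_gen(bytes([2] * 32))  # hypothetical farmer key
agg_pk = harvester_sk.get_g1() + farmer_sk.get_g1()

message = bytes([3] * 32)
harvester_half = AugSchemeMPL.sign(harvester_sk, message, agg_pk)
farmer_half = AugSchemeMPL.sign(farmer_sk, message, agg_pk)

full_signature = AugSchemeMPL.aggregate([harvester_half, farmer_half])
assert AugSchemeMPL.verify(agg_pk, message, full_signature)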
async def on_connect(self, peer: ws.WSChiaConnection):
    if (
        peer.is_outbound is False
        and peer.peer_server_port is not None
        and peer.connection_type is NodeType.FULL_NODE
        and self.server._local_type is NodeType.FULL_NODE
        and self.address_manager is not None
    ):
        timestamped_peer_info = TimestampedPeerInfo(
            peer.peer_host,
            peer.peer_server_port,
            uint64(int(time.time())),
        )
        await self.address_manager.add_to_new_table([timestamped_peer_info], peer.get_peer_info(), 0)
        if self.relay_queue is not None:
            self.relay_queue.put_nowait((timestamped_peer_info, 1))
    if (
        peer.is_outbound
        and peer.peer_server_port is not None
        and peer.connection_type is NodeType.FULL_NODE
        and self.server._local_type is NodeType.FULL_NODE
        and self.address_manager is not None
    ):
        msg = make_msg(ProtocolMessageTypes.request_peers, full_node_protocol.RequestPeers())
        await peer.send_message(msg)
async def _periodically_self_advertise(self):
    while not self.is_closed:
        try:
            try:
                await asyncio.sleep(24 * 3600)
            except asyncio.CancelledError:
                return
            # Clean up known nodes for neighbours every 24 hours.
            async with self.lock:
                for neighbour in list(self.neighbour_known_peers.keys()):
                    self.neighbour_known_peers[neighbour].clear()
            # Self advertise every 24 hours.
            peer = await self.server.get_peer_info()
            if peer is None:
                continue
            timestamped_peer = [
                TimestampedPeerInfo(
                    peer.host,
                    peer.port,
                    uint64(int(time.time())),
                )
            ]
            msg = make_msg(
                ProtocolMessageTypes.respond_peers,
                full_node_protocol.RespondPeers(timestamped_peer),
            )
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
        except Exception as e:
            self.log.error(f"Exception in self advertise: {e}")
            self.log.error(f"Traceback: {traceback.format_exc()}")
async def farm_block(self, request):
    raw_puzzle_hash = decode_puzzle_hash(request["address"])
    request = FarmNewBlockProtocol(raw_puzzle_hash)
    msg = make_msg(ProtocolMessageTypes.farm_new_block, request)
    await self.service.server.send_to_all([msg], NodeType.FULL_NODE)
    return {}
async def on_connect(self, peer: WSChiaConnection):
    # Sends a handshake to the harvester
    handshake = harvester_protocol.HarvesterHandshake(
        self.get_public_keys(),
        self.pool_public_keys,
    )
    if peer.connection_type is NodeType.HARVESTER:
        msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
        await peer.send_message(msg)
async def test_spam_tx(self, setup_two_nodes):
    nodes, _ = setup_two_nodes
    full_node_1, full_node_2 = nodes
    server_1 = nodes[0].full_node.server
    server_2 = nodes[1].full_node.server

    await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)

    assert len(server_1.all_connections) == 1

    ws_con: WSChiaConnection = list(server_1.all_connections.values())[0]
    ws_con_2: WSChiaConnection = list(server_2.all_connections.values())[0]

    ws_con.peer_host = "1.2.3.4"
    ws_con_2.peer_host = "1.2.3.4"

    new_tx_message = make_msg(
        ProtocolMessageTypes.new_transaction,
        full_node_protocol.NewTransaction(bytes([9] * 32), uint64(0), uint64(0)),
    )
    for i in range(4000):
        await ws_con._send_message(new_tx_message)

    await asyncio.sleep(1)
    assert not ws_con.closed

    # Tests outbound rate limiting, we will not send too much data
    for i in range(2000):
        await ws_con._send_message(new_tx_message)

    await asyncio.sleep(1)
    assert not ws_con.closed

    # Remove outbound rate limiter to test inbound limits
    ws_con.outbound_rate_limiter = RateLimiter(percentage_of_limit=10000)

    for i in range(6000):
        await ws_con._send_message(new_tx_message)
    await asyncio.sleep(1)

    def is_closed():
        return ws_con.closed

    await time_out_assert(15, is_closed)

    assert ws_con.closed

    def is_banned():
        return "1.2.3.4" in server_2.banned_peers

    await time_out_assert(15, is_banned)
async def test_large_message(self):
    # Large tx
    small_tx_message = make_msg(ProtocolMessageTypes.respond_transaction, bytes([1] * 500 * 1024))
    large_tx_message = make_msg(ProtocolMessageTypes.new_transaction, bytes([1] * 3 * 1024 * 1024))

    r = RateLimiter()
    assert r.process_msg_and_check(small_tx_message)
    assert r.process_msg_and_check(small_tx_message)
    assert not r.process_msg_and_check(large_tx_message)

    small_vdf_message = make_msg(ProtocolMessageTypes.respond_signage_point, bytes([1] * 5 * 1024))
    large_vdf_message = make_msg(ProtocolMessageTypes.respond_signage_point, bytes([1] * 600 * 1024))
    r = RateLimiter()
    assert r.process_msg_and_check(small_vdf_message)
    assert r.process_msg_and_check(small_vdf_message)
    assert not r.process_msg_and_check(large_vdf_message)
async def _address_relay(self):
    while not self.is_closed:
        try:
            try:
                relay_peer, num_peers = await self.relay_queue.get()
            except asyncio.CancelledError:
                return
            relay_peer_info = PeerInfo(relay_peer.host, relay_peer.port)
            if not relay_peer_info.is_valid():
                continue
            # https://en.bitcoin.it/wiki/Satoshi_Client_Node_Discovery#Address_Relay
            connections = self.server.get_full_node_connections()
            hashes = []
            cur_day = int(time.time()) // (24 * 60 * 60)
            for connection in connections:
                peer_info = connection.get_peer_info()
                if peer_info is None:
                    continue
                cur_hash = int.from_bytes(
                    bytes(
                        std_hash(
                            self.key.to_bytes(32, byteorder="big")
                            + peer_info.get_key()
                            + cur_day.to_bytes(3, byteorder="big")
                        )
                    ),
                    byteorder="big",
                )
                hashes.append((cur_hash, connection))
            hashes.sort(key=lambda x: x[0])
            for index, (_, connection) in enumerate(hashes):
                if index >= num_peers:
                    break
                peer_info = connection.get_peer_info()
                pair = (peer_info.host, peer_info.port)
                async with self.lock:
                    if pair in self.neighbour_known_peers and relay_peer.host in self.neighbour_known_peers[pair]:
                        continue
                    if pair not in self.neighbour_known_peers:
                        self.neighbour_known_peers[pair] = set()
                    self.neighbour_known_peers[pair].add(relay_peer.host)
                if connection.peer_node_id is None:
                    continue
                msg = make_msg(
                    ProtocolMessageTypes.respond_peers,
                    full_node_protocol.RespondPeers([relay_peer]),
                )
                await connection.send_message(msg)
        except Exception as e:
            self.log.error(f"Exception in address relay: {e}")
            self.log.error(f"Traceback: {traceback.format_exc()}")
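# NOTE (added sketch): the per-connection ranking in _address_relay can be
# reproduced with plain hashlib, since std_hash is SHA-256 in this codebase.
# The node-local key, the serialized peer key, and the current day salt the
# digest, so each relayed address deterministically reaches the same
# num_peers neighbours for a whole day. relay_rank and its parameters are
# hypothetical names for illustration only.
import hashlib
import time


def relay_rank(node_key: int, peer_key: bytes, now: float) -> int:
    cur_day = int(now) // (24 * 60 * 60)
    digest = hashlib.sha256(
        node_key.to_bytes(32, byteorder="big") + peer_key + cur_day.to_bytes(3, byteorder="big")
    ).digest()
    return int.from_bytes(digest, byteorder="big")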
async def test_non_tx_aggregate_limits(self):
    # Frequency limits
    r = RateLimiter()
    message_1 = make_msg(ProtocolMessageTypes.request_additions, bytes([1] * 5 * 1024))
    message_2 = make_msg(ProtocolMessageTypes.request_removals, bytes([1] * 1024))
    message_3 = make_msg(ProtocolMessageTypes.respond_additions, bytes([1] * 1024))

    for i in range(450):
        assert r.process_msg_and_check(message_1)

    for i in range(450):
        assert r.process_msg_and_check(message_2)

    saw_disconnect = False
    for i in range(450):
        response = r.process_msg_and_check(message_3)
        if not response:
            saw_disconnect = True
    assert saw_disconnect

    # Size limits
    r = RateLimiter()
    message_4 = make_msg(ProtocolMessageTypes.respond_proof_of_weight, bytes([1] * 49 * 1024 * 1024))
    message_5 = make_msg(ProtocolMessageTypes.respond_blocks, bytes([1] * 49 * 1024 * 1024))

    for i in range(2):
        assert r.process_msg_and_check(message_4)

    saw_disconnect = False
    for i in range(2):
        response = r.process_msg_and_check(message_5)
        if not response:
            saw_disconnect = True
    assert saw_disconnect
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
    message = harvester_protocol.NewSignagePointHarvester(
        new_signage_point.challenge_hash,
        new_signage_point.difficulty,
        new_signage_point.sub_slot_iters,
        new_signage_point.signage_point_index,
        new_signage_point.challenge_chain_sp,
    )

    msg = make_msg(ProtocolMessageTypes.new_signage_point_harvester, message)
    await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
    if new_signage_point.challenge_chain_sp not in self.farmer.sps:
        self.farmer.sps[new_signage_point.challenge_chain_sp] = []
    self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
    self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
    self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
    if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
        self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
        return

    (plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
        full_node_request.quality_string
    ]
    request = harvester_protocol.RequestSignatures(
        plot_identifier,
        challenge_hash,
        sp_hash,
        [full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
    )

    msg = make_msg(ProtocolMessageTypes.request_signatures, request)
    await self.farmer.server.send_to_specific([msg], node_id)
async def _action_messages(self) -> List[Message]:
    if self.wallet_state_manager is None or self.backup_initialized is False:
        return []
    actions: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions()
    result: List[Message] = []
    for action in actions:
        data = json.loads(action.data)
        action_data = data["data"]["action_data"]
        if action.name == "request_puzzle_solution":
            coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
            height = uint32(action_data["height"])
            msg = make_msg(
                ProtocolMessageTypes.request_puzzle_solution,
                wallet_protocol.RequestPuzzleSolution(coin_name, height),
            )
            result.append(msg)

    return result
async def test_spam_message_too_large(self, setup_two_nodes):
    nodes, _ = setup_two_nodes
    full_node_1, full_node_2 = nodes
    server_1 = nodes[0].full_node.server
    server_2 = nodes[1].full_node.server

    await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)

    assert len(server_1.all_connections) == 1

    ws_con: WSChiaConnection = list(server_1.all_connections.values())[0]
    ws_con_2: WSChiaConnection = list(server_2.all_connections.values())[0]

    ws_con.peer_host = "1.2.3.4"
    ws_con_2.peer_host = "1.2.3.4"

    def is_closed():
        return ws_con.closed

    new_message = make_msg(
        ProtocolMessageTypes.request_mempool_transactions,
        full_node_protocol.RequestMempoolTransactions(bytes([0] * 5 * 1024 * 1024)),
    )

    # Tests outbound rate limiting, we will not send big messages
    await ws_con._send_message(new_message)

    await asyncio.sleep(1)
    assert not ws_con.closed

    # Remove outbound rate limiter to test inbound limits
    ws_con.outbound_rate_limiter = FakeRateLimiter()

    await ws_con._send_message(new_message)
    await time_out_assert(15, is_closed)

    # Banned
    def is_banned():
        return "1.2.3.4" in server_2.banned_peers

    await time_out_assert(15, is_banned)
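# NOTE (added sketch): FakeRateLimiter is not defined in this section. From
# the way the test uses it, it only needs a process_msg_and_check that always
# approves, so our side sends the oversized message and the *peer's* inbound
# limiter is what triggers the disconnect and ban. A plausible minimal
# stand-in:
class FakeRateLimiter:
    def process_msg_and_check(self, message) -> bool:
        return True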
async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]:
    if self.wallet_state_manager is None or self.backup_initialized is False or self._shut_down:
        return []
    messages: List[Tuple[Message, Set[bytes32]]] = []

    records: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent()

    for record in records:
        if record.spend_bundle is None:
            continue
        msg = make_msg(
            ProtocolMessageTypes.send_transaction,
            wallet_protocol.SendTransaction(record.spend_bundle),
        )
        already_sent = set()
        for peer, status, _ in record.sent_to:
            already_sent.add(hexstr_to_bytes(peer))
        messages.append((msg, already_sent))

    return messages
async def request_peers(self, peer_info: PeerInfo):
    try:
        # Prevent a fingerprint attack: do not send peers to inbound connections.
        # This asymmetric behavior for inbound and outbound connections was introduced
        # to prevent a fingerprinting attack: an attacker can send specific fake addresses
        # to users' AddrMan and later request them by sending getaddr messages.
        # Making nodes which are behind NAT and can only make outgoing connections ignore
        # the request_peers message mitigates the attack.
        if self.address_manager is None:
            return None
        peers = await self.address_manager.get_peers()
        await self.add_peers_neighbour(peers, peer_info)

        msg = make_msg(
            ProtocolMessageTypes.respond_peers,
            full_node_protocol.RespondPeers(peers),
        )
        return msg
    except Exception as e:
        self.log.error(f"Request peers exception: {e}")
async def _check_for_end_of_subslot(self):
    left_subslot_iters = [
        iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.END_OF_SUBSLOT
    ]
    if len(left_subslot_iters) == 0:
        return
    chains_finished = [
        (chain, info, proof)
        for chain, info, proof in self.proofs_finished
        if info.number_of_iterations == left_subslot_iters[0]
    ]
    if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
        chain_count = 3
    else:
        chain_count = 2
    if len(chains_finished) == chain_count:
        icc_ip_vdf: Optional[VDFInfo] = None
        icc_ip_proof: Optional[VDFProof] = None
        cc_vdf: Optional[VDFInfo] = None
        cc_proof: Optional[VDFProof] = None
        rc_vdf: Optional[VDFInfo] = None
        rc_proof: Optional[VDFProof] = None
        for chain, info, proof in chains_finished:
            if chain == Chain.CHALLENGE_CHAIN:
                cc_vdf = info
                cc_proof = proof
            if chain == Chain.REWARD_CHAIN:
                rc_vdf = info
                rc_proof = proof
            if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                icc_ip_vdf = info
                icc_ip_proof = proof
        assert cc_proof is not None and rc_proof is not None and cc_vdf is not None and rc_vdf is not None

        if rc_vdf.challenge != self.last_state.get_challenge(Chain.REWARD_CHAIN):
            log.warning(
                f"Do not have correct challenge {self.last_state.get_challenge(Chain.REWARD_CHAIN).hex()} has"
                f" {rc_vdf.challenge}"
            )
            # This proof is on an outdated challenge, so don't use it
            return
        log.info("Collected end of subslot vdfs.")

        iters_from_sub_slot_start = cc_vdf.number_of_iterations + self.last_state.get_last_ip()
        cc_vdf = dataclasses.replace(cc_vdf, number_of_iterations=iters_from_sub_slot_start)
        if icc_ip_vdf is not None:
            if self.last_state.peak is not None:
                total_iters = (
                    self.last_state.get_total_iters()
                    - self.last_state.get_last_ip()
                    + self.last_state.get_sub_slot_iters()
                )
            else:
                total_iters = self.last_state.get_total_iters() + self.last_state.get_sub_slot_iters()
            iters_from_cb = uint64(total_iters - self.last_state.last_challenge_sb_or_eos_total_iters)
            if iters_from_cb > self.last_state.sub_slot_iters:
                log.error(f"{self.last_state.peak}")
                log.error(f"{self.last_state.subslot_end}")
                assert False
            assert iters_from_cb <= self.last_state.sub_slot_iters
            icc_ip_vdf = dataclasses.replace(icc_ip_vdf, number_of_iterations=iters_from_cb)

        icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = (
            None if icc_ip_vdf is None else InfusedChallengeChainSubSlot(icc_ip_vdf)
        )
        icc_sub_slot_hash = icc_sub_slot.get_hash() if self.last_state.get_deficit() == 0 else None
        next_ses: Optional[SubEpochSummary] = self.last_state.get_next_sub_epoch_summary()
        if next_ses is not None:
            log.info(f"Including sub epoch summary {next_ses}")
            ses_hash = next_ses.get_hash()
            new_sub_slot_iters = next_ses.new_sub_slot_iters
            new_difficulty = next_ses.new_difficulty
        else:
            ses_hash = None
            new_sub_slot_iters = None
            new_difficulty = None
        cc_sub_slot = ChallengeChainSubSlot(cc_vdf, icc_sub_slot_hash, ses_hash, new_sub_slot_iters, new_difficulty)
        eos_deficit: uint8 = (
            self.last_state.get_deficit()
            if self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK > self.last_state.get_deficit() > 0
            else self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
        )
        rc_sub_slot = RewardChainSubSlot(
            rc_vdf,
            cc_sub_slot.get_hash(),
            icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
            eos_deficit,
        )
        eos_bundle = EndOfSubSlotBundle(
            cc_sub_slot,
            icc_sub_slot,
            rc_sub_slot,
            SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
        )
        if self.server is not None:
            msg = make_msg(
                ProtocolMessageTypes.new_end_of_sub_slot_vdf,
                timelord_protocol.NewEndOfSubSlotVDF(eos_bundle),
            )
            await self.server.send_to_all([msg], NodeType.FULL_NODE)

        log.info(
            f"Built end of subslot bundle. cc hash: {eos_bundle.challenge_chain.get_hash()}. New_difficulty: "
            f"{eos_bundle.challenge_chain.new_difficulty} New ssi: {eos_bundle.challenge_chain.new_sub_slot_iters}"
        )

        if next_ses is None or next_ses.new_difficulty is None:
            self.total_unfinished += len(self.overflow_blocks)
            self.unfinished_blocks = self.overflow_blocks
        else:
            # No overflow blocks in a new epoch
            self.unfinished_blocks = []
        self.overflow_blocks = []
        self.new_subslot_end = eos_bundle
async def _check_for_new_ip(self):
    infusion_iters = [
        iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.INFUSION_POINT
    ]
    for iteration in infusion_iters:
        proofs_with_iter = [
            (chain, info, proof)
            for chain, info, proof in self.proofs_finished
            if info.number_of_iterations == iteration
        ]
        if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
            chain_count = 3
        else:
            chain_count = 2
        if len(proofs_with_iter) == chain_count:
            block = None
            ip_iters = None
            for unfinished_block in self.unfinished_blocks:
                try:
                    _, ip_iters = iters_from_block(
                        self.constants,
                        unfinished_block.reward_chain_block,
                        self.last_state.get_sub_slot_iters(),
                        self.last_state.get_difficulty(),
                    )
                except Exception:
                    continue
                if ip_iters - self.last_state.get_last_ip() == iteration:
                    block = unfinished_block
                    break
            if block is not None:
                ip_total_iters = self.last_state.get_total_iters() + iteration
                challenge = block.reward_chain_block.get_hash()
                icc_info: Optional[VDFInfo] = None
                icc_proof: Optional[VDFProof] = None
                cc_info: Optional[VDFInfo] = None
                cc_proof: Optional[VDFProof] = None
                rc_info: Optional[VDFInfo] = None
                rc_proof: Optional[VDFProof] = None
                for chain, info, proof in proofs_with_iter:
                    if chain == Chain.CHALLENGE_CHAIN:
                        cc_info = info
                        cc_proof = proof
                    if chain == Chain.REWARD_CHAIN:
                        rc_info = info
                        rc_proof = proof
                    if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                        icc_info = info
                        icc_proof = proof
                if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                    log.error(f"Insufficient VDF proofs for infusion point ch: {challenge} iterations:{iteration}")
                    return
                if rc_info.challenge != self.last_state.get_challenge(Chain.REWARD_CHAIN):
                    log.warning(
                        f"Do not have correct challenge {self.last_state.get_challenge(Chain.REWARD_CHAIN).hex()} "
                        f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
                    )
                    # This proof is on an outdated challenge, so don't use it
                    continue
                self.unfinished_blocks.remove(block)
                self.total_infused += 1
                log.info(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")
                if not self.last_state.can_infuse_block():
                    # Too many blocks
                    log.warning("Too many blocks, cannot infuse, discarding")
                    return
                overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)
                cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
                response = timelord_protocol.NewInfusionPointVDF(
                    challenge,
                    cc_info,
                    cc_proof,
                    rc_info,
                    rc_proof,
                    icc_info,
                    icc_proof,
                )
                msg = make_msg(ProtocolMessageTypes.new_infusion_point_vdf, response)
                if self.server is not None:
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)

                self.proofs_finished = self._clear_proof_list(iteration)

                if (
                    self.last_state.get_last_block_total_iters() is None
                    and not self.last_state.state_type == StateType.FIRST_SUB_SLOT
                ):
                    # We don't know when the last block was, so we can't make peaks
                    return
                sp_total_iters = (
                    ip_total_iters
                    - ip_iters
                    + calculate_sp_iters(
                        self.constants,
                        block.sub_slot_iters,
                        block.reward_chain_block.signage_point_index,
                    )
                    - (block.sub_slot_iters if overflow else 0)
                )
                if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                    is_transaction_block = True
                    height: uint32 = uint32(0)
                else:
                    is_transaction_block = self.last_state.get_last_block_total_iters() < sp_total_iters
                    height = uint32(self.last_state.get_height() + 1)

                if height < 5:
                    # Don't directly update our state for the first few blocks, because we cannot validate
                    # whether the pre-farm is correct
                    return

                new_reward_chain_block = RewardChainBlock(
                    self.last_state.get_weight() + block.difficulty,
                    height,
                    ip_total_iters,
                    block.reward_chain_block.signage_point_index,
                    block.reward_chain_block.pos_ss_cc_challenge_hash,
                    block.reward_chain_block.proof_of_space,
                    block.reward_chain_block.challenge_chain_sp_vdf,
                    block.reward_chain_block.challenge_chain_sp_signature,
                    cc_info,
                    block.reward_chain_block.reward_chain_sp_vdf,
                    block.reward_chain_block.reward_chain_sp_signature,
                    rc_info,
                    icc_info,
                    is_transaction_block,
                )
                if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                    # Genesis
                    new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                elif overflow and self.last_state.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                    if self.last_state.peak is not None:
                        assert self.last_state.subslot_end is None
                        # This means the previous block is also an overflow block, and did not manage
                        # to lower the deficit, therefore we cannot lower it either. (new slot)
                        new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                    else:
                        # This means we are the first infusion in this sub-slot. This may be a new slot or not.
                        assert self.last_state.subslot_end is not None
                        if self.last_state.subslot_end.infused_challenge_chain is None:
                            # There is no ICC, which means we are not finishing a slot. We can reduce the deficit.
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                        else:
                            # There is an ICC, which means we are finishing a slot. Different slot, so can't change
                            # the deficit
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                else:
                    new_deficit = max(self.last_state.deficit - 1, 0)

                if new_deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                    last_csb_or_eos = ip_total_iters
                else:
                    last_csb_or_eos = self.last_state.last_challenge_sb_or_eos_total_iters

                if self.last_state.get_infused_sub_epoch_summary() is not None:
                    new_sub_epoch_summary = None
                else:
                    new_sub_epoch_summary = block.sub_epoch_summary

                self.new_peak = timelord_protocol.NewPeakTimelord(
                    new_reward_chain_block,
                    block.difficulty,
                    new_deficit,
                    block.sub_slot_iters,
                    new_sub_epoch_summary,
                    self.last_state.reward_challenge_cache,
                    last_csb_or_eos,
                )
                if self.total_unfinished > 0:
                    infusion_rate = int(self.total_infused / self.total_unfinished * 100)
                    log.info(
                        f"Total unfinished blocks: {self.total_unfinished}. "
                        f"Total infused blocks: {self.total_infused}. "
                        f"Infusion rate: {infusion_rate}."
                    )

                await self._handle_new_peak()
                # Break so we alternate between checking SP and IP
                break
async def _check_for_new_sp(self):
    signage_iters = [
        iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.SIGNAGE_POINT
    ]
    if len(signage_iters) == 0:
        return
    to_remove = []
    for potential_sp_iters, signage_point_index in self.signage_point_iters:
        if potential_sp_iters not in signage_iters:
            continue
        signage_iter = potential_sp_iters
        proofs_with_iter = [
            (chain, info, proof)
            for chain, info, proof in self.proofs_finished
            if info.number_of_iterations == signage_iter
        ]
        # Wait for both cc and rc to have the signage point.
        if len(proofs_with_iter) == 2:
            cc_info: Optional[VDFInfo] = None
            cc_proof: Optional[VDFProof] = None
            rc_info: Optional[VDFInfo] = None
            rc_proof: Optional[VDFProof] = None
            for chain, info, proof in proofs_with_iter:
                if chain == Chain.CHALLENGE_CHAIN:
                    cc_info = info
                    cc_proof = proof
                if chain == Chain.REWARD_CHAIN:
                    rc_info = info
                    rc_proof = proof
            if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                log.error(f"Insufficient signage point data {signage_iter}")
                continue
            if rc_info.challenge != self.last_state.get_challenge(Chain.REWARD_CHAIN):
                log.warning(
                    f"SP: Do not have correct challenge {self.last_state.get_challenge(Chain.REWARD_CHAIN).hex()}"
                    f" has {rc_info.challenge}"
                )
                # This proof is on an outdated challenge, so don't use it
                continue
            iters_from_sub_slot_start = cc_info.number_of_iterations + self.last_state.get_last_ip()
            response = timelord_protocol.NewSignagePointVDF(
                signage_point_index,
                dataclasses.replace(cc_info, number_of_iterations=iters_from_sub_slot_start),
                cc_proof,
                rc_info,
                rc_proof,
            )
            if self.server is not None:
                msg = make_msg(ProtocolMessageTypes.new_signage_point_vdf, response)
                await self.server.send_to_all([msg], NodeType.FULL_NODE)
            # Cleanup the signage point from memory.
            to_remove.append((signage_iter, signage_point_index))

            self.proofs_finished = self._clear_proof_list(signage_iter)
            # Send the next 3 signage points to the chains.
            next_iters_count = 0
            for next_sp, k in self.signage_point_iters:
                for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
                    if next_sp not in self.iters_submitted[chain] and next_sp not in self.iters_to_submit[chain]:
                        self.iters_to_submit[chain].append(next_sp)
                self.iteration_to_proof_type[next_sp] = IterationType.SIGNAGE_POINT
                next_iters_count += 1
                if next_iters_count == 3:
                    break
            # Break so we alternate between checking SP and IP
            break
    for r in to_remove:
        self.signage_point_iters.remove(r)
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
    """
    There are two cases: receiving signatures for sps, or receiving signatures for the block.
    """
    if response.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
        return
    is_sp_signatures: bool = False
    sps = self.farmer.sps[response.sp_hash]
    signage_point_index = sps[0].signage_point_index
    found_sp_hash_debug = False
    for sp_candidate in sps:
        if response.sp_hash == response.message_signatures[0][0]:
            found_sp_hash_debug = True
            if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                is_sp_signatures = True
    if found_sp_hash_debug:
        assert is_sp_signatures

    pospace = None
    for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
        if plot_identifier == response.plot_identifier:
            pospace = candidate_pospace
    assert pospace is not None

    computed_quality_string = pospace.verify_and_get_quality_string(
        self.farmer.constants, response.challenge_hash, response.sp_hash
    )
    if computed_quality_string is None:
        self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
        return

    if is_sp_signatures:
        (
            challenge_chain_sp,
            challenge_chain_sp_harv_sig,
        ) = response.message_signatures[0]
        reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
        for sk in self.farmer.get_private_keys():
            pk = sk.get_g1()
            if pk == response.farmer_pk:
                agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk)
                assert agg_pk == pospace.plot_public_key
                farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
                agg_sig_cc_sp = AugSchemeMPL.aggregate([challenge_chain_sp_harv_sig, farmer_share_cc_sp])
                assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)

                # This means it passes the sp filter
                farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
                agg_sig_rc_sp = AugSchemeMPL.aggregate([reward_chain_sp_harv_sig, farmer_share_rc_sp])
                assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)

                if pospace.pool_public_key is not None:
                    assert pospace.pool_contract_puzzle_hash is None
                    pool_pk = bytes(pospace.pool_public_key)
                    if pool_pk not in self.farmer.pool_sks_map:
                        self.farmer.log.error(
                            f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                        )
                        return
                    pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
                    assert pool_target is not None
                    pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                        self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
                    )
                else:
                    assert pospace.pool_contract_puzzle_hash is not None
                    pool_target = None
                    pool_target_signature = None

                request = farmer_protocol.DeclareProofOfSpace(
                    response.challenge_hash,
                    challenge_chain_sp,
                    signage_point_index,
                    reward_chain_sp,
                    pospace,
                    agg_sig_cc_sp,
                    agg_sig_rc_sp,
                    self.farmer.wallet_target,
                    pool_target,
                    pool_target_signature,
                )
                self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
                msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
                await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                return
    else:
        # This is a response with block signatures
        for sk in self.farmer.get_private_keys():
            (
                foliage_block_data_hash,
                foliage_sig_harvester,
            ) = response.message_signatures[0]
            (
                foliage_transaction_block_hash,
                foliage_transaction_block_sig_harvester,
            ) = response.message_signatures[1]
            pk = sk.get_g1()
            if pk == response.farmer_pk:
                agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk)
                assert agg_pk == pospace.plot_public_key
                foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
                foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)
                foliage_agg_sig = AugSchemeMPL.aggregate([foliage_sig_harvester, foliage_sig_farmer])
                foliage_block_agg_sig = AugSchemeMPL.aggregate(
                    [foliage_transaction_block_sig_harvester, foliage_transaction_block_sig_farmer]
                )
                assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
                assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)

                request_to_nodes = farmer_protocol.SignedValues(
                    computed_quality_string,
                    foliage_agg_sig,
                    foliage_block_agg_sig,
                )

                msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
                await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
async def new_proof_of_space(
    self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
    """
    This is a response from the harvester, for a NewChallenge. Here we check if the proof
    of space is sufficiently good, and if so, we ask for the whole proof.
    """
    if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
        self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
        self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))

    max_pos_per_sp = 5
    if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
        self.farmer.log.warning(
            f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
            f"{new_proof_of_space.sp_hash}"
        )
        return

    if new_proof_of_space.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(
            f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
        )
        return

    sps = self.farmer.sps[new_proof_of_space.sp_hash]
    for sp in sps:
        computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
            self.farmer.constants,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
        )
        if computed_quality_string is None:
            self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
            return

        self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1

        required_iters: uint64 = calculate_iterations_quality(
            self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
            computed_quality_string,
            new_proof_of_space.proof.size,
            sp.difficulty,
            new_proof_of_space.sp_hash,
        )

        # Double check that the iters are good
        assert required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters)

        # Proceed at getting the signatures for this PoSpace
        request = harvester_protocol.RequestSignatures(
            new_proof_of_space.plot_identifier,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
            [sp.challenge_chain_sp, sp.reward_chain_sp],
        )

        if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
            self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = [
                (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.proof,
                )
            ]
        else:
            self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
                (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.proof,
                )
            )
        self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
        self.farmer.quality_str_to_identifiers[computed_quality_string] = (
            new_proof_of_space.plot_identifier,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
            peer.peer_node_id,
        )
        self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))

        return make_msg(ProtocolMessageTypes.request_signatures, request)
async def perform_handshake(self, network_id: str, protocol_version: str, server_port: int, local_type: NodeType):
    if self.is_outbound:
        outbound_handshake = make_msg(
            ProtocolMessageTypes.handshake,
            Handshake(
                network_id,
                protocol_version,
                chia_full_version_str(),
                uint16(server_port),
                uint8(local_type.value),
                [(uint16(Capability.BASE.value), "1")],
            ),
        )
        assert outbound_handshake is not None
        await self._send_message(outbound_handshake)
        inbound_handshake_msg = await self._read_one_message()
        if inbound_handshake_msg is None:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        inbound_handshake = Handshake.from_bytes(inbound_handshake_msg.data)
        if ProtocolMessageTypes(inbound_handshake_msg.type) != ProtocolMessageTypes.handshake:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        if inbound_handshake.network_id != network_id:
            raise ProtocolError(Err.INCOMPATIBLE_NETWORK_ID)
        self.peer_server_port = inbound_handshake.server_port
        self.connection_type = NodeType(inbound_handshake.node_type)
    else:
        try:
            message = await self._read_one_message()
        except Exception:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        if message is None:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        inbound_handshake = Handshake.from_bytes(message.data)
        if ProtocolMessageTypes(message.type) != ProtocolMessageTypes.handshake:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        if inbound_handshake.network_id != network_id:
            raise ProtocolError(Err.INCOMPATIBLE_NETWORK_ID)
        outbound_handshake = make_msg(
            ProtocolMessageTypes.handshake,
            Handshake(
                network_id,
                protocol_version,
                chia_full_version_str(),
                uint16(server_port),
                uint8(local_type.value),
                [(uint16(Capability.BASE.value), "1")],
            ),
        )
        await self._send_message(outbound_handshake)
        self.peer_server_port = inbound_handshake.server_port
        self.connection_type = NodeType(inbound_handshake.node_type)

    self.outbound_task = asyncio.create_task(self.outbound_handler())
    self.inbound_task = asyncio.create_task(self.inbound_handler())
    return True
async def on_connect(peer: ws.WSChiaConnection):
    msg = make_msg(ProtocolMessageTypes.request_peers_introducer, introducer_protocol.RequestPeersIntroducer())
    await peer.send_message(msg)
async def new_signage_point_harvester(
    self, new_challenge: harvester_protocol.NewSignagePointHarvester, peer: WSChiaConnection
):
    """
    The harvester receives a new signage point from the farmer, this happens at the start of each slot.
    The harvester does a few things:
    1. The harvester applies the plot filter for each of the plots, to select the proportion which are eligible
    for this signage point and challenge.
    2. The harvester gets the qualities for each plot. This is approximately 7 reads per plot which qualifies.
    Note that each plot may have 0, 1, 2, etc qualities for that challenge: but on average it will have 1.
    3. Checks the required_iters for each quality and the given signage point, to see which are eligible for
    inclusion (required_iters < sp_interval_iters).
    4. Looks up the full proof of space in the plot for each quality, approximately 64 reads per quality
    5. Returns the proof of space to the farmer
    """
    if len(self.harvester.pool_public_keys) == 0 or len(self.harvester.farmer_public_keys) == 0:
        # This means that we have not received the handshake yet
        return

    start = time.time()
    assert len(new_challenge.challenge_hash) == 32

    # Refresh plots to see if there are any new ones
    if start - self.harvester.last_load_time > 120:
        await self.harvester.refresh_plots()
        self.harvester.last_load_time = time.time()

    loop = asyncio.get_running_loop()

    def blocking_lookup(filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
        # Uses the DiskProver object to lookup qualities. This is a blocking call,
        # so it should be run in a thread pool.
        try:
            sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
                plot_info.prover.get_id(),
                new_challenge.challenge_hash,
                new_challenge.sp_hash,
            )
            try:
                quality_strings = plot_info.prover.get_qualities_for_challenge(sp_challenge_hash)
            except Exception as e:
                self.harvester.log.error(f"Error using prover object {e}")
                return []

            responses: List[Tuple[bytes32, ProofOfSpace]] = []
            if quality_strings is not None:
                # Found proofs of space (on average 1 is expected per plot)
                for index, quality_str in enumerate(quality_strings):
                    required_iters: uint64 = calculate_iterations_quality(
                        self.harvester.constants.DIFFICULTY_CONSTANT_FACTOR,
                        quality_str,
                        plot_info.prover.get_size(),
                        new_challenge.difficulty,
                        new_challenge.sp_hash,
                    )
                    sp_interval_iters = calculate_sp_interval_iters(
                        self.harvester.constants, new_challenge.sub_slot_iters
                    )
                    if required_iters < sp_interval_iters:
                        # Found a very good proof of space! Will fetch the whole proof from disk,
                        # then send to farmer
                        try:
                            proof_xs = plot_info.prover.get_full_proof(sp_challenge_hash, index)
                        except RuntimeError:
                            self.harvester.log.error(f"Exception fetching full proof for {filename}")
                            continue

                        # Look up local_sk from plot to save locked memory
                        (
                            pool_public_key_or_puzzle_hash,
                            farmer_public_key,
                            local_master_sk,
                        ) = parse_plot_info(plot_info.prover.get_memo())
                        local_sk = master_sk_to_local_sk(local_master_sk)
                        plot_public_key = ProofOfSpace.generate_plot_public_key(
                            local_sk.get_g1(), farmer_public_key
                        )
                        responses.append(
                            (
                                quality_str,
                                ProofOfSpace(
                                    sp_challenge_hash,
                                    plot_info.pool_public_key,
                                    plot_info.pool_contract_puzzle_hash,
                                    plot_public_key,
                                    uint8(plot_info.prover.get_size()),
                                    proof_xs,
                                ),
                            )
                        )
            return responses
        except Exception as e:
            self.harvester.log.error(f"Unknown error: {e}")
            return []

    async def lookup_challenge(filename: Path, plot_info: PlotInfo) -> List[harvester_protocol.NewProofOfSpace]:
        # Executes a DiskProverLookup in a thread pool, and returns responses
        all_responses: List[harvester_protocol.NewProofOfSpace] = []
        if self.harvester._is_shutdown:
            return []
        proofs_of_space_and_q: List[Tuple[bytes32, ProofOfSpace]] = await loop.run_in_executor(
            self.harvester.executor, blocking_lookup, filename, plot_info
        )
        for quality_str, proof_of_space in proofs_of_space_and_q:
            all_responses.append(
                harvester_protocol.NewProofOfSpace(
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
                    quality_str.hex() + str(filename.resolve()),
                    proof_of_space,
                    new_challenge.signage_point_index,
                )
            )
        return all_responses

    awaitables = []
    passed = 0
    total = 0
    for try_plot_filename, try_plot_info in self.harvester.provers.items():
        if try_plot_filename.exists():
            # Passes the plot filter (does not check sp filter yet though, since we have not reached sp)
            # This is being executed at the beginning of the slot
            total += 1
            if ProofOfSpace.passes_plot_filter(
                self.harvester.constants,
                try_plot_info.prover.get_id(),
                new_challenge.challenge_hash,
                new_challenge.sp_hash,
            ):
                passed += 1
                awaitables.append(lookup_challenge(try_plot_filename, try_plot_info))

    # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
    total_proofs_found = 0
    for sublist_awaitable in asyncio.as_completed(awaitables):
        for response in await sublist_awaitable:
            total_proofs_found += 1
            msg = make_msg(ProtocolMessageTypes.new_proof_of_space, response)
            await peer.send_message(msg)

    now = uint64(int(time.time()))
    farming_info = FarmingInfo(
        new_challenge.challenge_hash,
        new_challenge.sp_hash,
        now,
        uint32(passed),
        uint32(total_proofs_found),
        uint32(total),
    )
    pass_msg = make_msg(ProtocolMessageTypes.farming_info, farming_info)
    await peer.send_message(pass_msg)
    self.harvester.log.info(
        f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
        f" Found {total_proofs_found} proofs. Time: {time.time() - start:.5f} s. "
        f"Total {len(self.harvester.provers)} plots"
    )
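# NOTE (added sketch): the plot filter from step 1 of the docstring above can
# be stated compactly: a plot is eligible when the hash of
# (plot_id + challenge_hash + sp_hash) starts with a fixed number of zero
# bits. The helper below is an illustration, not the real
# ProofOfSpace.passes_plot_filter; filter_bits=9 (a 1-in-512 filter) is an
# assumption here rather than a value read from the consensus constants.
import hashlib


def passes_plot_filter_sketch(plot_id: bytes, challenge_hash: bytes, sp_hash: bytes, filter_bits: int = 9) -> bool:
    digest = hashlib.sha256(plot_id + challenge_hash + sp_hash).digest()
    prefix = int.from_bytes(digest[:4], "big")
    return prefix >> (32 - filter_bits) == 0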
async def _do_process_communication(
    self,
    chain: Chain,
    challenge: bytes32,
    initial_form: ClassgroupElement,
    ip: str,
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
    # Data specific only when running in bluebox mode.
    bluebox_iteration: Optional[uint64] = None,
    header_hash: Optional[bytes32] = None,
    height: Optional[uint32] = None,
    field_vdf: Optional[uint8] = None,
):
    disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)

    try:
        # Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
        # the timelord tells the vdf_client what to execute.
        async with self.lock:
            if self.sanitizer_mode:
                writer.write(b"S")
            else:
                if self.config["fast_algorithm"]:
                    # Run n-wesolowski (fast) algorithm.
                    writer.write(b"N")
                else:
                    # Run two-wesolowski (slow) algorithm.
                    writer.write(b"T")
            await writer.drain()

        prefix = str(len(str(disc)))
        if len(prefix) == 1:
            prefix = "00" + prefix
        if len(prefix) == 2:
            prefix = "0" + prefix
        async with self.lock:
            writer.write((prefix + str(disc)).encode())
            await writer.drain()

        # Send initial_form prefixed with its length.
        async with self.lock:
            writer.write(bytes([len(initial_form.data)]) + initial_form.data)
            await writer.drain()
        try:
            ok = await reader.readexactly(2)
        except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
            log.warning(f"{type(e)} {e}")
            async with self.lock:
                self.vdf_failures.append(chain)
                self.vdf_failures_count += 1
            return

        if ok.decode() != "OK":
            return

        log.info("Got handshake with VDF client.")
        if not self.sanitizer_mode:
            async with self.lock:
                self.allows_iters.append(chain)
        else:
            async with self.lock:
                assert chain is Chain.BLUEBOX
                assert bluebox_iteration is not None
                prefix = str(len(str(bluebox_iteration)))
                if len(str(bluebox_iteration)) < 10:
                    prefix = "0" + prefix
                iter_str = prefix + str(bluebox_iteration)
                writer.write(iter_str.encode())
                await writer.drain()

        # Listen to the client until "STOP" is received.
        while True:
            try:
                data = await reader.readexactly(4)
            except (
                asyncio.IncompleteReadError,
                ConnectionResetError,
                Exception,
            ) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    self.vdf_failures.append(chain)
                    self.vdf_failures_count += 1
                break

            msg = ""
            try:
                msg = data.decode()
            except Exception:
                pass
            if msg == "STOP":
                log.info(f"Stopped client running on ip {ip}.")
                async with self.lock:
                    writer.write(b"ACK")
                    await writer.drain()
                break
            else:
                try:
                    # This must be a proof, 4 bytes is length prefix
                    length = int.from_bytes(data, "big")
                    proof = await reader.readexactly(length)
                    stdout_bytes_io: io.BytesIO = io.BytesIO(bytes.fromhex(proof.decode()))
                except (
                    asyncio.IncompleteReadError,
                    ConnectionResetError,
                    Exception,
                ) as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        self.vdf_failures.append(chain)
                        self.vdf_failures_count += 1
                    break

                iterations_needed = uint64(int.from_bytes(stdout_bytes_io.read(8), "big", signed=True))

                y_size_bytes = stdout_bytes_io.read(8)
                y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))

                y_bytes = stdout_bytes_io.read(y_size)
                witness_type = uint8(int.from_bytes(stdout_bytes_io.read(1), "big", signed=True))
                proof_bytes: bytes = stdout_bytes_io.read()

                # Verifies our own proof just in case
                form_size = ClassgroupElement.get_size(self.constants)
                output = ClassgroupElement.from_bytes(y_bytes[:form_size])

                if not self.sanitizer_mode:
                    time_taken = time.time() - self.chain_start_time[chain]
                    ips = int(iterations_needed / time_taken * 10) / 10
                    log.info(
                        f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
                        f" iters, "
                        f"Estimated IPS: {ips}, Chain: {chain}"
                    )

                vdf_info: VDFInfo = VDFInfo(
                    challenge,
                    iterations_needed,
                    output,
                )
                vdf_proof: VDFProof = VDFProof(
                    witness_type,
                    proof_bytes,
                    self.sanitizer_mode,
                )

                if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
                    log.error("Invalid proof of time!")
                if not self.sanitizer_mode:
                    async with self.lock:
                        self.proofs_finished.append((chain, vdf_info, vdf_proof))
                else:
                    async with self.lock:
                        writer.write(b"010")
                        await writer.drain()
                    assert header_hash is not None
                    assert field_vdf is not None
                    assert height is not None
                    response = timelord_protocol.RespondCompactProofOfTime(
                        vdf_info, vdf_proof, header_hash, height, field_vdf
                    )
                    if self.server is not None:
                        message = make_msg(ProtocolMessageTypes.respond_compact_vdf_timelord, response)
                        await self.server.send_to_all([message], NodeType.FULL_NODE)
    except ConnectionResetError as e:
        log.info(f"Connection reset with VDF client {e}")
async def test_percentage_limits(self):
    r = RateLimiter(60, 40)
    new_peak_message = make_msg(ProtocolMessageTypes.new_peak, bytes([1] * 40))
    for i in range(50):
        assert r.process_msg_and_check(new_peak_message)

    saw_disconnect = False
    for i in range(50):
        response = r.process_msg_and_check(new_peak_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect

    r = RateLimiter(60, 40)
    block_message = make_msg(ProtocolMessageTypes.respond_block, bytes([1] * 1024 * 1024))
    for i in range(5):
        assert r.process_msg_and_check(block_message)

    saw_disconnect = False
    for i in range(5):
        response = r.process_msg_and_check(block_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect

    # Aggregate percentage limit count
    r = RateLimiter(60, 40)
    message_1 = make_msg(ProtocolMessageTypes.request_additions, bytes([1] * 5 * 1024))
    message_2 = make_msg(ProtocolMessageTypes.request_removals, bytes([1] * 1024))
    message_3 = make_msg(ProtocolMessageTypes.respond_additions, bytes([1] * 1024))

    for i in range(180):
        assert r.process_msg_and_check(message_1)
    for i in range(180):
        assert r.process_msg_and_check(message_2)

    saw_disconnect = False
    for i in range(100):
        response = r.process_msg_and_check(message_3)
        if not response:
            saw_disconnect = True
    assert saw_disconnect

    # Aggregate percentage limit max total size
    r = RateLimiter(60, 40)
    message_4 = make_msg(ProtocolMessageTypes.respond_proof_of_weight, bytes([1] * 18 * 1024 * 1024))
    message_5 = make_msg(ProtocolMessageTypes.respond_blocks, bytes([1] * 24 * 1024 * 1024))

    for i in range(2):
        assert r.process_msg_and_check(message_4)

    saw_disconnect = False
    for i in range(2):
        response = r.process_msg_and_check(message_5)
        if not response:
            saw_disconnect = True
    assert saw_disconnect