def __init__(self, coin_store: CoinStore, consensus_constants: ConsensusConstants):
    """Initialize mempool state bound to the given coin store and consensus constants."""
    self.constants: ConsensusConstants = consensus_constants
    self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
    self.coin_store = coin_store
    self.lock = asyncio.Lock()

    # Hashes of spend bundles we have already seen/processed.
    self.seen_bundle_hashes: Dict[bytes32, bytes32] = {}
    self.seen_cache_size = 10000

    # A fee-per-cost must exceed this to count as a "nonzero" fee, which is
    # required to evict other transactions (spam guard). Equivalent to
    # 0.055 XCH per block, or about 0.00005 XCH for two spends.
    self.nonzero_fee_minimum_fpc = 5
    self.limit_factor = 0.5

    max_block_cost = self.constants.MAX_BLOCK_COST_CLVM
    self.mempool_max_total_cost = int(max_block_cost * self.constants.MEMPOOL_BLOCK_BUFFER)
    # Transactions that could not enter the mempool, kept around for retry.
    self.potential_cache = PendingTxCache(max_block_cost * 1)

    self.pool = ProcessPoolExecutor(max_workers=2)

    # Mempool contents are always relative to one specific peak block.
    self.peak: Optional[BlockRecord] = None
    self.mempool: Mempool = Mempool(self.mempool_max_total_cost)
async def new_peak(
    self, new_peak: Optional[BlockRecord]
) -> List[Tuple[SpendBundle, NPCResult, bytes32]]:
    """Rebuild the mempool for a newly available peak.

    Every spend currently in the pool is re-validated against the new peak,
    and previously rejected transactions are retried. Returns the
    (spend_bundle, npc_result, name) tuples from the retry set that made it
    into the new mempool.
    """
    # Nothing to do without a peak, when the peak is unchanged, or while
    # the initial transaction freeze period is still in effect.
    if new_peak is None:
        return []
    if self.peak == new_peak:
        return []
    if new_peak.height <= self.constants.INITIAL_FREEZE_PERIOD:
        return []

    self.peak = new_peak

    # Start from an empty pool and replay every previously accepted spend.
    previous_pool = self.mempool
    self.mempool = Mempool(self.mempool_max_total_cost)
    for entry in previous_pool.spends.values():
        await self.add_spendbundle(entry.spend_bundle, entry.npc_result, entry.spend_bundle_name, False)

    # Retry transactions that failed to enter the mempool earlier.
    pending = self.potential_txs.copy()
    self.potential_txs = {}
    accepted: List[Tuple[SpendBundle, NPCResult, bytes32]] = []
    for entry in pending.values():
        cost, status, error = await self.add_spendbundle(
            entry.spend_bundle, entry.npc_result, entry.spend_bundle_name)
        if status == MempoolInclusionStatus.SUCCESS:
            accepted.append((entry.spend_bundle, entry.npc_result, entry.spend_bundle_name))

    log.debug(
        f"Size of mempool: {len(self.mempool.spends)} spends, cost: {self.mempool.total_mempool_cost} "
        f"minimum fee to get in: {self.mempool.get_min_fee_rate(100000)}")
    return accepted
def __init__(self, coin_store: CoinStore, consensus_constants: ConsensusConstants):
    """Initialize mempool state bound to the given coin store and consensus constants."""
    self.constants: ConsensusConstants = consensus_constants
    self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
    self.coin_store = coin_store

    # Transactions that were unable to enter the mempool (they were invalid),
    # kept around for retry on a later peak.
    self.potential_txs: Dict[bytes32, Tuple[SpendBundle, CostResult, bytes32]] = {}
    # Hashes of spend bundles we have already seen/processed.
    self.seen_bundle_hashes: Dict[bytes32, bytes32] = {}
    self.seen_cache_size = 10000

    # Both the live mempool and the retry cache share the same cost budget.
    cost_cap = int(self.constants.MAX_BLOCK_COST_CLVM * self.constants.MEMPOOL_BLOCK_BUFFER)
    self.mempool_max_total_cost = cost_cap
    self.potential_cache_max_total_cost = cost_cap
    self.potential_cache_cost: int = 0

    self.pool = ProcessPoolExecutor(max_workers=1)

    # Mempool contents are always relative to one specific peak block.
    self.peak: Optional[BlockRecord] = None
    self.mempool: Mempool = Mempool(self.mempool_max_total_cost)
async def new_peak(
    self, new_peak: Optional[BlockRecord], coin_changes: List[CoinRecord]
) -> List[Tuple[SpendBundle, NPCResult, bytes32]]:
    """
    Called when a new peak is available, we try to recreate a mempool for the new tip.

    Args:
        new_peak: the new peak block record, or None (in which case nothing is done).
        coin_changes: coin records changed by the new peak; used on the fast path
            to evict only the mempool items whose removals were spent.

    Returns:
        (spend_bundle, npc_result, spend_bundle_name) tuples from the retry cache
        that were successfully added to the new mempool.
    """
    # Guard clauses: no peak, a non-transaction block, or an unchanged peak
    # all leave the mempool untouched.
    if new_peak is None:
        return []
    if new_peak.is_transaction_block is False:
        return []
    if self.peak == new_peak:
        return []
    assert new_peak.timestamp is not None
    # Fast path applies only when the new peak builds directly on our current
    # peak (no reorg): then existing entries stay valid except those whose
    # removed coins were just spent.
    use_optimization: bool = self.peak is not None and new_peak.prev_transaction_block_hash == self.peak.header_hash
    self.peak = new_peak
    if use_optimization:
        # We don't reinitialize a mempool, just kick removed items
        for coin_record in coin_changes:
            if coin_record.name in self.mempool.removals:
                item = self.mempool.removals[coin_record.name]
                self.mempool.remove_from_pool(item)
                self.remove_seen(item.spend_bundle_name)
    else:
        # Slow path (reorg or gap): rebuild the mempool from scratch by
        # re-validating every previously accepted spend against the new peak.
        old_pool = self.mempool
        self.mempool = Mempool(self.mempool_max_total_cost)
        for item in old_pool.spends.values():
            _, result, _ = await self.add_spendbundle(
                item.spend_bundle, item.npc_result, item.spend_bundle_name, item.program)
            # If the spend bundle was confirmed or conflicting (can no longer be in mempool), it won't be
            # successfully added to the new mempool. In this case, remove it from seen, so in the case of a reorg,
            # it can be resubmitted
            if result != MempoolInclusionStatus.SUCCESS:
                self.remove_seen(item.spend_bundle_name)
    # Retry every transaction that previously failed to enter the mempool.
    potential_txs = self.potential_cache.drain()
    txs_added = []
    for item in potential_txs.values():
        cost, status, error = await self.add_spendbundle(
            item.spend_bundle, item.npc_result, item.spend_bundle_name, program=item.program)
        if status == MempoolInclusionStatus.SUCCESS:
            txs_added.append((item.spend_bundle, item.npc_result, item.spend_bundle_name))
    log.info(
        f"Size of mempool: {len(self.mempool.spends)} spends, cost: {self.mempool.total_mempool_cost} "
        f"minimum fee rate (in FPC) to get in for 5M cost tx: {self.mempool.get_min_fee_rate(5000000)}"
    )
    return txs_added
async def test_basic_mempool(self, two_nodes):
    """Farm a few blocks, then check basic Mempool fee-rate invariants.

    Verifies that an empty mempool reports a zero minimum fee rate, that
    querying a cost above the mempool's capacity raises ValueError, and that
    a spend bundle can be generated from a farmed reward coin.
    """
    reward_ph = WALLET_A.get_new_puzzlehash()
    blocks = bt.get_consecutive_blocks(
        3,
        guarantee_transaction_block=True,
        farmer_reward_puzzle_hash=reward_ph,
        pool_reward_puzzle_hash=reward_ph,
    )
    # Fix: `server_1` was unpacked but never used in this test; discard it.
    full_node_1, _, _, _ = two_nodes
    for block in blocks:
        await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
    await time_out_assert(60, node_height_at_least, True, full_node_1, blocks[-1].height)

    max_mempool_cost = 40000000 * 5
    mempool = Mempool(max_mempool_cost)
    # An empty mempool never requires a fee to get in.
    assert mempool.get_min_fee_rate(104000) == 0
    # Asking for more cost than the mempool can ever hold is an error.
    with pytest.raises(ValueError):
        mempool.get_min_fee_rate(max_mempool_cost + 1)
    spend_bundle = generate_test_spend_bundle(list(blocks[-1].get_included_reward_coins())[0])
    assert spend_bundle is not None
async def new_peak(
    self, new_peak: Optional[BlockRecord]
) -> List[Tuple[SpendBundle, NPCResult, bytes32]]:
    """Rebuild the mempool for a newly available peak.

    Re-validates every spend currently in the pool against the new peak,
    dropping "seen" entries for spends that no longer fit (so they can be
    resubmitted after a reorg), then retries previously rejected
    transactions. Returns the retried (spend_bundle, npc_result, name)
    tuples that entered the new mempool.
    """
    # Guard clauses: no peak, non-transaction block, unchanged peak, or a
    # timestamp still inside the initial freeze window all do nothing.
    if new_peak is None:
        return []
    if new_peak.is_transaction_block is False:
        return []
    if self.peak == new_peak:
        return []
    assert new_peak.timestamp is not None
    if new_peak.timestamp <= self.constants.INITIAL_FREEZE_END_TIMESTAMP:
        return []

    self.peak = new_peak

    # Start from an empty pool and replay every previously accepted spend.
    previous_pool = self.mempool
    self.mempool = Mempool(self.mempool_max_total_cost)
    for entry in previous_pool.spends.values():
        _, result, _ = await self.add_spendbundle(
            entry.spend_bundle, entry.npc_result, entry.spend_bundle_name, False, entry.program)
        # A spend that was confirmed or now conflicts won't re-enter the new
        # mempool; forget it from "seen" so a reorg allows resubmission.
        if result != MempoolInclusionStatus.SUCCESS:
            self.remove_seen(entry.spend_bundle_name)

    # Retry transactions that failed to enter the mempool earlier.
    pending = self.potential_txs.copy()
    self.potential_txs = {}
    accepted: List[Tuple[SpendBundle, NPCResult, bytes32]] = []
    for entry in pending.values():
        cost, status, error = await self.add_spendbundle(
            entry.spend_bundle, entry.npc_result, entry.spend_bundle_name, program=entry.program)
        if status == MempoolInclusionStatus.SUCCESS:
            accepted.append((entry.spend_bundle, entry.npc_result, entry.spend_bundle_name))

    log.debug(
        f"Size of mempool: {len(self.mempool.spends)} spends, cost: {self.mempool.total_mempool_cost} "
        f"minimum fee to get in: {self.mempool.get_min_fee_rate(100000)}")
    return accepted
def __init__(self, coin_store: CoinStore, consensus_constants: ConsensusConstants):
    """Initialize mempool state bound to the given coin store and consensus constants."""
    self.constants: ConsensusConstants = consensus_constants
    self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
    self.coin_store = coin_store

    # Transactions that were unable to enter the mempool (they were invalid),
    # kept around for retry on a later peak.
    self.potential_txs: Dict[bytes32, Tuple[SpendBundle, CostResult, bytes32]] = {}
    # Hashes of spend bundles we have already seen/processed.
    self.seen_bundle_hashes: Dict[bytes32, bytes32] = {}
    self.seen_cache_size = 10000

    # Mempool capacity in transaction count: throughput per block times the
    # number of buffered blocks. (MEMPOOL_SIZE = 60000 with mainnet constants.)
    txs_per_second = self.constants.TX_PER_SEC
    seconds_per_block = self.constants.SUB_SLOT_TIME_TARGET // self.constants.SLOT_BLOCKS_TARGET
    buffered_blocks = self.constants.MEMPOOL_BLOCK_BUFFER
    self.mempool_size = int(txs_per_second * seconds_per_block * buffered_blocks)
    self.potential_cache_size = 300

    self.pool = ProcessPoolExecutor(max_workers=1)

    # Mempool contents are always relative to one specific peak block.
    self.peak: Optional[BlockRecord] = None
    self.mempool: Mempool = Mempool.create(self.mempool_size)