def test_simple_receipt(self):
    to = self.rand_addr()

    tx = self.new_tx(receiver=to)
    tx_hash = self.send_tx(tx, wait_for_receipt=True)
    tx2 = self.get_tx(tx_hash)

    receipt = self.get_transaction_receipt(tx_hash)
    assert_ne(receipt, None)

    # fields that mirror the corresponding transaction fields
    assert_equal(receipt['blockHash'], tx2['blockHash'])
    assert_equal(receipt['contractCreated'], tx2['contractCreated'])
    assert_equal(receipt['from'], tx2['from'])
    assert_equal(receipt['index'], tx2['transactionIndex'])
    assert_equal(receipt['to'], tx2['to'])
    assert_equal(receipt['transactionHash'], tx_hash)

    # fields of a plain, successful, non-sponsored transfer
    assert_equal(receipt['gasCoveredBySponsor'], False)
    assert_equal(receipt['logs'], [])
    assert_equal(receipt['outcomeStatus'], '0x0')
    assert_equal(receipt['storageCollateralized'], '0x0')
    assert_equal(receipt['storageCoveredBySponsor'], False)
    assert_equal(receipt['storageReleased'], [])
    assert_equal(receipt['txExecErrorMsg'], None)
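# The cross-checks against the transaction above could be factored into a small
# reusable helper. This is only a sketch, assuming it runs in the same test class
# and uses the same `get_tx` / `get_transaction_receipt` helpers shown above; the
# name `assert_receipt_matches_tx` is hypothetical, not part of the framework.
def assert_receipt_matches_tx(self, tx_hash):
    tx = self.get_tx(tx_hash)
    receipt = self.get_transaction_receipt(tx_hash)
    assert_ne(receipt, None)

    # fields that must mirror the corresponding transaction fields
    assert_equal(receipt['blockHash'], tx['blockHash'])
    assert_equal(receipt['contractCreated'], tx['contractCreated'])
    assert_equal(receipt['from'], tx['from'])
    assert_equal(receipt['index'], tx['transactionIndex'])
    assert_equal(receipt['to'], tx['to'])
    assert_equal(receipt['transactionHash'], tx_hash)

    return receipt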
def test_get_epoch_receipts(self):
    parent_hash = self.block_by_epoch("latest_mined")['hash']
    start_nonce = self.get_nonce(self.GENESIS_ADDR)

    # generate an epoch of 2 blocks with transactions in each block
    # NOTE: we need `C` to ensure that the top fork is heavier
    #
    #                      ---        ---        ---
    #                  .- | A | <--- | C | <--- | D | <--- ...
    #           ---    |   ---        ---        ---
    # ... <--- | P | <-*                          .
    #           ---    |   ---                    .
    #                  .- | B | <..................
    #                      ---

    txs = [
        self.new_tx(receiver=self.rand_addr(), nonce=start_nonce + ii)
        for ii in range(NUM_TXS)
    ]

    txs1 = txs[:NUM_TXS // 2]
    txs2 = txs[NUM_TXS // 2:]

    block_a = self.generate_custom_block(parent_hash=parent_hash, referee=[], txs=[])
    block_b = self.generate_custom_block(parent_hash=parent_hash, referee=[], txs=txs1)
    block_c = self.generate_custom_block(parent_hash=block_a, referee=[], txs=[])
    block_d = self.generate_custom_block(parent_hash=block_c, referee=[block_b], txs=txs2)

    # not executed yet, so there are no epoch receipts
    epoch_d = self.block_by_hash(block_d)['height']
    assert_equal(self.node.cfx_getEpochReceipts(epoch_d), None)
    assert_equal(self.node.cfx_getEpochReceipts(f'hash:{block_d}'), None)

    # make sure transactions have been executed
    parent_hash = block_d

    for _ in range(5):
        block = self.generate_custom_block(parent_hash=parent_hash, referee=[], txs=[])
        parent_hash = block

    # retrieve epoch receipts by epoch number
    receipts = self.node.cfx_getEpochReceipts(epoch_d)
    assert_ne(receipts, None)
    assert_equal(len(receipts), 2)
    assert_equal(len(receipts[0]), NUM_TXS // 2)
    assert_equal(len(receipts[1]), NUM_TXS // 2)

    # retrieve epoch receipts by pivot block hash
    receipts2 = self.node.cfx_getEpochReceipts(f'hash:{block_d}')
    assert_equal(receipts2, receipts)

    # request with a non-pivot block hash should fail
    assert_raises_rpc_error(None, None, self.node.cfx_getEpochReceipts, f'hash:{block_b}')

    # request with a nonexistent block hash should fail
    assert_raises_rpc_error(
        None, None, self.node.cfx_getEpochReceipts,
        f'hash:0x66e365b5bbd53bc26fd306fd7c65290b2b13c165d7cae816b651e7fcf2646f37'
    )
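# The "generate a few empty blocks so the epoch gets executed" pattern above is a
# recurring step in these tests. A minimal sketch of a reusable helper, assuming
# the same test-class context and the `generate_custom_block` helper shown above;
# the name `generate_empty_chain` and the default of 5 blocks are assumptions,
# not framework API.
def generate_empty_chain(self, parent_hash, num_blocks=5):
    # extend the chain with empty blocks and return the new tip hash
    for _ in range(num_blocks):
        parent_hash = self.generate_custom_block(
            parent_hash=parent_hash, referee=[], txs=[])

    return parent_hash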
async def run_async(self):
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(priv_to_addr(priv_key))

    # deploy two instances of the contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    _, contract1 = self.deploy_contract(sender, priv_key, bytecode)
    _, contract2 = self.deploy_contract(sender, priv_key, bytecode)

    # subscribe
    sub_all = await self.pubsub[FULLNODE1].subscribe("logs")
    sub_one = await self.pubsub[FULLNODE1].subscribe(
        "logs", Filter(address=[contract2]).__dict__)

    # call contracts and collect receipts
    receipts = []

    for _ in range(NUM_CALLS):
        r = self.call_contract(sender, priv_key, contract1, FOO_TOPIC)
        assert r is not None
        receipts.append(r)

        r = self.call_contract(sender, priv_key, contract2, FOO_TOPIC)
        assert r is not None
        receipts.append(r)

    # make sure the pub-sub layer processes the logs
    self.rpc[FULLNODE0].generate_blocks(20)
    sync_blocks(self.nodes)

    # collect pub-sub notifications
    logs1 = [l async for l in sub_all.iter()]
    logs2 = [l async for l in sub_one.iter()]

    assert_equal(len(logs1), 2 * NUM_CALLS)
    assert_equal(len(logs2), NUM_CALLS)

    self.log.info(f"Pass -- retrieved logs with no fork")

    # create alternative fork
    old_tip = self.rpc[FULLNODE0].best_block_hash()
    old_tip_epoch = self.rpc[FULLNODE0].epoch_number()
    fork_hash = receipts[len(receipts) // 2]["blockHash"]
    fork_epoch = int(receipts[len(receipts) // 2]["epochNumber"], 16)

    self.log.info(f"Creating fork at {fork_hash[:20]}... (#{fork_epoch})")

    new_tip = self.generate_chain(fork_hash, 2 * (old_tip_epoch - fork_epoch))[-1]
    new_tip = self.rpc[FULLNODE0].generate_block_with_parent(
        new_tip, referee=[old_tip])
    new_tip = self.generate_chain(new_tip, 20)[-1]
    new_tip_epoch = self.rpc[FULLNODE0].epoch_number()
    sync_blocks(self.nodes)

    self.log.info(
        f"Tip: {old_tip[:20]}... (#{old_tip_epoch}) --> {new_tip[:20]}... (#{new_tip_epoch})"
    )

    # block order changed, so some transactions need to be re-executed
    num_to_reexecute = sum(1 for r in receipts
                           if int(r["epochNumber"], 16) > fork_epoch)

    msg = await sub_all.next(timeout=5)
    assert msg["revertTo"] is not None
    assert_equal(int(msg["revertTo"], 16), fork_epoch)

    logs = [l async for l in sub_all.iter()]
    assert_equal(len(logs), num_to_reexecute)

    self.log.info(f"Pass -- retrieved re-executed logs after fork")

    # create one transaction that is mined but not executed yet
    sync_blocks(self.nodes)
    tx = self.rpc[FULLNODE0].new_contract_tx(receiver=contract1,
                                             data_hex=FOO_TOPIC,
                                             sender=sender,
                                             priv_key=priv_key,
                                             storage_limit=20000)
    assert_equal(self.rpc[FULLNODE0].send_tx(tx, wait_for_receipt=False),
                 tx.hash_hex())
    self.rpc[FULLNODE0].generate_block(num_txs=1)
    receipt = self.rpc[FULLNODE0].get_transaction_receipt(tx.hash_hex())
    assert_equal(receipt, None)
    time.sleep(1)

    # mine more blocks; the transaction is now executed
    self.rpc[FULLNODE0].generate_blocks(4)
    receipt = self.rpc[FULLNODE0].get_transaction_receipt(tx.hash_hex())
    assert_ne(receipt, None)

    # make sure the pub-sub layer processes the logs
    self.rpc[FULLNODE0].generate_blocks(20)
    sync_blocks(self.nodes)

    # this would time out before #1989 was fixed
    await sub_all.next()

    self.log.info(f"Pass -- test #1989 fix")
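# Draining the currently buffered notifications from a subscription is done
# several times above via `[l async for l in sub.iter()]`. A minimal sketch of a
# helper that also checks the expected count, assuming the same async pub-sub
# wrapper used above (whose `iter()` yields buffered notifications); the name
# `collect_logs` is hypothetical, not part of the framework.
async def collect_logs(self, sub, expected_count=None):
    # gather whatever notifications have been buffered so far
    logs = [l async for l in sub.iter()]

    if expected_count is not None:
        assert_equal(len(logs), expected_count)

    return logs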