def run_test(self):
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(priv_to_addr(priv_key))

    # deploy contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    _, contractAddr = self.deploy_contract(sender, priv_key, bytecode)
    self.log.info(f"contract deployed at address {contractAddr}")

    # emit events throughout a few eras
    num_events = 0

    while self.rpc[ARCHIVE_NODE].epoch_number() < NUM_ERAS * ERA_EPOCH_COUNT:
        self.call_contract(sender, priv_key, contractAddr,
                           encode_hex_0x(keccak(b"foo()")))
        num_events += 1

    self.log.info(f"num_events = {num_events}")
    self.log.info(f"epoch_number = {self.rpc[ARCHIVE_NODE].epoch_number()}")

    # sync blocks and wait for gc
    sync_blocks(self.nodes)
    time.sleep(1)

    latest_checkpoint = self.rpc[FULL_NODE].epoch_number("latest_checkpoint")
    assert_greater_than(latest_checkpoint, 0)

    # filtering the whole epoch range should fail on full nodes
    filter = Filter(from_epoch="earliest",
                    to_epoch="latest_state",
                    topics=[CALLED_TOPIC])
    logs_archive = self.rpc[ARCHIVE_NODE].get_logs(filter)
    assert_equal(len(logs_archive), num_events)
    assert_raises_rpc_error(None, None, self.rpc[FULL_NODE].get_logs, filter)

    # filtering since the latest checkpoint should yield the same result
    filter = Filter(from_epoch="latest_checkpoint",
                    to_epoch="latest_state",
                    topics=[CALLED_TOPIC])
    logs_archive = self.rpc[ARCHIVE_NODE].get_logs(filter)
    assert_greater_than(len(logs_archive), 0)

    logs_full = self.rpc[FULL_NODE].get_logs(filter)
    assert_equal(logs_archive, logs_full)

    self.log.info("Pass")
def test_valid_filter(self):
    # epoch fields are inclusive
    filter = Filter(from_epoch="0x1", to_epoch="0x1")
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    # variadic `address` field
    filter = Filter(address=None)
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    filter = Filter(address="0x0000000000000000000000000000000000000000")
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    filter = Filter(address=[
        "0x0000000000000000000000000000000000000000",
        "0x0000000000000000000000000000000000000000"
    ])
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    # variadic `topics` field
    filter = Filter(topics=None)
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    filter = Filter(topics=[
        "0x0000000000000000000000000000000000000000000000000000000000000000"
    ])
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    filter = Filter(topics=[
        "0x0000000000000000000000000000000000000000000000000000000000000000",
        ["0x0000000000000000000000000000000000000000000000000000000000000000"]
    ])
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    # all fields
    filter = Filter(
        from_epoch="0x0",
        to_epoch="latest_state",
        block_hashes=[
            "0x0000000000000000000000000000000000000000000000000000000000000000"
        ],
        address=["0x0000000000000000000000000000000000000000"],
        topics=[
            "0x0000000000000000000000000000000000000000000000000000000000000000",
            [
                "0x0000000000000000000000000000000000000000000000000000000000000000",
                "0x0000000000000000000000000000000000000000000000000000000000000000"
            ]
        ],
        limit="0x1")
    logs = self.get_logs(filter)
    assert_equal(logs, [])
def test_valid_filter(self):
    # epoch fields are inclusive
    filter = Filter(from_epoch="0x1", to_epoch="0x1")
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    # variadic `address` field
    filter = Filter(address=None)
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    filter = Filter(address=NULL_H160)
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    filter = Filter(address=[NULL_H160, NULL_H160])
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    # variadic `topics` field
    filter = Filter(topics=None)
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    filter = Filter(topics=[NULL_H256])
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    filter = Filter(topics=[NULL_H256, [NULL_H256]])
    logs = self.get_logs(filter)
    assert_equal(logs, [])

    # non-existent block hash
    filter = Filter(block_hashes=[NULL_H256])
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # all fields
    filter = Filter(
        from_epoch="0x0",
        to_epoch="latest_state",
        block_hashes=[self.blocks[0]],
        address=[NULL_H160],
        topics=[NULL_H256, [NULL_H256, NULL_H256]],
        offset="0x0",
        limit="0x1")
    logs = self.get_logs(filter)
    assert_equal(logs, [])
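The `test_valid_filter` variants above lean on a few shared fixtures from the test harness. The sketch below is a reconstruction inferred from usage, not the canonical definitions: the `NULL_H160`/`NULL_H256` constants and the camelCase attribute names on `Filter` are assumptions (the attribute-based layout matches the `Filter(...).__dict__` usage in the pub-sub tests further down; field sets vary between test versions).

# Reconstructed fixtures (assumed, inferred from how the tests use them).
NULL_H160 = "0x" + "00" * 20   # zero address
NULL_H256 = "0x" + "00" * 32   # zero hash, also usable as a topic

class Filter:
    # Keyword arguments map onto the cfx_getLogs filter object;
    # fields left as None are omitted from the RPC request.
    def __init__(self, from_epoch=None, to_epoch=None, from_block=None,
                 to_block=None, block_hashes=None, address=None, topics=None,
                 offset=None, limit=None, **kwargs):
        self.fromEpoch = from_epoch
        self.toEpoch = to_epoch
        self.fromBlock = from_block
        self.toBlock = to_block
        self.blockHashes = block_hashes
        self.address = address
        self.topics = topics
        self.offset = offset
        self.limit = limit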
def check_witnesses_synced(self):
    latest_epoch = self.rpc[FULLNODE0].epoch_number()

    # scan all blocks for receipts;
    # we need to have all correct roots to do this
    filter = Filter(from_epoch="earliest",
                    to_epoch=hex(latest_epoch - BLAME_CHECK_OFFSET),
                    topics=[FOO_TOPIC])

    logs_full = self.rpc[FULLNODE0].get_logs(filter)
    logs_light = self.rpc[LIGHTNODE].get_logs(filter)
    assert_equal(logs_full, logs_light)
def check_logs(self, first_block_number, last_block_number, sender):
    # check the number of logs returned for different ranges
    for from_block in range(first_block_number, last_block_number + 1):
        for to_block in range(from_block, last_block_number + 1):
            filter = Filter(from_block=hex(from_block),
                            to_block=hex(to_block))
            logs = self.rpc.get_logs(filter)
            assert_equal(len(logs), to_block - from_block + 1)

    # check the event parameters in each block
    for block_number in range(first_block_number, last_block_number + 1):
        logs = self.rpc.get_logs(
            Filter(from_block=hex(block_number), to_block=hex(block_number)))
        assert_equal(len(logs), 1)
        assert_equal(logs[0]["topics"][0], FOO_TOPIC)
        assert_equal(logs[0]["topics"][1], address_to_topic(sender))
        assert_equal(logs[0]["topics"][2],
                     number_to_topic(block_number - first_block_number + 1))
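`check_logs` relies on two small encoding helpers from the test framework. A plausible sketch of them (a reconstruction, not the canonical definitions: the assertions above only require that both helpers left-pad their argument to a 32-byte hex topic, and the `0x`-prefixed input format is assumed):

def address_to_topic(address):
    # Left-pad a 0x-prefixed 20-byte hex address to a 32-byte topic.
    return "0x" + address[2:].zfill(64)

def number_to_topic(number):
    # Encode an integer as a 32-byte big-endian hex topic.
    return "0x" + ("%x" % number).zfill(64)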
def run_test(self):
    file_path = os.path.dirname(os.path.realpath(__file__)).split("/")
    file_path.pop(-1)
    file_path.extend(["internal_contract", "metadata", "Staking.json"])
    file_path = "/".join(file_path)
    staking_contract_dict = json.loads(open(os.path.join(file_path), "r").read())
    staking_contract = get_contract_instance(contract_dict=staking_contract_dict)
    staking_contract_addr = Web3.toChecksumAddress(
        "843c409373ffd5c0bec1dddb7bec830856757b65")

    self.problem = "0x2bc79b7514884ab00da924607d71542cc4fed3beb8518e747726ae30ab6c7944"
    self.solution = "0xc4d2751c52311d0d7efe44e5c4195e058ad5ef4bb89b3e1761b24dc277b132c2"
    self.priv_key = default_config["GENESIS_PRI_KEY"]
    self.sender = encode_hex_0x(priv_to_addr(self.priv_key))
    self.sender_checksum = Web3.toChecksumAddress(self.sender)
    self.pub = []
    self.pri = []
    self.rpc = RpcClient(self.nodes[0])
    gas = CONTRACT_DEFAULT_GAS
    gas_price = 10

    # lock token for genesis account
    self.tx_conf = {
        "from": self.sender,
        "gas": int_to_hex(gas),
        "gasPrice": int_to_hex(gas_price),
        "chainId": 0
    }
    self.tx_conf['to'] = staking_contract_addr
    tx_data = decode_hex(staking_contract.functions.deposit(
        1000000 * 10**18).buildTransaction(self.tx_conf)["data"])
    tx = self.rpc.new_tx(value=0, receiver=staking_contract_addr,
                         data=tx_data, gas=gas, gas_price=gas_price)
    self.rpc.send_tx(tx, True)

    for i in range(10):
        # randint's upper bound is inclusive; 2**256 - 1 is the largest
        # value that fits in 32 bytes
        priv_key = random.randint(0, 2**256 - 1).to_bytes(32, "big")
        pub_key = encode_hex_0x(priv_to_addr(priv_key))
        self.pub.append(pub_key)
        self.pri.append(priv_key)
        transaction = self.rpc.new_tx(sender=self.sender, receiver=pub_key,
                                      value=1000000 * 10**18,
                                      priv_key=self.priv_key)
        self.rpc.send_tx(transaction, True)

        # deposit 10000 tokens
        tx_data = decode_hex(staking_contract.functions.deposit(
            10000 * 10**18).buildTransaction(self.tx_conf)["data"])
        tx = self.rpc.new_tx(value=0, sender=pub_key,
                             receiver=self.tx_conf["to"], gas=gas,
                             data=tx_data, priv_key=priv_key)
        self.rpc.send_tx(tx)

    self.tx_conf = {"from": self.sender, "gas": int_to_hex(gas),
                    "gasPrice": int_to_hex(gas_price), "chainId": 0}
    self.filter = Filter(from_epoch="earliest", to_epoch="latest_state")
    self.testEventContract()
    self.tx_conf = {"from": self.sender, "gas": int_to_hex(gas),
                    "gasPrice": int_to_hex(gas_price), "chainId": 0}
    self.testBallotContract()
    self.tx_conf = {"from": self.sender, "gas": int_to_hex(gas),
                    "gasPrice": int_to_hex(gas_price), "chainId": 0}
    self.testPayContract()
    self.tx_conf = {"from": self.sender, "gas": int_to_hex(gas),
                    "gasPrice": int_to_hex(gas_price), "chainId": 0}
    self.testHTLCContract()
    self.tx_conf = {"from": self.sender, "gas": int_to_hex(gas),
                    "gasPrice": int_to_hex(gas_price), "chainId": 0}
    self.testDaiContract()
    self.tx_conf = {"from": self.sender, "gas": int_to_hex(gas),
                    "gasPrice": int_to_hex(gas_price), "chainId": 0}
    self.testMappingContract()
    self.tx_conf = {"from": self.sender, "gas": int_to_hex(gas),
                    "gasPrice": int_to_hex(gas_price), "chainId": 0}
    self.testDaiJoinContract()
    self.log.info("Pass")
def run_test(self):
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(priv_to_addr(priv_key))
    self.rpc = RpcClient(self.nodes[0])

    # apply filter, we expect no logs
    filter = Filter()
    result = self.rpc.get_logs(filter)
    assert_equal(result, [])

    # deploy contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    _, contractAddr = self.deploy_contract(sender, priv_key, bytecode)

    # apply filter, we expect a single log with 2 topics
    filter = Filter(from_epoch="earliest", to_epoch="latest_state")
    logs0 = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs0)
    assert_equal(len(logs0), 1)
    assert_equal(len(logs0[0]["topics"]), 2)
    assert_equal(logs0[0]["topics"][0], CONSTRUCTED_TOPIC)
    assert_equal(logs0[0]["topics"][1], self.address_to_topic(sender))
    assert_equal(logs0[0]["data"], self.address_to_topic(sender))

    # call method
    receipt = self.call_contract(sender, priv_key, contractAddr,
                                 encode_hex_0x(keccak(b"foo()")),
                                 storage_limit=64)

    # apply filter, we expect two logs with 2 and 3 topics respectively
    filter = Filter(from_epoch="earliest", to_epoch="latest_state")
    logs1 = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs1)
    assert_equal(len(logs1), 2)
    assert_equal(logs1[0], logs0[0])
    assert_equal(len(logs1[1]["topics"]), 3)
    assert_equal(logs1[1]["topics"][0], FOO_TOPIC)
    assert_equal(logs1[1]["topics"][1], self.address_to_topic(sender))
    assert_equal(logs1[1]["topics"][2], self.number_to_topic(1))

    # apply filter for specific block, we expect a single log with 3 topics
    filter = Filter(block_hashes=[receipt["blockHash"]])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)
    assert_equal(logs[0], logs1[1])

    # call many times
    for ii in range(2, NUM_CALLS):
        self.call_contract(sender, priv_key, contractAddr,
                           encode_hex_0x(keccak(b"foo()")),
                           storage_limit=0)

    # apply filter, we expect NUM_CALLS log entries with increasing uint32 fields
    filter = Filter(from_epoch="earliest", to_epoch="latest_state")
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    for ii in range(2, NUM_CALLS):
        assert_equal(len(logs[ii]["topics"]), 3)
        assert_equal(logs[ii]["topics"][0], FOO_TOPIC)
        assert logs[ii]["topics"][1] == self.address_to_topic(sender)
        assert_equal(logs[ii]["topics"][2], self.number_to_topic(ii))

    # apply filter for specific topics
    filter = Filter(topics=[CONSTRUCTED_TOPIC])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)

    filter = Filter(topics=[FOO_TOPIC])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS - 1)

    filter = Filter(topics=[None, self.address_to_topic(sender)])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    # find logs with `FOO_TOPIC` as 1st topic and `3` or `4` as 3rd topic
    filter = Filter(topics=[
        FOO_TOPIC, None,
        [self.number_to_topic(3), self.number_to_topic(4)]
    ])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 2)

    # apply filter with limit
    filter = Filter(limit=hex(NUM_CALLS // 2))
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS // 2)

    # apply filter with offset
    filter = Filter(offset=hex(NUM_CALLS // 4))
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 3 * NUM_CALLS // 4)

    # apply filter for specific contract address
    _, contractAddr2 = self.deploy_contract(sender, priv_key, bytecode)

    filter = Filter(address=[contractAddr])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    filter = Filter(address=[contractAddr2])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)

    # apply filter to very first epoch, we expect no logs
    filter = Filter(from_epoch="earliest", to_epoch="earliest")
    result = self.rpc.get_logs(filter)
    assert_equal(result, [])

    # generate two blocks with `NUM_CALLS` transactions in each;
    # transactions will generate 2 logs each
    parent_hash = self.rpc.block_by_epoch("latest_mined")['hash']
    start_nonce = self.rpc.get_nonce(sender)

    txs1 = [
        self.rpc.new_contract_tx(receiver=contractAddr,
                                 data_hex=encode_hex_0x(keccak(b"bar()")),
                                 sender=sender, priv_key=priv_key,
                                 storage_limit=64,
                                 nonce=start_nonce + ii)
        for ii in range(0, NUM_CALLS)
    ]
    block_hash_1 = self.rpc.generate_custom_block(parent_hash=parent_hash,
                                                  referee=[], txs=txs1)
    epoch_1 = self.rpc.block_by_hash(block_hash_1)["epochNumber"]

    txs2 = [
        self.rpc.new_contract_tx(receiver=contractAddr,
                                 data_hex=encode_hex_0x(keccak(b"bar()")),
                                 sender=sender, priv_key=priv_key,
                                 storage_limit=64,
                                 nonce=start_nonce + NUM_CALLS + ii)
        for ii in range(0, NUM_CALLS)
    ]
    block_hash_2 = self.rpc.generate_custom_block(parent_hash=block_hash_1,
                                                  referee=[], txs=txs2)
    epoch_2 = self.rpc.block_by_hash(block_hash_2)["epochNumber"]

    txs = txs1
    txs.extend(txs2)

    # blocks not executed yet, filtering should fail
    # filter = Filter(block_hashes=[block_hash_1, block_hash_2], topics=[BAR_TOPIC])
    # assert_raises_rpc_error(None, None, self.rpc.get_logs, filter)

    # generate some more blocks to ensure our two blocks are executed
    self.rpc.generate_blocks(10)

    # filtering for these two blocks should return logs in correct order
    filter = Filter(block_hashes=[block_hash_1, block_hash_2],
                    topics=[BAR_TOPIC])
    logs = self.rpc.get_logs(filter)
    assert_equal(len(logs), 4 * NUM_CALLS)

    log_index = 0
    transaction_index = 0
    transaction_log_index = 0

    for ii in range(0, 4 * NUM_CALLS):
        assert_equal(logs[ii]["address"], contractAddr)
        assert_equal(logs[ii]["blockHash"],
                     block_hash_1 if ii < 2 * NUM_CALLS else block_hash_2)
        assert_equal(logs[ii]["epochNumber"],
                     epoch_1 if ii < 2 * NUM_CALLS else epoch_2)
        assert_equal(logs[ii]["transactionHash"], txs[ii // 2].hash_hex())

        assert_equal(len(logs[ii]["topics"]), 3)
        assert_equal(logs[ii]["topics"][0], BAR_TOPIC)
        assert_equal(logs[ii]["topics"][1], self.address_to_topic(sender))
        assert_equal(logs[ii]["topics"][2], self.number_to_topic(ii))

        # logIndex:
        # 0, 1, 2, 3, ..., 2 * NUM_CALLS - 1, 0, 1, 2, ...
        assert_equal(logs[ii]["logIndex"], hex(log_index % (2 * NUM_CALLS)))
        log_index += 1

        # transactionIndex:
        # 0, 0, 1, 1, 2, 2, ..., NUM_CALLS - 1, NUM_CALLS - 1, 0, 0, 1, 1, ...
        assert_equal(logs[ii]["transactionIndex"],
                     hex((transaction_index // 2) % NUM_CALLS))
        transaction_index += 1

        # transactionLogIndex:
        # 0, 1, 0, 1, 0, 1, ...
assert_equal(logs[ii]["transactionLogIndex"], hex(transaction_log_index % 2)) transaction_log_index += 1 # block hash order should not affect log order filter = Filter(block_hashes=[block_hash_2, block_hash_1], topics=[BAR_TOPIC]) logs2 = self.rpc.get_logs(filter) assert_equal(logs, logs2) # given a limit, we should receive the _last_ few logs filter = Filter(block_hashes=[block_hash_1, block_hash_2], limit = hex(3 * NUM_CALLS + NUM_CALLS // 2), topics=[BAR_TOPIC]) logs = self.rpc.get_logs(filter) assert_equal(len(logs), 3 * NUM_CALLS + NUM_CALLS // 2) for ii in range(0, 3 * NUM_CALLS + NUM_CALLS // 2): assert_equal(len(logs[ii]["topics"]), 3) assert_equal(logs[ii]["topics"][0], BAR_TOPIC) assert_equal(logs[ii]["topics"][1], self.address_to_topic(sender)) assert_equal(logs[ii]["topics"][2], self.number_to_topic(NUM_CALLS // 2 + ii)) # given an offset and a limit, we should receive the corresponding logs filter = Filter(block_hashes=[block_hash_1, block_hash_2], offset = hex(NUM_CALLS // 2), limit = hex(NUM_CALLS // 2), topics=[BAR_TOPIC]) logs = self.rpc.get_logs(filter) assert_equal(len(logs), NUM_CALLS // 2) for ii in range(0, NUM_CALLS // 2): assert_equal(len(logs[ii]["topics"]), 3) assert_equal(logs[ii]["topics"][0], BAR_TOPIC) assert_equal(logs[ii]["topics"][1], self.address_to_topic(sender)) assert_equal(logs[ii]["topics"][2], self.number_to_topic(3 * NUM_CALLS + ii)) filter = Filter(from_epoch = epoch_1, to_epoch = epoch_2, offset = hex(NUM_CALLS // 2), limit = hex(NUM_CALLS // 2), topics=[BAR_TOPIC]) logs2 = self.rpc.get_logs(filter) assert_equal(logs, logs2) # test paging use case BATCH_SIZE = 7 filter = Filter(block_hashes=[block_hash_1, block_hash_2], topics=[BAR_TOPIC]) all_logs = self.rpc.get_logs(filter) collected_logs = [] offset = 0 while True: filter = Filter(block_hashes=[block_hash_1, block_hash_2], offset = hex(offset), limit = hex(BATCH_SIZE), topics=[BAR_TOPIC]) logs = self.rpc.get_logs(filter) if len(logs) == 0: break collected_logs = logs + collected_logs offset += BATCH_SIZE assert_equal(collected_logs, all_logs) # get-logs-filter-max-epoch-range should limit the number of epochs queried. self.stop_node(0) self.start_node(0, ["--get-logs-filter-max-epoch-range", "16"]) filter = Filter(from_epoch="0x0", to_epoch="0x0f", topics=[BAR_TOPIC]) # should not raise error self.rpc.get_logs(filter) filter = Filter(from_epoch="0x0", to_epoch="0x10", topics=[BAR_TOPIC]) assert_raises_rpc_error(None, None, self.rpc.get_logs, filter) self.log.info("Pass")
async def run_async(self):
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(priv_to_addr(priv_key))

    # deploy two instances of the contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    _, contract1 = self.deploy_contract(sender, priv_key, bytecode)
    _, contract2 = self.deploy_contract(sender, priv_key, bytecode)

    # subscribe
    sub_all = await self.pubsub[FULLNODE1].subscribe("logs")
    sub_one = await self.pubsub[FULLNODE1].subscribe(
        "logs", Filter(address=[contract2]).__dict__)

    # call contracts and collect receipts
    receipts = []

    for _ in range(NUM_CALLS):
        r = self.call_contract(sender, priv_key, contract1, FOO_TOPIC)
        assert r is not None
        receipts.append(r)

        r = self.call_contract(sender, priv_key, contract2, FOO_TOPIC)
        receipts.append(r)
        assert r is not None

    # collect pub-sub notifications
    logs1 = [l async for l in sub_all.iter()]
    logs2 = [l async for l in sub_one.iter()]

    assert_equal(len(logs1), 2 * NUM_CALLS)
    assert_equal(len(logs2), NUM_CALLS)

    self.log.info(f"Pass -- retrieved logs with no fork")

    # create alternative fork
    old_tip = self.rpc[FULLNODE0].best_block_hash()
    old_tip_epoch = self.rpc[FULLNODE0].epoch_number()
    fork_hash = receipts[len(receipts) // 2]["blockHash"]
    fork_epoch = receipts[len(receipts) // 2]["epochNumber"]

    self.log.info(f"Creating fork at {fork_hash[:20]}... (#{fork_epoch})")

    new_tip = self.generate_chain(fork_hash,
                                  2 * (old_tip_epoch - fork_epoch))[-1]
    new_tip = self.rpc[FULLNODE0].generate_block_with_parent(
        new_tip, referee=[old_tip])
    new_tip = self.generate_chain(new_tip, 20)[-1]
    new_tip_epoch = self.rpc[FULLNODE0].epoch_number()
    sync_blocks(self.nodes)

    self.log.info(
        f"Tip: {old_tip[:20]}... (#{old_tip_epoch}) --> "
        f"{new_tip[:20]}... (#{new_tip_epoch})")

    # block order changed, some transactions need to be re-executed
    num_to_reexecute = sum(1 for r in receipts
                           if r["epochNumber"] > fork_epoch)

    msg = await sub_all.next(timeout=5)
    assert msg["revertTo"] is not None
    assert_equal(int(msg["revertTo"], 16), fork_epoch)

    logs = [l async for l in sub_all.iter()]
    assert_equal(len(logs), num_to_reexecute)

    self.log.info(f"Pass -- retrieved re-executed logs after fork")
def run_test(self):
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(priv_to_addr(priv_key))

    # deploy contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    _, contractAddr = self.deploy_contract(sender, priv_key, bytecode)
    self.log.info("contract deployed")

    contract_epoch = hex(self.rpc[FULLNODE0].epoch_number())

    # call method once
    receipt = self.call_contract(sender, priv_key, contractAddr,
                                 encode_hex_0x(keccak(b"foo()")))

    call_epoch = hex(self.rpc[FULLNODE0].epoch_number())

    # deploy another instance of the contract
    _, contractAddr2 = self.deploy_contract(sender, priv_key, bytecode)

    # call method multiple times
    for ii in range(0, NUM_CALLS - 3):
        self.call_contract(sender, priv_key, contractAddr,
                           encode_hex_0x(keccak(b"foo()")))

    # make sure we have enough blocks to be certain about the validity of previous blocks
    self.log.info("generating blocks...")
    for _ in range(50):
        self.generate_correct_block(FULLNODE0)

    self.log.info("syncing full nodes...")
    sync_blocks(self.nodes[FULLNODE0:FULLNODE1])

    # connect light node to full nodes
    connect_nodes(self.nodes, LIGHTNODE, FULLNODE0)
    connect_nodes(self.nodes, LIGHTNODE, FULLNODE1)

    # make sure all nodes are in sync
    self.log.info("syncing light node...")
    sync_blocks(self.nodes[:])

    # retrieve contract code
    self.log.info("retrieving contract code...")
    self.check_code(contractAddr, contract_epoch)

    # apply filter, we expect a single log with 2 topics
    self.log.info("testing filter range...")
    self.check_filter(Filter(from_epoch="earliest", to_epoch=contract_epoch))
    self.check_filter(Filter(from_epoch="earliest", to_epoch=call_epoch))
    self.check_filter(Filter())
    self.check_filter(Filter(from_epoch="0x0", to_epoch="0x0"))

    # apply filter for specific block, we expect a single log with 3 topics
    self.check_filter(Filter(block_hashes=[receipt["blockHash"]]))

    # apply filter for specific topics
    self.log.info("testing filter topics...")
    self.check_filter(Filter(topics=[CONSTRUCTED_TOPIC]))
    self.check_filter(Filter(topics=[CALLED_TOPIC]))
    self.check_filter(Filter(topics=[None, self.address_to_topic(sender)]))
    self.check_filter(Filter(topics=[
        CALLED_TOPIC, None,
        [self.number_to_topic(3), self.number_to_topic(4)]
    ]))

    # apply filter with limit
    self.log.info("testing filter limit...")
    self.check_filter(Filter(limit=("0x%x" % (NUM_CALLS // 2))))

    # apply filter for specific contract address
    self.log.info("testing address filtering...")
    self.check_filter(Filter(address=[contractAddr]))
    self.check_filter(Filter(address=[contractAddr2]))

    self.log.info("Pass")
def run_test(self):
    # initialize Conflux account
    self.cfxPrivkey = default_config['GENESIS_PRI_KEY']
    self.cfxAccount = self.rpc.GENESIS_ADDR
    print(f'Using Conflux account {self.cfxAccount}')

    # initialize EVM account
    self.evmAccount = self.w3.eth.account.privateKeyToAccount(
        '0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef')
    print(f'Using EVM account {self.evmAccount.address}')
    self.cross_space_transfer(self.evmAccount.address, 1 * 10**18)
    assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address),
                 hex(1 * 10**18))

    # deploy Conflux space contract
    confluxContractAddr = self.deploy_conflux_space(CONFLUX_CONTRACT_PATH)
    print(f'Conflux contract: {confluxContractAddr}')

    # deploy EVM space contract
    evmContractAddr = self.deploy_evm_space(EVM_CONTRACT_PATH)
    print(f'EVM contract: {evmContractAddr}')

    #                               ---
    #            .-----------------| D |....
    #            V                  ---    |
    #           ---      ---      ---      ---
    # ... <--  | A | <- | B | <- | C | <- | E | <- ...
    #           ---      ---      ---      ---
    #
    #                  A --- B --- C --- D --- E
    #  block number    0  |  1  |  2  |  3  |  4  |
    #  epoch number    0  |  1  |  2  |     3     |

    cfx_next_nonce = self.rpc.get_nonce(self.cfxAccount)
    cfx_tx_hashes = []

    evm_next_nonce = self.w3.eth.getTransactionCount(self.evmAccount.address)
    evm_tx_hashes = []

    def emitConflux(n):
        nonlocal cfx_next_nonce, cfx_tx_hashes
        data_hex = (encode_hex_0x(keccak(b"emitConflux(uint256)"))[:10] +
                    encode_u256(n))
        tx = self.rpc.new_contract_tx(receiver=confluxContractAddr,
                                      data_hex=data_hex,
                                      nonce=cfx_next_nonce,
                                      sender=self.cfxAccount,
                                      priv_key=self.cfxPrivkey)
        cfx_next_nonce += 1
        cfx_tx_hashes.append(tx.hash_hex())
        return tx

    def emitBoth(n):
        nonlocal cfx_next_nonce, cfx_tx_hashes
        data_hex = (encode_hex_0x(keccak(b"emitBoth(uint256,bytes20)"))[:10] +
                    encode_u256(n) +
                    encode_bytes20(evmContractAddr.replace('0x', '')))
        tx = self.rpc.new_contract_tx(receiver=confluxContractAddr,
                                      data_hex=data_hex,
                                      nonce=cfx_next_nonce,
                                      sender=self.cfxAccount,
                                      priv_key=self.cfxPrivkey)
        cfx_next_nonce += 1
        cfx_tx_hashes.append(tx.hash_hex())
        return tx

    def emitEVM(n):
        nonlocal evm_next_nonce, evm_tx_hashes
        data_hex = (encode_hex_0x(keccak(b"emitEVM(uint256)"))[:10] +
                    encode_u256(n))
        tx, hash = self.construct_evm_tx(receiver=evmContractAddr,
                                         data_hex=data_hex,
                                         nonce=evm_next_nonce)
        evm_next_nonce += 1
        evm_tx_hashes.append(hash)
        return tx

    # generate ledger
    block_0 = self.rpc.block_by_epoch("latest_mined")['hash']

    block_a = self.rpc.generate_custom_block(parent_hash=block_0, referee=[], txs=[
        emitConflux(11),
        emitBoth(12),
        emitEVM(13),
    ])

    block_b = self.rpc.generate_custom_block(parent_hash=block_a, referee=[], txs=[
        emitConflux(14),
        emitBoth(15),
        emitEVM(16),
    ])

    block_c = self.rpc.generate_custom_block(parent_hash=block_b, referee=[], txs=[])

    block_d = self.rpc.generate_custom_block(parent_hash=block_a, referee=[], txs=[
        emitConflux(21),
        emitBoth(22),
        emitEVM(23),
    ])

    block_e = self.rpc.generate_custom_block(parent_hash=block_c, referee=[block_d], txs=[
        emitConflux(24),
        emitBoth(25),
        emitEVM(26),
    ])

    epoch_a = self.rpc.block_by_hash(block_a)['epochNumber']
    epoch_b = self.rpc.block_by_hash(block_b)['epochNumber']
    epoch_e = self.rpc.block_by_hash(block_e)['epochNumber']

    # make sure transactions have been executed
    parent_hash = block_e

    for _ in range(5):
        block = self.rpc.generate_custom_block(parent_hash=parent_hash,
                                               referee=[], txs=[])
        parent_hash = block

    for h in cfx_tx_hashes:
        receipt = self.rpc.get_transaction_receipt(h)
        assert_equal(receipt["outcomeStatus"], "0x0")

    for h in evm_tx_hashes:
        receipt = self.w3.eth.waitForTransactionReceipt(h)
        assert_equal(receipt["status"], 1)

    # check Conflux events
    filter = Filter(topics=[TEST_EVENT_TOPIC],
                    from_epoch=epoch_a, to_epoch=epoch_e)
    logs = self.rpc.get_logs(filter)
    assert_equal(len(logs), 8)

    # --------------- 1 block per epoch ---------------
    # check EVM events
    # we expect 4 events: #12, #13, #15, #16
    filter = {
        "topics": [TEST_EVENT_TOPIC],
        "fromBlock": epoch_a,
        "toBlock": epoch_b
    }
    logs = self.nodes[0].eth_getLogs(filter)
    assert_equal(len(logs), 4)

    # emitBoth: TestEvent(12)
    assert_equal(logs[0]["data"], number_to_topic(12))
    assert_equal(logs[0]["address"], evmContractAddr.lower())
    assert_equal(logs[0]["blockHash"], block_a)
    assert_equal(logs[0]["blockNumber"], epoch_a)
    assert_equal(logs[0]["transactionHash"], cfx_tx_hashes[1])
    # TODO: should use phantom tx here
    # assert_equal(logs[0]["logIndex"], '0x0')
    # assert_equal(logs[0]["transactionIndex"], '0x0')
    # assert_equal(logs[0]["transactionLogIndex"], '0x0')
    assert_equal(logs[0]["removed"], False)

    # emitEVM: TestEvent(13)
    assert_equal(logs[1]["data"], number_to_topic(13))
    assert_equal(logs[1]["address"], evmContractAddr.lower())
    assert_equal(logs[1]["blockHash"], block_a)
    assert_equal(logs[1]["blockNumber"], epoch_a)
    assert_equal(logs[1]["transactionHash"], evm_tx_hashes[0].hex())
    # assert_equal(logs[1]["logIndex"], '0x1')
    # assert_equal(logs[1]["transactionIndex"], '0x1')
    assert_equal(logs[1]["transactionLogIndex"], '0x0')
    assert_equal(logs[1]["removed"], False)

    # emitBoth: TestEvent(15)
    assert_equal(logs[2]["data"], number_to_topic(15))
    assert_equal(logs[2]["address"], evmContractAddr.lower())
    assert_equal(logs[2]["blockHash"], block_b)
    assert_equal(logs[2]["blockNumber"], epoch_b)
    assert_equal(logs[2]["transactionHash"], cfx_tx_hashes[3])
    # TODO: should use phantom tx here
    # assert_equal(logs[2]["logIndex"], '0x0')
    # assert_equal(logs[2]["transactionIndex"], '0x0')
    # assert_equal(logs[2]["transactionLogIndex"], '0x0')
    assert_equal(logs[2]["removed"], False)

    # emitEVM: TestEvent(16)
    assert_equal(logs[3]["data"], number_to_topic(16))
    assert_equal(logs[3]["address"], evmContractAddr.lower())
    assert_equal(logs[3]["blockHash"], block_b)
    assert_equal(logs[3]["blockNumber"], epoch_b)
    assert_equal(logs[3]["transactionHash"], evm_tx_hashes[1].hex())
    # assert_equal(logs[3]["logIndex"], '0x1')
    # assert_equal(logs[3]["transactionIndex"], '0x1')
    assert_equal(logs[3]["transactionLogIndex"], '0x0')
    assert_equal(logs[3]["removed"], False)

    # --------------- 2 blocks per epoch ---------------
    # check EVM events
    # we expect 4 events: #22, #23, #25, #26
    filter = {
        "topics": [TEST_EVENT_TOPIC],
        "fromBlock": epoch_e,
        "toBlock": epoch_e
    }
    logs = self.nodes[0].eth_getLogs(filter)
    assert_equal(len(logs), 4)

    # emitBoth: TestEvent(22)
    assert_equal(logs[0]["data"], number_to_topic(22))
    assert_equal(logs[0]["address"], evmContractAddr.lower())
    assert_equal(logs[0]["blockHash"], block_e)
    assert_equal(logs[0]["blockNumber"], epoch_e)
    assert_equal(logs[0]["transactionHash"], cfx_tx_hashes[5])
    # TODO: should use phantom tx here
    # assert_equal(logs[0]["logIndex"], '0x0')
    # assert_equal(logs[0]["transactionIndex"], '0x0')
    # assert_equal(logs[0]["transactionLogIndex"], '0x0')
    assert_equal(logs[0]["removed"], False)

    # emitEVM: TestEvent(23)
    assert_equal(logs[1]["data"], number_to_topic(23))
    assert_equal(logs[1]["address"], evmContractAddr.lower())
    assert_equal(logs[1]["blockHash"], block_e)
    assert_equal(logs[1]["blockNumber"], epoch_e)
    assert_equal(logs[1]["transactionHash"], evm_tx_hashes[2].hex())
    # assert_equal(logs[1]["logIndex"], '0x1')
    # assert_equal(logs[1]["transactionIndex"], '0x1')
assert_equal(logs[1]["transactionLogIndex"], '0x0') assert_equal(logs[1]["removed"], False) # emitBoth: TestEvent(25) assert_equal(logs[2]["data"], number_to_topic(25)) assert_equal(logs[2]["address"], evmContractAddr.lower()) assert_equal(logs[2]["blockHash"], block_e) assert_equal(logs[2]["blockNumber"], epoch_e) assert_equal(logs[2]["transactionHash"], cfx_tx_hashes[7]) # TODO: should use phantom tx here # assert_equal(logs[2]["logIndex"], '0x2') # assert_equal(logs[2]["transactionIndex"], '0x2') # assert_equal(logs[2]["transactionLogIndex"], '0x0') assert_equal(logs[2]["removed"], False) # emitEVM: TestEvent(26) assert_equal(logs[3]["data"], number_to_topic(26)) assert_equal(logs[3]["address"], evmContractAddr.lower()) assert_equal(logs[3]["blockHash"], block_e) assert_equal(logs[3]["blockNumber"], epoch_e) assert_equal(logs[3]["transactionHash"], evm_tx_hashes[3].hex()) # assert_equal(logs[3]["logIndex"], '0x3') # assert_equal(logs[3]["transactionIndex"], '0x3') assert_equal(logs[3]["transactionLogIndex"], '0x0') assert_equal(logs[3]["removed"], False) # --------------- other fields --------------- # filter by block hash filter = {"topics": [TEST_EVENT_TOPIC], "blockHash": block_c} logs_2 = self.nodes[0].eth_getLogs(filter) assert_equal(logs_2, []) filter = { "topics": [TEST_EVENT_TOPIC], "blockHash": block_d } # from EVM perspective, D does not exist assert_raises_rpc_error(None, None, self.nodes[0].eth_getLogs, filter) filter = {"topics": [TEST_EVENT_TOPIC], "blockHash": block_e} logs_2 = self.nodes[0].eth_getLogs(filter) assert_equal(logs_2, logs) # filter limit filter = { "topics": [TEST_EVENT_TOPIC], "blockHash": block_e, "limit": 1 } logs_2 = self.nodes[0].eth_getLogs(filter) assert_equal(logs_2, [logs[-1]]) # "earliest", "latest" filter = { "topics": [TEST_EVENT_TOPIC], "fromBlock": "earliest", "toBlock": "latest" } logs_2 = self.nodes[0].eth_getLogs(filter) assert_equal(len(logs_2), 8) filter = { "topics": [TEST_EVENT_TOPIC], "fromBlock": "earliest", "toBlock": "latest", "limit": 4 } logs_2 = self.nodes[0].eth_getLogs(filter) assert_equal(logs_2, logs) # address filter = {"address": confluxContractAddr} logs_2 = self.nodes[0].eth_getLogs(filter) assert_equal(logs_2, []) filter = {"address": evmContractAddr} logs_2 = self.nodes[0].eth_getLogs(filter) assert_equal(len(logs_2), 8) self.log.info("Pass")
async def run_async(self):
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(priv_to_addr(priv_key))

    # deploy two instances of the contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    _, contract1 = self.deploy_contract(sender, priv_key, bytecode)
    _, contract2 = self.deploy_contract(sender, priv_key, bytecode)

    # subscribe
    sub_all = await self.pubsub[FULLNODE1].subscribe("logs")
    sub_one = await self.pubsub[FULLNODE1].subscribe(
        "logs", Filter(address=[contract2]).__dict__)

    # call contracts and collect receipts
    receipts = []

    for _ in range(NUM_CALLS):
        r = self.call_contract(sender, priv_key, contract1, FOO_TOPIC)
        assert r is not None
        receipts.append(r)

        r = self.call_contract(sender, priv_key, contract2, FOO_TOPIC)
        receipts.append(r)
        assert r is not None

    # make sure the pub-sub layer processes the logs
    self.rpc[FULLNODE0].generate_blocks(20)
    sync_blocks(self.nodes)

    # collect pub-sub notifications
    logs1 = [l async for l in sub_all.iter()]
    logs2 = [l async for l in sub_one.iter()]

    assert_equal(len(logs1), 2 * NUM_CALLS)
    assert_equal(len(logs2), NUM_CALLS)

    self.log.info(f"Pass -- retrieved logs with no fork")

    # create alternative fork
    old_tip = self.rpc[FULLNODE0].best_block_hash()
    old_tip_epoch = self.rpc[FULLNODE0].epoch_number()
    fork_hash = receipts[len(receipts) // 2]["blockHash"]
    fork_epoch = int(receipts[len(receipts) // 2]["epochNumber"], 16)

    self.log.info(f"Creating fork at {fork_hash[:20]}... (#{fork_epoch})")

    new_tip = self.generate_chain(fork_hash,
                                  2 * (old_tip_epoch - fork_epoch))[-1]
    new_tip = self.rpc[FULLNODE0].generate_block_with_parent(
        new_tip, referee=[old_tip])
    new_tip = self.generate_chain(new_tip, 20)[-1]
    new_tip_epoch = self.rpc[FULLNODE0].epoch_number()
    sync_blocks(self.nodes)

    self.log.info(
        f"Tip: {old_tip[:20]}... (#{old_tip_epoch}) --> "
        f"{new_tip[:20]}... (#{new_tip_epoch})")

    # block order changed, some transactions need to be re-executed
    num_to_reexecute = sum(1 for r in receipts
                           if int(r["epochNumber"], 16) > fork_epoch)

    msg = await sub_all.next(timeout=5)
    assert msg["revertTo"] is not None
    assert_equal(int(msg["revertTo"], 16), fork_epoch)

    logs = [l async for l in sub_all.iter()]
    assert_equal(len(logs), num_to_reexecute)

    self.log.info(f"Pass -- retrieved re-executed logs after fork")

    # create one transaction that is mined but not executed yet
    sync_blocks(self.nodes)
    tx = self.rpc[FULLNODE0].new_contract_tx(receiver=contract1,
                                             data_hex=FOO_TOPIC,
                                             sender=sender,
                                             priv_key=priv_key,
                                             storage_limit=20000)
    assert_equal(self.rpc[FULLNODE0].send_tx(tx, wait_for_receipt=False),
                 tx.hash_hex())
    self.rpc[FULLNODE0].generate_block(num_txs=1)
    receipt = self.rpc[FULLNODE0].get_transaction_receipt(tx.hash_hex())
    assert_equal(receipt, None)
    time.sleep(1)

    # mine more blocks, the transaction is now executed
    self.rpc[FULLNODE0].generate_blocks(4)
    receipt = self.rpc[FULLNODE0].get_transaction_receipt(tx.hash_hex())
    assert_ne(receipt, None)

    # make sure the pub-sub layer processes the logs
    self.rpc[FULLNODE0].generate_blocks(20)
    sync_blocks(self.nodes)

    # this would time out before #1989 was fixed
    await sub_all.next()

    self.log.info(f"Pass -- test #1989 fix")
def test_invalid_filter(self):
    self.generate_blocks_to_state()

    # invalid epoch type
    filter = Filter(from_epoch=0)
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # should be `latest_state` or `latest_mined`
    filter = Filter(from_epoch="latest")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # inconsistent epoch numbers
    filter = Filter(from_epoch="0x02", to_epoch="0x01")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    filter = Filter(from_epoch="latest_state", to_epoch="earliest")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid epoch hex
    filter = Filter(from_epoch="0xQQQQ")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `block_hashes` type
    filter = Filter(block_hashes="")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    filter = Filter(block_hashes=["0x0"])
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `address` type
    filter = Filter(address="")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    filter = Filter(address=["0x0"])
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `topics` type
    filter = Filter(topics="")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    filter = Filter(topics=["0x0"])
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `limit` type
    filter = Filter(limit=1)
    assert_raises_rpc_error(None, None, self.get_logs, filter)
def run_test(self):
    # PoS contract is enabled; stake and register in the first hard-fork phase.
    client = RpcClient(self.nodes[self.num_nodes - 1])
    client.generate_empty_blocks(300)
    sync_blocks(self.nodes)
    for node in self.nodes[:-1]:
        client = RpcClient(node)
        pos_identifier, _ = client.wait_for_pos_register()
        sync_blocks(self.nodes)
    client = RpcClient(self.nodes[self.num_nodes - 1])

    # generate blocks until we are after pos initialization and before pos start
    best_epoch = client.epoch_number()
    client.generate_empty_blocks(600 - best_epoch)
    sync_blocks(self.nodes)

    voting_power_map = {}
    pub_keys_map = {}
    logs = client.get_logs(filter=Filter(
        from_epoch="earliest", to_epoch="latest_state",
        address=["0x0888000000000000000000000000000000000005"]))
    for log in logs:
        pos_identifier = log["topics"][1]
        if log["topics"][0] == REGISTER_TOPIC:
            bls_pub_key, vrf_pub_key = eth_abi.decode_abi(
                ["bytes", "bytes"], decode_hex(log["data"]))
            pub_keys_map[pos_identifier] = (encode_hex_0x(bls_pub_key),
                                            encode_hex_0x(vrf_pub_key))
        elif log["topics"][0] == INCREASE_STAKE_TOPIC:
            assert pos_identifier in pub_keys_map
            voting_power_map[pos_identifier] = parse_as_int(log["data"])

    with open(os.path.join(self.options.tmpdir, "public_keys"), "w") as f:
        for pos_identifier in pub_keys_map.keys():
            f.write(",".join([pub_keys_map[pos_identifier][0][2:],
                              pub_keys_map[pos_identifier][1][2:],
                              str(voting_power_map[pos_identifier])]) + "\n")
    initialize_tg_config(self.options.tmpdir, len(self.nodes), len(self.nodes),
                         DEFAULT_PY_TEST_CHAIN_ID, pkfile="public_keys")

    # generate blocks until pos start
    self.nodes[0].generate_empty_blocks(500)
    sync_blocks(self.nodes)
    pos_identifier, _ = client.wait_for_pos_register()
    client.generate_empty_blocks(400)
    sync_blocks(self.nodes)
    time.sleep(2)

    latest_pos_ref = self.latest_pos_ref()
    for i in range(55):
        print(i)
        if i == 10:
            self.stop_node(5, clean=True)
            self.start_node(5, phase_to_wait=None)
            self.nodes[5].wait_for_recovery(["NormalSyncPhase"], 30)
        if i == 12:
            self.maybe_restart_node(5, 1, 0)
        if i == 15:
            assert_equal(int(client.pos_get_account(pos_identifier)
                             ["status"]["availableVotes"], 0), 2000)
            client.pos_retire_self()
        if i == 30:
            self.maybe_restart_node(5, 1, 1)
        # Retire node 3 after 5 min.
        # Generate enough PoW blocks for PoS to progress
        self.nodes[0].generate_empty_blocks(60)
        # Leave some time for PoS to reach consensus
        time.sleep(3)
        self.nodes[0].generate_empty_blocks(1)
        new_pos_ref = self.latest_pos_ref()
        if i >= 10:
            assert_ne(latest_pos_ref, new_pos_ref)

    client.wait_for_unstake(client.node.pow_sk)
    assert client.get_balance(eth_utils.encode_hex(
        priv_to_addr(client.node.pow_sk))) > 10000 * 10**18
    assert_equal(int(client.pos_get_account(pos_identifier)
                     ["status"]["availableVotes"], 0), 0)
def run_test(self):
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(priv_to_addr(priv_key))

    # deploy storage test contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    tx = self.rpc.new_contract_tx(receiver="", data_hex=bytecode,
                                  sender=sender, priv_key=priv_key,
                                  storage_limit=20000)
    assert_equal(self.rpc.send_tx(tx, True), tx.hash_hex())
    receipt = self.rpc.get_transaction_receipt(tx.hash_hex())
    contractAddr = receipt["contractCreated"]
    assert_is_hex_string(contractAddr)

    #                               ---
    #            .-----------------| D |....
    #            V                  ---    |
    #           ---      ---      ---      ---
    # ... <--  | A | <- | B | <- | C | <- | E | <- ...
    #           ---      ---      ---      ---
    #
    #                  A --- B --- C --- D --- E
    #  block number    0  |  1  |  2  |  3  |  4  |
    #  epoch number    0  |  1  |  2  |     3     |

    start_nonce = self.rpc.get_nonce(self.rpc.GENESIS_ADDR)

    txs = [
        self.rpc.new_contract_tx(receiver=contractAddr,
                                 data_hex=encode_hex_0x(keccak(b"foo()")),
                                 nonce=start_nonce + 0, sender=sender,
                                 priv_key=priv_key, storage_limit=64),
        self.rpc.new_contract_tx(receiver=contractAddr,
                                 data_hex=encode_hex_0x(keccak(b"foo()")),
                                 nonce=start_nonce + 1, sender=sender,
                                 priv_key=priv_key),
        self.rpc.new_contract_tx(receiver=contractAddr,
                                 data_hex=encode_hex_0x(keccak(b"foo()")),
                                 nonce=start_nonce + 2, sender=sender,
                                 priv_key=priv_key),
        self.rpc.new_contract_tx(receiver=contractAddr,
                                 data_hex=encode_hex_0x(keccak(b"foo()")),
                                 nonce=start_nonce + 3, sender=sender,
                                 priv_key=priv_key),
    ]

    block_0 = self.rpc.block_by_epoch("latest_mined")['hash']
    epoch_0 = int(self.rpc.block_by_hash(block_0)['epochNumber'], 0)
    block_number_0 = int(self.rpc.block_by_hash(block_0)['blockNumber'], 0)

    block_a = self.rpc.generate_custom_block(parent_hash=block_0, referee=[], txs=[])
    block_b = self.rpc.generate_custom_block(parent_hash=block_a, referee=[], txs=[])
    block_c = self.rpc.generate_custom_block(parent_hash=block_b, referee=[], txs=[])
    block_d = self.rpc.generate_custom_block(parent_hash=block_a, referee=[], txs=txs[0:2])
    block_e = self.rpc.generate_custom_block(parent_hash=block_c, referee=[block_d], txs=txs[2:4])

    # make sure transactions have been executed
    parent_hash = block_e
    for _ in range(5):
        block = self.rpc.generate_custom_block(parent_hash=parent_hash,
                                               referee=[], txs=[])
        parent_hash = block

    # check logs
    block_number_a = int(self.rpc.block_by_hash(block_a)['blockNumber'], 0)
    block_number_d = int(self.rpc.block_by_hash(block_d)['blockNumber'], 0)

    filter = Filter(from_block=hex(block_number_a),
                    to_block=hex(block_number_d),
                    offset=hex(0), limit=hex(1))
    logs = self.rpc.get_logs(filter)
    assert_equal(len(logs), 1)
    assert_equal(logs[0]["topics"][2], number_to_topic(2))

    filter = Filter(from_block=hex(block_number_a),
                    to_block=hex(block_number_d),
                    offset=hex(1), limit=hex(1))
    logs = self.rpc.get_logs(filter)
    assert_equal(len(logs), 1)
    assert_equal(logs[0]["topics"][2], number_to_topic(1))

    filter = Filter(from_block=hex(block_number_a),
                    to_block=hex(block_number_d),
                    offset=hex(0), limit=hex(2))
    logs = self.rpc.get_logs(filter)
    assert_equal(len(logs), 2)
    assert_equal(logs[0]["topics"][2], number_to_topic(1))
    assert_equal(logs[1]["topics"][2], number_to_topic(2))

    self.log.info("Pass")
def run_test(self):
    time.sleep(7)
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(privtoaddr(priv_key))
    self.rpc = RpcClient(self.nodes[0])

    # lock tokens in bank
    solc = Solc()
    file_dir = os.path.dirname(os.path.realpath(__file__))
    staking_contract = solc.get_contract_instance(
        abi_file=os.path.join(
            file_dir, "contracts/storage_interest_staking_abi.json"),
        bytecode_file=os.path.join(
            file_dir, "contracts/storage_interest_staking_bytecode.dat"),
    )

    gas_price = 1
    gas = 50000000
    self.tx_conf = {
        "gas": int_to_hex(gas),
        "gasPrice": int_to_hex(gas_price),
        "chainId": 0
    }
    staking_contract_addr = Web3.toChecksumAddress(
        "443c409373ffd5c0bec1dddb7bec830856757b65")
    self.tx_conf["to"] = staking_contract_addr
    tx_data = eth_utils.decode_hex(
        staking_contract.functions.deposit(
            10000 * 10**18).buildTransaction(self.tx_conf)["data"])
    genesis_key = default_config["GENESIS_PRI_KEY"]
    genesis_addr = privtoaddr(genesis_key)
    tx = self.rpc.new_tx(value=0, receiver=staking_contract_addr, nonce=0,
                         data=tx_data, gas=gas, gas_price=gas_price)
    self.rpc.send_tx(tx, True)

    # apply filter, we expect no logs
    filter = Filter(from_epoch="earliest", to_epoch="latest_mined")
    result = self.rpc.get_logs(filter)
    assert_equal(result, [])

    # deploy contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    _, contractAddr = self.deploy_contract(sender, priv_key, bytecode)

    # apply filter, we expect a single log with 2 topics
    filter = Filter(from_epoch="earliest", to_epoch="latest_mined")
    logs0 = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs0)
    assert_equal(len(logs0), 1)
    assert_equal(len(logs0[0]["topics"]), 2)
    assert_equal(logs0[0]["topics"][0], CONSTRUCTED_TOPIC)
    assert_equal(logs0[0]["topics"][1], self.address_to_topic(sender))
    assert_equal(logs0[0]["data"], self.address_to_topic(sender))

    # call method
    receipt = self.call_contract(sender, priv_key, contractAddr,
                                 encode_hex_0x(keccak(b"foo()")))

    # apply filter, we expect two logs with 2 and 3 topics respectively
    filter = Filter(from_epoch="earliest", to_epoch="latest_mined")
    logs1 = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs1)
    assert_equal(len(logs1), 2)
    assert_equal(logs1[0], logs0[0])
    assert_equal(len(logs1[1]["topics"]), 3)
    assert_equal(logs1[1]["topics"][0], CALLED_TOPIC)
    assert_equal(logs1[1]["topics"][1], self.address_to_topic(sender))
    assert_equal(logs1[1]["topics"][2], self.number_to_topic(1))

    # apply filter for specific block, we expect a single log with 3 topics
    filter = Filter(block_hashes=[receipt["blockHash"]])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)
    assert_equal(logs[0], logs1[1])

    # call many times
    for ii in range(0, NUM_CALLS - 2):
        self.call_contract(sender, priv_key, contractAddr,
                           encode_hex_0x(keccak(b"foo()")))

    # apply filter, we expect NUM_CALLS log entries with increasing uint32 fields
    filter = Filter(from_epoch="earliest", to_epoch="latest_mined")
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    for ii in range(2, NUM_CALLS):
        assert_equal(len(logs[ii]["topics"]), 3)
        assert_equal(logs[ii]["topics"][0], CALLED_TOPIC)
        assert logs[ii]["topics"][1] == self.address_to_topic(sender)
        assert_equal(logs[ii]["topics"][2], self.number_to_topic(ii))

    # apply filter for specific topics
    filter = Filter(topics=[CONSTRUCTED_TOPIC])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)

    filter = Filter(topics=[CALLED_TOPIC])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS - 1)

    filter = Filter(topics=[None, self.address_to_topic(sender)])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    # find logs with `CALLED_TOPIC` as 1st topic and `3` or `4` as 3rd topic
    filter = Filter(topics=[
        CALLED_TOPIC, None,
        [self.number_to_topic(3), self.number_to_topic(4)]
    ])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 2)

    # apply filter with limit
    filter = Filter(limit=("0x%x" % (NUM_CALLS // 2)))
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS // 2)

    # apply filter for specific contract address
    _, contractAddr2 = self.deploy_contract(sender, priv_key, bytecode)

    filter = Filter(address=[contractAddr])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    filter = Filter(address=[contractAddr2])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)

    # apply filter to very first epoch, we expect no logs
    filter = Filter(from_epoch="0x0", to_epoch="0x0")
    result = self.rpc.get_logs(filter)
    assert_equal(result, [])

    self.log.info("Pass")
def run_test(self):
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(priv_to_addr(priv_key))
    self.rpc = RpcClient(self.nodes[0])

    # apply filter, we expect no logs
    filter = Filter()
    result = self.rpc.get_logs(filter)
    assert_equal(result, [])

    # deploy contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    _, contractAddr = self.deploy_contract(sender, priv_key, bytecode)

    # apply filter, we expect a single log with 2 topics
    filter = Filter(from_epoch="earliest", to_epoch="latest_state")
    logs0 = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs0)
    assert_equal(len(logs0), 1)
    assert_equal(len(logs0[0]["topics"]), 2)
    assert_equal(logs0[0]["topics"][0], CONSTRUCTED_TOPIC)
    assert_equal(logs0[0]["topics"][1], self.address_to_topic(sender))
    assert_equal(logs0[0]["data"], self.address_to_topic(sender))

    # call method
    receipt = self.call_contract(sender, priv_key, contractAddr,
                                 encode_hex_0x(keccak(b"foo()")),
                                 storage_limit=64)

    # apply filter, we expect two logs with 2 and 3 topics respectively
    filter = Filter(from_epoch="earliest", to_epoch="latest_state")
    logs1 = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs1)
    assert_equal(len(logs1), 2)
    assert_equal(logs1[0], logs0[0])
    assert_equal(len(logs1[1]["topics"]), 3)
    assert_equal(logs1[1]["topics"][0], CALLED_TOPIC)
    assert_equal(logs1[1]["topics"][1], self.address_to_topic(sender))
    assert_equal(logs1[1]["topics"][2], self.number_to_topic(1))

    # apply filter for specific block, we expect a single log with 3 topics
    filter = Filter(block_hashes=[receipt["blockHash"]])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)
    assert_equal(logs[0], logs1[1])

    # call many times
    for ii in range(2, NUM_CALLS):
        self.call_contract(sender, priv_key, contractAddr,
                           encode_hex_0x(keccak(b"foo()")),
                           storage_limit=0)

    # apply filter, we expect NUM_CALLS log entries with increasing uint32 fields
    filter = Filter(from_epoch="earliest", to_epoch="latest_state")
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    for ii in range(2, NUM_CALLS):
        assert_equal(len(logs[ii]["topics"]), 3)
        assert_equal(logs[ii]["topics"][0], CALLED_TOPIC)
        assert logs[ii]["topics"][1] == self.address_to_topic(sender)
        assert_equal(logs[ii]["topics"][2], self.number_to_topic(ii))

    # apply filter for specific topics
    filter = Filter(topics=[CONSTRUCTED_TOPIC])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)

    filter = Filter(topics=[CALLED_TOPIC])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS - 1)

    filter = Filter(topics=[None, self.address_to_topic(sender)])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    # find logs with `CALLED_TOPIC` as 1st topic and `3` or `4` as 3rd topic
    filter = Filter(topics=[
        CALLED_TOPIC, None,
        [self.number_to_topic(3), self.number_to_topic(4)]
    ])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 2)

    # apply filter with limit
    filter = Filter(limit=hex(NUM_CALLS // 2))
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS // 2)

    # apply filter for specific contract address
    _, contractAddr2 = self.deploy_contract(sender, priv_key, bytecode)
    filter = Filter(address=[contractAddr])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    filter = Filter(address=[contractAddr2])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)

    # apply filter to very first epoch, we expect no logs
    filter = Filter(from_epoch="earliest", to_epoch="earliest")
    result = self.rpc.get_logs(filter)
    assert_equal(result, [])

    # generate two blocks with `NUM_CALLS` valid logs in each
    parent_hash = self.rpc.block_by_epoch("latest_mined")['hash']
    start_nonce = self.rpc.get_nonce(sender)

    txs = [
        self.rpc.new_contract_tx(receiver=contractAddr,
                                 data_hex=encode_hex_0x(keccak(b"foo()")),
                                 sender=sender, priv_key=priv_key,
                                 storage_limit=64,
                                 nonce=start_nonce + ii)
        for ii in range(0, NUM_CALLS)
    ]
    block_hash_1 = self.rpc.generate_custom_block(parent_hash=parent_hash,
                                                  referee=[], txs=txs)

    txs = [
        self.rpc.new_contract_tx(receiver=contractAddr,
                                 data_hex=encode_hex_0x(keccak(b"foo()")),
                                 sender=sender, priv_key=priv_key,
                                 storage_limit=64,
                                 nonce=start_nonce + NUM_CALLS + ii)
        for ii in range(0, NUM_CALLS)
    ]
    block_hash_2 = self.rpc.generate_custom_block(parent_hash=block_hash_1,
                                                  referee=[], txs=txs)

    # blocks not executed yet, filtering should fail
    filter = Filter(block_hashes=[block_hash_1, block_hash_2])
    assert_raises_rpc_error(None, None, self.rpc.get_logs, filter)

    # generate some more blocks to ensure our two blocks are executed
    self.rpc.generate_blocks(10)

    # filtering for these two blocks should return logs in correct order
    filter = Filter(block_hashes=[block_hash_1, block_hash_2])
    logs = self.rpc.get_logs(filter)
    assert_equal(len(logs), 2 * NUM_CALLS)

    for ii in range(0, 2 * NUM_CALLS):
        assert_equal(len(logs[ii]["topics"]), 3)
        assert_equal(logs[ii]["topics"][0], CALLED_TOPIC)
        assert logs[ii]["topics"][1] == self.address_to_topic(sender)
        assert_equal(logs[ii]["topics"][2],
                     self.number_to_topic(ii + NUM_CALLS))

    # block hash order should not affect log order
    filter = Filter(block_hashes=[block_hash_2, block_hash_1])
    logs2 = self.rpc.get_logs(filter)
    assert_equal(logs, logs2)

    # given a limit, we should receive the _last_ few logs
    filter = Filter(block_hashes=[block_hash_1, block_hash_2],
                    limit=hex(NUM_CALLS + NUM_CALLS // 2))
    logs = self.rpc.get_logs(filter)
    assert_equal(len(logs), NUM_CALLS + NUM_CALLS // 2)

    for ii in range(0, NUM_CALLS + NUM_CALLS // 2):
        assert_equal(len(logs[ii]["topics"]), 3)
        assert_equal(logs[ii]["topics"][0], CALLED_TOPIC)
        assert logs[ii]["topics"][1] == self.address_to_topic(sender)
        assert_equal(logs[ii]["topics"][2],
                     self.number_to_topic(ii + NUM_CALLS + NUM_CALLS // 2))

    self.log.info("Pass")
def run_test(self):
    time.sleep(7)
    priv_key = default_config["GENESIS_PRI_KEY"]
    sender = eth_utils.encode_hex(privtoaddr(priv_key))
    self.rpc = RpcClient(self.nodes[0])

    # apply filter, we expect no logs
    filter = Filter(from_epoch="earliest", to_epoch="latest_mined")
    result = self.rpc.get_logs(filter)
    assert_equal(result, [])

    # deploy contract
    bytecode_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), CONTRACT_PATH)
    assert os.path.isfile(bytecode_file)
    bytecode = open(bytecode_file).read()
    _, contractAddr = self.deploy_contract(sender, priv_key, bytecode)

    # apply filter, we expect a single log with 2 topics
    filter = Filter(from_epoch="earliest", to_epoch="latest_mined")
    logs0 = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs0)
    assert_equal(len(logs0), 1)
    assert_equal(len(logs0[0]["topics"]), 2)
    assert_equal(logs0[0]["topics"][0], CONSTRUCTED_TOPIC)
    assert_equal(logs0[0]["topics"][1], self.address_to_topic(sender))

    # call method
    receipt = self.call_contract(sender, priv_key, contractAddr,
                                 encode_hex_0x(keccak(b"foo()")))

    # apply filter, we expect two logs with 2 and 3 topics respectively
    filter = Filter(from_epoch="earliest", to_epoch="latest_mined")
    logs1 = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs1)
    assert_equal(len(logs1), 2)
    assert_equal(logs1[0], logs0[0])
    assert_equal(len(logs1[1]["topics"]), 3)
    assert_equal(logs1[1]["topics"][0], CALLED_TOPIC)
    assert_equal(logs1[1]["topics"][1], self.address_to_topic(sender))
    assert_equal(logs1[1]["topics"][2], self.number_to_topic(1))

    # apply filter for specific block, we expect a single log with 3 topics
    filter = Filter(block_hashes=[receipt["blockHash"]])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)
    assert_equal(logs[0], logs1[1])

    # call many times
    for ii in range(0, NUM_CALLS - 2):
        self.call_contract(sender, priv_key, contractAddr,
                           encode_hex_0x(keccak(b"foo()")))

    # apply filter, we expect NUM_CALLS log entries with increasing uint32 fields
    filter = Filter(from_epoch="earliest", to_epoch="latest_mined")
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    for ii in range(2, NUM_CALLS):
        assert_equal(len(logs[ii]["topics"]), 3)
        assert_equal(logs[ii]["topics"][0], CALLED_TOPIC)
        assert logs[ii]["topics"][1] == self.address_to_topic(sender)
        assert_equal(logs[ii]["topics"][2], self.number_to_topic(ii))

    # apply filter for specific topics
    filter = Filter(topics=[CONSTRUCTED_TOPIC])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)

    filter = Filter(topics=[CALLED_TOPIC])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS - 1)

    filter = Filter(topics=[None, self.address_to_topic(sender)])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    # find logs with `CALLED_TOPIC` as 1st topic and `3` or `4` as 3rd topic
    filter = Filter(topics=[
        CALLED_TOPIC, None,
        [self.number_to_topic(3), self.number_to_topic(4)]
    ])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 2)

    # apply filter with limit
    filter = Filter(limit=("0x%x" % (NUM_CALLS // 2)))
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS // 2)

    # apply filter for specific contract address
    _, contractAddr2 = self.deploy_contract(sender, priv_key, bytecode)
    filter = Filter(address=[contractAddr])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), NUM_CALLS)

    filter = Filter(address=[contractAddr2])
    logs = self.rpc.get_logs(filter)
    self.assert_response_format_correct(logs)
    assert_equal(len(logs), 1)

    # apply filter to very first epoch, we expect no logs
    filter = Filter(from_epoch="0x0", to_epoch="0x0")
    result = self.rpc.get_logs(filter)
    assert_equal(result, [])

    self.log.info("Pass")
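# The topic filters exercised above follow the usual position-based semantics,
# as the assertions suggest: entry i of the filter must match topic i of the
# log; None is a wildcard; a list at a position matches if *any* of its values
# match. A minimal pure-Python sketch of that matching rule (illustrative
# only, not the node's actual implementation):
def topics_match(filter_topics, log_topics):
    for i, want in enumerate(filter_topics):
        if want is None:
            continue  # wildcard position
        got = log_topics[i] if i < len(log_topics) else None
        allowed = want if isinstance(want, list) else [want]
        if got not in allowed:
            return False
    return True

# e.g. topics_match([CALLED_TOPIC, None, [t3, t4]], log["topics"]) mirrors the
# "`CALLED_TOPIC` as 1st topic and `3` or `4` as 3rd topic" filter above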
def test_invalid_filter(self):
    self.blocks = self.generate_blocks_to_state()

    # invalid epoch type
    filter = Filter(from_epoch=0)
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # should be `latest_state` or `latest_mined`
    filter = Filter(from_epoch="latest")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # inconsistent epoch numbers
    filter = Filter(from_epoch="0x02", to_epoch="0x01")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    filter = Filter(from_epoch="latest_state", to_epoch="earliest")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid epoch hex
    filter = Filter(from_epoch="0xQQQQ")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `block_hashes` type
    filter = Filter(block_hashes="")
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    filter = Filter(block_hashes=["0x0"])
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `block_hashes` length
    filter = Filter(block_hashes=[NULL_H256] * 129)
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `address` type
    filter = Filter(address="", encode_address=False)
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    filter = Filter(address=["0x0"], encode_address=False)
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `topics` type
    filter = Filter(topics="", encode_address=False)
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    filter = Filter(topics=["0x0"], encode_address=False)
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `topics` length
    filter = Filter(topics=[NULL_H256] * 5)
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid `limit` type
    filter = Filter(limit=1)
    assert_raises_rpc_error(None, None, self.get_logs, filter)

    # invalid filter fields
    filter = Filter()
    filter.fromBlock = "0x0"
    assert_raises_rpc_error(None, None, self.get_logs, filter)
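# The cases above could also be written table-driven, which makes it easier to
# add new rejection cases. A sketch under the same assumptions (the Filter,
# NULL_H256, and assert_raises_rpc_error helpers used in this test):
def check_invalid_filters(self):
    # each entry is a kwargs dict that should make cfx_getLogs reject the filter
    invalid = [
        dict(from_epoch=0),                        # wrong epoch type
        dict(from_epoch="latest"),                 # unknown epoch tag
        dict(from_epoch="0x02", to_epoch="0x01"),  # reversed epoch range
        dict(from_epoch="0xQQQQ"),                 # invalid epoch hex
        dict(block_hashes=[NULL_H256] * 129),      # too many block hashes
        dict(topics=[NULL_H256] * 5),              # too many topics
        dict(limit=1),                             # wrong limit type
    ]
    for kwargs in invalid:
        assert_raises_rpc_error(None, None, self.get_logs, Filter(**kwargs))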
bitcoin_block_hash = "00000000000000000005e306896781cf5169a8bdff8aed8dce19c084adf4cc0d"
start_block_number = 68845000
end_block_number = 69245000

REGISTER_TOPIC = encode_hex_0x(keccak(b"Register(bytes32,bytes,bytes)"))
INCREASE_STAKE_TOPIC = encode_hex_0x(keccak(b"IncreaseStake(bytes32,uint64)"))

client = RpcClient(node=get_simple_rpc_proxy(rpc_url, timeout=10))
cwd = "./run/pos_config"

voting_power_map = collections.defaultdict(lambda: 0)
pub_keys_map = {}

for i in range(start_block_number, end_block_number + 1, 1000):
    start = i
    end = min(i + 999, end_block_number + 1)
    print(start, end)
    logs = client.get_logs(filter=Filter(
        from_block=hex(start),
        to_block=hex(end),
        address=["0x0888000000000000000000000000000000000005"],
        networkid=1))
    print("logs=", logs)
    for log in logs:
        pos_identifier = log["topics"][1]
        if log["topics"][0] == REGISTER_TOPIC:
            bls_pub_key, vrf_pub_key = eth_abi.decode_abi(
                ["bytes", "bytes"], decode_hex(log["data"]))
            pub_keys_map[pos_identifier] = (encode_hex_0x(bls_pub_key),
                                            encode_hex_0x(vrf_pub_key))
            print(pub_keys_map[pos_identifier])
        elif log["topics"][0] == INCREASE_STAKE_TOPIC:
            assert pos_identifier in pub_keys_map
            voting_power_map[pos_identifier] += parse_as_int(log["data"])

with open(os.path.join(cwd, "public_keys"), "w") as f:
    for pos_identifier in pub_keys_map.keys():
        f.write(",".join([
            pub_keys_map[pos_identifier][0][2:],
            pub_keys_map[pos_identifier][1][2:],
            str(voting_power_map[pos_identifier])
        ]) + "\n")

cfx_block_hash = client.block_by_block_number(hex(end_block_number))["hash"]
initial_seed = encode_hex(keccak(hexstr=cfx_block_hash[2:] + bitcoin_block_hash))
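# The scan above queries logs in fixed windows of roughly 1000 blocks rather
# than one giant range, presumably to stay under the node's per-request range
# cap. A reusable sketch of that windowing pattern; the helper name and the
# inclusive-bounds convention are illustrative assumptions:
def block_windows(start, end, size=1000):
    # yield inclusive (from_block, to_block) pairs covering [start, end]
    lo = start
    while lo <= end:
        hi = min(lo + size - 1, end)
        yield lo, hi
        lo = hi + 1

# usage: for lo, hi in block_windows(start_block_number, end_block_number):
#            logs = client.get_logs(filter=Filter(from_block=hex(lo), to_block=hex(hi), ...))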
def run_test(self):
    # initialize EVM account
    self.evmAccount = self.w3.eth.account.privateKeyToAccount(
        '0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
    )
    print(f'Using EVM account {self.evmAccount.address}')
    self.cross_space_transfer(self.evmAccount.address, 1 * 10**18)
    assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address),
                 hex(1 * 10**18))

    # deploy Conflux space contract
    confluxContractAddr = self.deploy_conflux_space(CONFLUX_CONTRACT_PATH)
    print(f'Conflux contract: {confluxContractAddr}')

    # deploy EVM space contract
    evmContractAddr = self.deploy_evm_space(EVM_CONTRACT_PATH)
    print(f'EVM contract: {evmContractAddr}')

    # #1: call emitConflux(1)
    # this will emit 1 event in the Conflux space
    data_hex = encode_hex_0x(
        keccak(b"emitConflux(uint256)"))[:10] + encode_u256(1)
    receipt = self.call_conflux_space(confluxContractAddr, data_hex)
    assert_equal(len(receipt["logs"]), 1)
    assert_equal(receipt["logs"][0]["data"], number_to_topic(1))  # TestEvent(1)

    # #2: call emitBoth(2)
    # this will emit 2 events in the Conflux space (our contract + internal
    # contract) and 1 event in the EVM space
    data_hex = encode_hex_0x(
        keccak(b"emitBoth(uint256,bytes20)"))[:10] + encode_u256(
            2) + encode_bytes20(evmContractAddr.replace('0x', ''))
    receipt = self.call_conflux_space(confluxContractAddr, data_hex)
    assert_equal(len(receipt["logs"]), 2)
    assert_equal(receipt["logs"][0]["data"], number_to_topic(2))  # TestEvent(2)
    # NOTE: EVM-space events are not returned here

    # #3: call emitEVM(3)
    # this will emit 1 event in the EVM space
    data_hex = encode_hex_0x(
        keccak(b"emitEVM(uint256)"))[:10] + encode_u256(3)
    receipt = self.call_evm_space(evmContractAddr, data_hex)
    assert_equal(len(receipt["logs"]), 1)
    assert_equal(receipt["logs"][0]["data"], number_to_topic(3))  # TestEvent(3)

    # check Conflux events
    # we expect two events from #1 and #2
    filter = Filter(topics=[TEST_EVENT_TOPIC],
                    from_epoch="earliest",
                    to_epoch="latest_state")
    logs = self.rpc.get_logs(filter)
    assert_equal(len(logs), 2)
    assert_equal(logs[0]["data"], number_to_topic(1))  # TestEvent(1)
    assert_equal(logs[1]["data"], number_to_topic(2))  # TestEvent(2)

    # check EVM events
    # we expect two events from #2 and #3
    filter = {
        "topics": [TEST_EVENT_TOPIC],
        "fromBlock": "earliest",
        "toBlock": "latest"
    }
    logs = self.nodes[0].eth_getLogs(filter)
    assert_equal(len(logs), 2)
    assert_equal(logs[0]["data"], number_to_topic(2))  # TestEvent(2)
    assert_equal(logs[0]["address"], evmContractAddr.lower())
    assert_equal(logs[0]["removed"], False)
    assert_equal(logs[1]["data"], number_to_topic(3))  # TestEvent(3)
    assert_equal(logs[1]["address"], evmContractAddr.lower())
    assert_equal(logs[1]["removed"], False)

    # TODO(thegaram): add more detailed tests once we have more control over block production
    # - events in pivot and non-pivot blocks
    # - log.blockHash and log.blockNumber should correspond to pivot block
    # - logIndex, transactionIndex, transactionLogIndex

    self.log.info("Pass")
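# The `data_hex` payloads above are built by hand: a 4-byte function selector
# (the first 4 bytes of keccak over the signature) followed by the ABI-encoded
# arguments. A self-contained sketch of the same construction for a single
# uint256 argument, without the framework's encode_u256 helper; the function
# name is a hypothetical stand-in:
from eth_utils import keccak

def call_data_uint256(signature: str, arg: int) -> str:
    # selector: first 4 bytes of keccak(signature); arg: 32-byte big-endian word
    selector = keccak(text=signature)[:4]
    return "0x" + selector.hex() + arg.to_bytes(32, "big").hex()

# e.g. call_data_uint256("emitConflux(uint256)", 1) should match
#      encode_hex_0x(keccak(b"emitConflux(uint256)"))[:10] + encode_u256(1)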