def test_message_causes_disconnect(self, message):
    """Open a fresh p2p connection, send `message`, and verify the node
    disconnects the peer in response (leaving it with zero peers)."""
    node = self.nodes[0]
    conn = node.add_p2p_connection(P2PInterface())
    conn.send_message(message)
    conn.wait_for_disconnect()
    # The offending peer was the node's only connection.
    assert_equal(len(node.getpeerinfo()), 0)
def run_test(self): node0 = self.nodes[0].add_p2p_connection(P2PInterface()) # Set node time to 60 days ago self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60) # Generating a chain of 10 blocks block_hashes = self.nodes[0].generatetoaddress( 10, self.nodes[0].get_deterministic_priv_key().address) # Create longer chain starting 2 blocks before current tip height = len(block_hashes) - 2 block_hash = block_hashes[height - 1] block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1 new_blocks = self.build_chain(5, block_hash, height, block_time) # Force reorg to a longer chain node0.send_message(msg_headers(new_blocks)) node0.wait_for_getdata() for block in new_blocks: node0.send_and_ping(msg_block(block)) # Check that reorg succeeded assert_equal(self.nodes[0].getblockcount(), 13) stale_hash = int(block_hashes[-1], 16) # Check that getdata request for stale block succeeds self.send_block_request(stale_hash, node0) test_function = lambda: self.last_block_equals(stale_hash, node0) wait_until(test_function, timeout=3) # Check that getheader request for stale block header succeeds self.send_header_request(stale_hash, node0) test_function = lambda: self.last_header_equals(stale_hash, node0) wait_until(test_function, timeout=3) # Longest chain is extended so stale is much older than chain tip self.nodes[0].setmocktime(0) tip = self.nodes[0].generatetoaddress( 1, self.nodes[0].get_deterministic_priv_key().address)[0] assert_equal(self.nodes[0].getblockcount(), 14) # Send getdata & getheaders to refresh last received getheader message block_hash = int(tip, 16) self.send_block_request(block_hash, node0) self.send_header_request(block_hash, node0) node0.sync_with_ping() # Request for very old stale block should now fail self.send_block_request(stale_hash, node0) time.sleep(3) assert not self.last_block_equals(stale_hash, node0) # Request for very old stale block header should now fail self.send_header_request(stale_hash, node0) time.sleep(3) assert 
not self.last_header_equals(stale_hash, node0) # Verify we can fetch very old blocks and headers on the active chain block_hash = int(block_hashes[2], 16) self.send_block_request(block_hash, node0) self.send_header_request(block_hash, node0) node0.sync_with_ping() self.send_block_request(block_hash, node0) test_function = lambda: self.last_block_equals(block_hash, node0) wait_until(test_function, timeout=3) self.send_header_request(block_hash, node0) test_function = lambda: self.last_header_equals(block_hash, node0) wait_until(test_function, timeout=3)
def run_test(self):
    """Test enforcement of DER signatures (BIP66) at DERSIG_HEIGHT.

    - Before activation, a block containing a non-DER-signature tx is valid.
    - At activation, blocks below version 3 are rejected as obsolete.
    - A version 3 block with a non-DER-signature tx is invalid.
    - A version 3 block with a DERSIG-compliant tx is accepted.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())
    self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()
    self.log.info(
        "Test that a transaction with non-DER signature can still appear in a block"
    )
    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                                 self.nodeaddress, amount=1.0)
    unDERify(spendtx)
    spendtx.rehash()
    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                         block_time)
    block.nVersion = 2
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # Pre-activation: the non-DER tx is accepted in a block.
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)
    self.log.info("Test that blocks must now be at least version 3")
    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
    block.nVersion = 2
    block.rehash()
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The obsolete-version block must not advance the tip.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert_equal(self.nodes[0].p2p.last_message["reject"].code,
                     REJECT_OBSOLETE)
        assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                     b'bad-version(0x00000002)')
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        del self.nodes[0].p2p.last_message["reject"]
    self.log.info(
        "Test that transactions with non-DER signatures cannot appear in a block"
    )
    block.nVersion = 3
    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                 self.nodeaddress, amount=1.0)
    unDERify(spendtx)
    spendtx.rehash()
    # First we show that this tx is valid except for DERSIG by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal([{
        'txid': spendtx.hash,
        'allowed': False,
        'reject-reason':
        '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'
    }], self.nodes[0].testmempoolaccept(
        rawtxs=[bytes_to_hex_str(spendtx.serialize())], allowhighfees=True))
    # Now we verify that a block with this transaction is also invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        # We can receive different reject messages depending on whether
        # imperiumd is running with multiple script check threads. If script
        # check threads are not in use, then transaction script validation
        # happens sequentially, and imperiumd produces more specific reject
        # reasons.
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD
        ]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                         b'block-validation-failed')
        else:
            assert b'Non-canonical DER signature' in self.nodes[
                0].p2p.last_message["reject"].reason
    self.log.info(
        "Test that a version 3 block with a DERSIG-compliant transaction is accepted"
    )
    # Replace the offending tx with a properly-signed spend of the same coin.
    block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                      self.nodeaddress, amount=1.0)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Test that the node leaks no p2p messages to not-yet-handshaked peers
    and that the version message does not reveal the node's local address.

    Also checks that peers announcing an obsolete protocol version are
    disconnected.
    """
    # Peer that never sends version and should be disconnected for
    # misbehavior once it sends other traffic.
    no_version_disconnect_node = self.nodes[0].add_p2p_connection(
        CNodeNoVersionMisbehavior(), send_version=False,
        wait_for_verack=False)
    # Peer that never sends version and stays idle.
    no_version_idlenode = self.nodes[0].add_p2p_connection(
        CNodeNoVersionIdle(), send_version=False, wait_for_verack=False)
    # Peer that sends version but never acknowledges with verack.
    no_verack_idlenode = self.nodes[0].add_p2p_connection(
        CNodeNoVerackIdle(), wait_for_verack=False)
    # Wait until we got the verack in response to the version. Though, don't
    # wait for the other node to receive the verack, since we never sent one.
    no_verack_idlenode.wait_for_verack()
    wait_until(lambda: no_version_disconnect_node.ever_connected,
               timeout=10, lock=mininode_lock)
    wait_until(lambda: no_version_idlenode.ever_connected, timeout=10,
               lock=mininode_lock)
    wait_until(lambda: no_verack_idlenode.version_received, timeout=10,
               lock=mininode_lock)
    # Mine a block and make sure that it's not sent to the connected nodes
    self.nodes[0].generatetoaddress(
        1, self.nodes[0].get_deterministic_priv_key().address)
    # Give the node enough time to possibly leak out a message
    time.sleep(5)
    # Expect this node to be disconnected for misbehavior
    assert not no_version_disconnect_node.is_connected
    self.nodes[0].disconnect_p2ps()
    # Make sure no unexpected messages came in
    assert not no_version_disconnect_node.unexpected_msg
    assert not no_version_idlenode.unexpected_msg
    assert not no_verack_idlenode.unexpected_msg
    self.log.info(
        'Check that the version message does not leak the local address of the node'
    )
    p2p_version_store = self.nodes[0].add_p2p_connection(P2PVersionStore())
    ver = p2p_version_store.version_received
    # Check that received time is within one hour of now
    assert_greater_than_or_equal(ver.nTime, time.time() - 3600)
    assert_greater_than_or_equal(time.time() + 3600, ver.nTime)
    # The node must not disclose its own address: addrFrom stays zeroed.
    assert_equal(ver.addrFrom.port, 0)
    assert_equal(ver.addrFrom.ip, '0.0.0.0')
    assert_equal(ver.nStartingHeight, 201)
    assert_equal(ver.nRelay, 1)
    self.log.info('Check that old nodes are disconnected')
    # NOTE: 'peer=4' below assumes this is the fifth connection (ids 0-4)
    # made in this test.
    p2p_old_node = self.nodes[0].add_p2p_connection(
        P2PInterface(), send_version=False, wait_for_verack=False)
    old_version_msg = msg_version()
    old_version_msg.nVersion = 31799
    wait_until(lambda: p2p_old_node.is_connected)
    with self.nodes[0].assert_debug_log(
            ['peer=4 using obsolete version 31799; disconnecting']):
        p2p_old_node.send_message(old_version_msg)
        p2p_old_node.wait_for_disconnect()
def run_test(self):
    """Test that headers forking off before the last checkpoint are rejected.

    Feeds headers read from a data file: the main testnet3 header chain up
    to the first checkpoint, plus two fork headers (prefixed 'fork:') that
    branch off from genesis. The fork must be rejected while checkpoints
    are enforced, and accepted when they are disabled or not yet reached.
    """
    self.log.info("Read headers data")
    self.headers_file_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), self.options.datafile)
    with open(self.headers_file_path, encoding='utf-8') as headers_data:
        h_lines = [l.strip() for l in headers_data.readlines()]
    # The headers data is taken from testnet3 for early blocks from genesis
    # until the first checkpoint. There are two headers with valid POW at
    # height 1 and 2, forking off from genesis. They are indicated by the
    # FORK_PREFIX.
    FORK_PREFIX = 'fork:'
    self.headers = [l for l in h_lines if not l.startswith(FORK_PREFIX)]
    self.headers_fork = [
        l[len(FORK_PREFIX):] for l in h_lines if l.startswith(FORK_PREFIX)
    ]
    self.headers = [FromHex(CBlockHeader(), h) for h in self.headers]
    self.headers_fork = [
        FromHex(CBlockHeader(), h) for h in self.headers_fork
    ]
    self.log.info(
        "Feed all non-fork headers, including and up to the first checkpoint"
    )
    self.nodes[0].add_p2p_connection(P2PInterface())
    self.nodes[0].p2p.send_and_ping(msg_headers(self.headers))
    assert {
        'height': 546,
        'hash':
        '000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70',
        'branchlen': 546,
        'status': 'headers-only',
    } in self.nodes[0].getchaintips()
    self.log.info("Feed all fork headers (fails due to checkpoint)")
    with self.nodes[0].assert_debug_log(['bad-fork-prior-to-checkpoint']):
        self.nodes[0].p2p.send_message(msg_headers(self.headers_fork))
        # The node punishes the peer by disconnecting it.
        self.nodes[0].p2p.wait_for_disconnect()
    self.log.info("Feed all fork headers (succeeds without checkpoint)")
    # On node 0 it succeeds because checkpoints are disabled
    self.restart_node(0, extra_args=['-nocheckpoints'])
    self.nodes[0].add_p2p_connection(P2PInterface())
    self.nodes[0].p2p.send_and_ping(msg_headers(self.headers_fork))
    assert {
        "height": 2,
        "hash":
        "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39",
        "branchlen": 2,
        "status": "headers-only",
    } in self.nodes[0].getchaintips()
    # On node 1 it succeeds because no checkpoint has been reached yet by a
    # chain tip
    self.nodes[1].add_p2p_connection(P2PInterface())
    self.nodes[1].p2p.send_and_ping(msg_headers(self.headers_fork))
    assert {
        "height": 2,
        "hash":
        "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39",
        "branchlen": 2,
        "status": "headers-only",
    } in self.nodes[1].getchaintips()
def run_test(self):
    """Test p2p transaction handling in -blocksonly mode.

    - Unsolicited txs from ordinary peers cause disconnection.
    - Txs submitted via RPC are still accepted and relayed to peers.
    - Txs from whitelisted peers (with forcerelay) are accepted and relayed.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())
    self.log.info(
        'Check that txs from p2p are rejected and result in disconnect')
    # Spend the coinbase of block 1 (verbosity=2 returns decoded txs).
    prevtx = self.nodes[0].getblock(self.nodes[0].getblockhash(1),
                                    2)['tx'][0]
    rawtx = self.nodes[0].createrawtransaction(
        inputs=[{
            'txid': prevtx['txid'],
            'vout': 0
        }],
        outputs=[{
            self.nodes[0].get_deterministic_priv_key().address:
            50 - 0.00125
        }],
    )
    sigtx = self.nodes[0].signrawtransactionwithkey(
        hexstring=rawtx,
        privkeys=[self.nodes[0].get_deterministic_priv_key().key],
        prevtxs=[{
            'txid': prevtx['txid'],
            'vout': 0,
            'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'],
        }],
    )['hex']
    # In blocksonly mode the node does not do local tx relay.
    assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], False)
    with self.nodes[0].assert_debug_log(
        ['transaction sent in violation of protocol peer=0']):
        self.nodes[0].p2p.send_message(
            msg_tx(FromHex(CTransaction(), sigtx)))
        self.nodes[0].p2p.wait_for_disconnect()
        assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)
    # Remove the disconnected peer and add a new one.
    del self.nodes[0].p2ps[0]
    self.nodes[0].add_p2p_connection(P2PInterface())
    self.log.info(
        'Check that txs from rpc are not rejected and relayed to other peers'
    )
    assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True)
    txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']
    with self.nodes[0].assert_debug_log(
        ['received getdata for: tx {} peer=1'.format(txid)]):
        self.nodes[0].sendrawtransaction(sigtx)
        self.nodes[0].p2p.wait_for_tx(txid)
        assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
    self.log.info(
        'Check that txs from whitelisted peers are not rejected and relayed to others'
    )
    self.log.info(
        "Restarting node 0 with whitelist permission and blocksonly")
    self.restart_node(0, [
        "-persistmempool=0", "-whitelist=127.0.0.1", "-whitelistforcerelay",
        "-blocksonly"
    ])
    assert_equal(self.nodes[0].getrawmempool(), [])
    first_peer = self.nodes[0].add_p2p_connection(P2PInterface())
    second_peer = self.nodes[0].add_p2p_connection(P2PInterface())
    peer_1_info = self.nodes[0].getpeerinfo()[0]
    assert_equal(peer_1_info['whitelisted'], True)
    assert_equal(peer_1_info['permissions'],
                 ['noban', 'forcerelay', 'relay', 'mempool'])
    peer_2_info = self.nodes[0].getpeerinfo()[1]
    assert_equal(peer_2_info['whitelisted'], True)
    assert_equal(peer_2_info['permissions'],
                 ['noban', 'forcerelay', 'relay', 'mempool'])
    assert_equal(
        self.nodes[0].testmempoolaccept([sigtx])[0]['allowed'], True)
    txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']
    self.log.info(
        'Check that the tx from whitelisted first_peer is relayed to others (ie.second_peer)'
    )
    with self.nodes[0].assert_debug_log(["received getdata"]):
        first_peer.send_message(msg_tx(FromHex(CTransaction(), sigtx)))
        self.log.info(
            'Check that the whitelisted peer is still connected after sending the transaction'
        )
        assert_equal(first_peer.is_connected, True)
        second_peer.wait_for_tx(txid)
        assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
    self.log.info("Whitelisted peer's transaction is accepted and relayed")
def run_test(self):
    """Test serving of BIP157 compact block filters over p2p.

    Checks cfcheckpt/cfheaders/cfilters responses on both the active and a
    stale chain, validates filter hashes against the getblockfilter RPC,
    and verifies that unsupported or malformed requests cause disconnects.
    """
    # Node 0 supports COMPACT_FILTERS, node 1 does not.
    node0 = self.nodes[0].add_p2p_connection(CFiltersClient())
    node1 = self.nodes[1].add_p2p_connection(CFiltersClient())
    # Nodes 0 & 1 share the same first 999 blocks in the chain.
    self.nodes[0].generate(999)
    self.sync_blocks(timeout=600)
    # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
    disconnect_nodes(self.nodes[0], 1)
    self.nodes[0].generate(1)
    wait_until(lambda: self.nodes[0].getblockcount() == 1000)
    stale_block_hash = self.nodes[0].getblockhash(1000)
    self.nodes[1].generate(1001)
    wait_until(lambda: self.nodes[1].getblockcount() == 2000)
    self.log.info("get cfcheckpt on chain to be re-orged out.")
    request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                               stop_hash=int(stale_block_hash, 16))
    node0.send_and_ping(message=request)
    response = node0.last_message['cfcheckpt']
    assert_equal(response.filter_type, request.filter_type)
    assert_equal(response.stop_hash, request.stop_hash)
    # One checkpoint header at height 1000 (checkpoints come every 1000).
    assert_equal(len(response.headers), 1)
    self.log.info("Reorg node 0 to a new chain.")
    connect_nodes(self.nodes[0], 1)
    self.sync_blocks(timeout=600)
    main_block_hash = self.nodes[0].getblockhash(1000)
    assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"
    self.log.info("Check that peers can fetch cfcheckpt on active chain.")
    tip_hash = self.nodes[0].getbestblockhash()
    request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                               stop_hash=int(tip_hash, 16))
    node0.send_and_ping(request)
    response = node0.last_message['cfcheckpt']
    assert_equal(response.filter_type, request.filter_type)
    assert_equal(response.stop_hash, request.stop_hash)
    # Cross-check the p2p headers against the getblockfilter RPC.
    main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash,
                                                  'basic')['header']
    tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash,
                                                 'basic')['header']
    assert_equal(
        response.headers,
        [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)])
    self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
    request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                               stop_hash=int(stale_block_hash, 16))
    node0.send_and_ping(request)
    response = node0.last_message['cfcheckpt']
    stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash,
                                                   'basic')['header']
    assert_equal(response.headers,
                 [int(header, 16) for header in (stale_cfcheckpt, )])
    self.log.info("Check that peers can fetch cfheaders on active chain.")
    request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                               start_height=1,
                               stop_hash=int(main_block_hash, 16))
    node0.send_and_ping(request)
    response = node0.last_message['cfheaders']
    main_cfhashes = response.hashes
    assert_equal(len(main_cfhashes), 1000)
    # Chained filter hashes must reproduce the checkpoint header.
    assert_equal(
        compute_last_header(response.prev_header, response.hashes),
        int(main_cfcheckpt, 16))
    self.log.info("Check that peers can fetch cfheaders on stale chain.")
    request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                               start_height=1,
                               stop_hash=int(stale_block_hash, 16))
    node0.send_and_ping(request)
    response = node0.last_message['cfheaders']
    stale_cfhashes = response.hashes
    assert_equal(len(stale_cfhashes), 1000)
    assert_equal(
        compute_last_header(response.prev_header, response.hashes),
        int(stale_cfcheckpt, 16))
    self.log.info("Check that peers can fetch cfilters.")
    stop_hash = self.nodes[0].getblockhash(10)
    request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                              start_height=1,
                              stop_hash=int(stop_hash, 16))
    node0.send_message(request)
    node0.sync_with_ping()
    response = node0.pop_cfilters()
    assert_equal(len(response), 10)
    self.log.info("Check that cfilter responses are correct.")
    for cfilter, cfhash, height in zip(response, main_cfhashes,
                                       range(1, 11)):
        block_hash = self.nodes[0].getblockhash(height)
        assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
        assert_equal(cfilter.block_hash, int(block_hash, 16))
        computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
        assert_equal(computed_cfhash, cfhash)
    self.log.info("Check that peers can fetch cfilters for stale blocks.")
    request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                              start_height=1000,
                              stop_hash=int(stale_block_hash, 16))
    node0.send_message(request)
    node0.sync_with_ping()
    response = node0.pop_cfilters()
    assert_equal(len(response), 1)
    cfilter = response[0]
    assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
    assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
    computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
    assert_equal(computed_cfhash, stale_cfhashes[999])
    self.log.info(
        "Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection."
    )
    requests = [
        msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                         stop_hash=int(main_block_hash, 16)),
        msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                         start_height=1000,
                         stop_hash=int(main_block_hash, 16)),
        msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                        start_height=1000,
                        stop_hash=int(main_block_hash, 16)),
    ]
    for request in requests:
        node1 = self.nodes[1].add_p2p_connection(P2PInterface())
        node1.send_message(request)
        node1.wait_for_disconnect()
    self.log.info("Check that invalid requests result in disconnection.")
    requests = [
        # Requesting too many filters results in disconnection.
        msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                        start_height=0,
                        stop_hash=int(main_block_hash, 16)),
        # Requesting too many filter headers results in disconnection.
        msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                         start_height=0,
                         stop_hash=int(tip_hash, 16)),
        # Requesting unknown filter type results in disconnection.
        msg_getcfcheckpt(filter_type=255,
                         stop_hash=int(main_block_hash, 16)),
        # Requesting unknown hash results in disconnection.
        msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=123456789,
        ),
    ]
    for request in requests:
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())
        node0.send_message(request)
        node0.wait_for_disconnect()
def run_test(self):
    """Test that no p2p messages leak to pre-handshake peers and that peers
    advertising unsupported service bits are disconnected before a cutoff
    date, but tolerated after it (August 1st 2018)."""
    self.nodes[0].setmocktime(1501545600)  # August 1st 2017
    no_version_bannode = self.nodes[0].add_p2p_connection(
        CNodeNoVersionBan(), send_version=False)
    no_version_idlenode = self.nodes[0].add_p2p_connection(
        CNodeNoVersionIdle(), send_version=False)
    no_verack_idlenode = self.nodes[0].add_p2p_connection(
        CNodeNoVerackIdle())
    # NOTE(review): the variable names suggest these peers should advertise
    # service bits 5 and 7 (e.g. NODE_NETWORK | (1 << 5)), but only
    # NODE_NETWORK is passed — confirm the intended services mask.
    unsupported_service_bit5_node = self.nodes[0].add_p2p_connection(
        CLazyNode(), services=NODE_NETWORK)
    unsupported_service_bit7_node = self.nodes[0].add_p2p_connection(
        CLazyNode(), services=NODE_NETWORK)
    wait_until(lambda: no_version_bannode.ever_connected, timeout=10,
               lock=mininode_lock)
    wait_until(lambda: no_version_idlenode.ever_connected, timeout=10,
               lock=mininode_lock)
    wait_until(lambda: no_verack_idlenode.version_received, timeout=10,
               lock=mininode_lock)
    wait_until(lambda: unsupported_service_bit5_node.ever_connected,
               timeout=10, lock=mininode_lock)
    wait_until(lambda: unsupported_service_bit7_node.ever_connected,
               timeout=10, lock=mininode_lock)
    # Mine a block and make sure that it's not sent to the connected nodes
    self.nodes[0].generate(1)
    # Give the node enough time to possibly leak out a message
    time.sleep(5)
    # This node should have been banned
    assert not no_version_bannode.is_connected
    # These nodes should have been disconnected
    assert not unsupported_service_bit5_node.is_connected
    assert not unsupported_service_bit7_node.is_connected
    self.nodes[0].disconnect_p2ps()
    # Wait until all connections are closed
    wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)
    # Make sure no unexpected messages came in
    assert not no_version_bannode.unexpected_msg
    assert not no_version_idlenode.unexpected_msg
    assert not no_verack_idlenode.unexpected_msg
    assert not unsupported_service_bit5_node.unexpected_msg
    assert not unsupported_service_bit7_node.unexpected_msg
    self.log.info("Service bits 5 and 7 are allowed after August 1st 2018")
    self.nodes[0].setmocktime(1533168000)  # August 2nd 2018
    allowed_service_bit5_node = self.nodes[0].add_p2p_connection(
        P2PInterface(), services=NODE_NETWORK)
    allowed_service_bit7_node = self.nodes[0].add_p2p_connection(
        P2PInterface(), services=NODE_NETWORK)
    # After the cutoff the handshake completes normally.
    wait_until(lambda: allowed_service_bit5_node.message_count["verack"],
               lock=mininode_lock)
    wait_until(lambda: allowed_service_bit7_node.message_count["verack"],
               lock=mininode_lock)
assert { 'height': 546, 'hash': 'xxxx', 'branchlen': 546, 'status': 'headers-only', } in self.nodes[0].getchaintips() self.log.info("Feed all fork headers (failes due to checkpoint)") with self.nodes[0].assert_debug_log(['bad-fork-prior-to-checkpoint']) self.nodes[0].p2p.send_message(msg_headers(self.headers_fork)) self.nodes[0].p2p.wait_for_disconnect() self.log.info("Feed all fork headers (succeeds with out checkpoint)") self.restart_node(0, extra_args=['-nocheckpoints']) self.nodes[0].add_p2p_connection(P2PInterface()) self.nodes[0].p2p.send_message(msg_headers(self.headers_fork)) self.nodes[0].p2p.sync_with_ping() assert { "height": 2, "hash": "xxxx", "branchlen": 2, "status": "headers-only", } in self.nodes[0].getchaintips() self.nodes[1].add_p2p_connection(P2PInterface()): self.nodes[1].p2p_send_message(msg_headers(self.headers_fork)) self.nodes[1].p2p.sync_with_ping() assert { "height": 2, "hash": "xxxx",
def orphan_list_check(self):
    """Test that:
    - getdsprooflist omits orphans and/or includes them as expected for the include_orphans arg
    - the orphans appearing in the orphan list match what we expect"""
    # assumption is previous test left some proofs around
    len_noorphans = len(self.nodes[0].getdsprooflist(False, False))
    assert_greater_than(len_noorphans, 0)
    # previous test may or may not have left some orphans around, account
    # for them
    len_orphans = len(
        self.nodes[0].getdsprooflist(False, True)) - len_noorphans
    # Known dspids of the two canned orphan proofs below.
    orphans_ids = [
        "978c2b3d829dbc934c170ff797c539a86d35fcfc0ec806a5753c00794cd5caad",
        "0e7f2e002073916cfa16692df9b44c2d52e808f7b55e75e7d941356dd90f2096",
    ]
    # Two serialized double-spend proofs referencing txs unknown to the
    # node, so they will be treated as orphans when received over p2p.
    orphans_data = [
        bytes.fromhex(hx) for hx in (
            "326bd6eee699d18308a04720583f663ba039070a1abd8a59868ee03a1c250be10000000001000000feffffffbadb1500dbd7cf882"
            "c620ed8fb660b64444bfb4febf6c553d0f19cafdc3070bc2c27664618606b350cd8bf565266bc352f0caddcf01e8fa789dd8a1538"
            "6327cf8cabe198fdef7e5d2f370d4e96ab7cc22482f181b2c0e7e6275838aeed19eeedbfd378170141467adbad7deb7635bdf6bbe"
            "4c605ce57c3dccd01f5fb9e32b22c3479a1d3f143f3f9592f9e1ef9ea96f01141b261e468c46d31a4a63cde692947d126f34641e3"
            "4101000000feffffffbadb1500dbd7cf882c620ed8fb660b64444bfb4febf6c553d0f19cafdc3070bc2c27664618606b350cd8bf5"
            "65266bc352f0caddcf01e8fa789dd8a15386327cf8cabe198188af582e7a09fcfe0b1a37ee3ca6c91f80c13006e595c79320ac38d"
            "40a945cf0141210f8a36fe24b9fb1cb5a2a8cb01ac27d58410d8d8f3abf6fe935b2b1c1eadb285a4cdcd24727472af4d65b1c7ccb"
            "120361bdcbcadfb2f1436df9bfe9b9a5b0641",
            "11565d6e11586e4d0b989358be23299458afd76d8eedad32f96a671f775970740000000001000000feffffff4fdc1500ac7850e9b"
            "64559f703a9e6068bde8c175761408f0777299691083b0fc534aef618606b350cd8bf565266bc352f0caddcf01e8fa789dd8a1538"
            "6327cf8cabe198cdf604b8294fe87f39c637bcab10869db7cc306b0ddbf35ed92ab526dd18af69014126b3e82473f456f9bcb2bf1"
            "20ede1ad6e3ee588935b70033cfb625c317ced26f116b54d2effc3c9abf5efd38cffae57af50fb5fef88e1be7dc9d82a415fc1367"
            "4101000000feffffff4fdc1500ac7850e9b64559f703a9e6068bde8c175761408f0777299691083b0fc534aef618606b350cd8bf5"
            "65266bc352f0caddcf01e8fa789dd8a15386327cf8cabe1981194289492779938f938ce59ba48f916ba0b883803fbf2bfab22bf8d"
            "b09227ba0141fdb9ff69c028a6a1e4143bedcf2f44b1ea2b6996bd463440f9d3037845ad7a879963acbb424d3850ba6affdf81325"
            "e7753294a2e1959d9e84ba6108ce15e7cdc41",
        )
    ]
    orphans_proofs = []
    orphans_outpoints = set()
    for od in orphans_data:
        proof = CDSProof()
        proof.deserialize(BytesIO(od))
        orphans_outpoints.add((
            int.to_bytes(proof.prevTxId, 32, byteorder='big').hex(),  # txid
            proof.prevOutIndex  # vout
        ))
        orphans_proofs.append(proof)
    # The two proofs must spend distinct outpoints.
    assert len(orphans_outpoints) == 2
    p2p = P2PInterface()
    self.nodes[0].add_p2p_connection(p2p)
    wait_until(lambda: sum(p2p.message_count.values()) > 0,
               lock=mininode_lock)
    # send orphans to node0
    for proof in orphans_proofs:
        p2p.send_message(msg_dsproof(proof))
    # wait for node0 to have acknowledged the orphans
    wait_until(lambda: len(self.nodes[0].getdsprooflist(False, True))
               == len_noorphans + len_orphans + 2)

    def check(len_noorphans, len_orphans):
        # verify that node0 has a view of the orphans that we expect
        dsplist_all = self.nodes[0].getdsprooflist(False, True)
        non_orph_ct = 0
        orph_ct = 0
        orphs_seen = set()
        outpoints_seen = set()
        matches = 0
        for dspid in dsplist_all:
            dsp = self.nodes[0].getdsproof(dspid, True)
            if dsp["txid"] is not None:
                # Proof is associated with a tx, so it is not an orphan.
                non_orph_ct += 1
                continue
            orphs_seen.add(dsp["dspid"])
            orph_ct += 1
            op = dsp["outpoint"]
            outpoints_seen.add((op["txid"], op["vout"]))
            hexdata = self.nodes[0].getdsproof(dspid, False)["hex"]
            try:
                # ensure hexdata we got for this dspid matches our data
                orphans_data.index(bytes.fromhex(hexdata))
                orphans_ids.index(dspid)
                matches += 1
            except ValueError:
                pass  # this is ok, stale orphan (ignore)
        assert_equal(matches, len(orphans_data))
        assert_equal(non_orph_ct, len_noorphans)
        assert_equal(orph_ct, len_orphans)
        assert_equal(orphs_seen & set(orphans_ids), set(orphans_ids))
        assert_equal(outpoints_seen & orphans_outpoints, orphans_outpoints)

    check(len_noorphans=len_noorphans,
          len_orphans=len_orphans + len(orphans_ids))
    # Mining a block should take every dsproof and send them to the orphan
    # list; it should also keep the same orphans from before around
    block_hash = self.nodes[0].generate(1)[0]
    self.sync_all()
    # No non-orphans
    assert_equal(len(self.nodes[0].getdsprooflist()), 0)
    # All previous dsproofs are now orphans (because their associated tx was
    # mined)
    assert_equal(len(self.nodes[0].getdsprooflist(False, True)),
                 len_orphans + len_noorphans + len(orphans_ids))
    # Test reorg behavior. On reorg, all tx's that go back to mempool should
    # continue to have their previous proofs. We invalidate the block and
    # make sure that all the orphaned dsp's got claimed again by their
    # respective tx's which were put back into the mempool
    self.nodes[0].invalidateblock(block_hash)
    wait_until(
        lambda: len(self.nodes[0].getdsprooflist()) == len_noorphans,
        timeout=10)
    check(len_noorphans=len_noorphans,
          len_orphans=len_orphans + len(orphans_ids))
    # Now put the block back
    self.nodes[0].reconsiderblock(block_hash)
    self.sync_all()
    # There should again be no non-orphans
    assert_equal(len(self.nodes[0].getdsprooflist()), 0)
    # All previous dsproofs are now orphans again
    assert_equal(len(self.nodes[0].getdsprooflist(False, True)),
                 len_orphans + len_noorphans + len(orphans_ids))
    # Wait for all orphans to get auto-cleaned (this may take up to 60
    # seconds)
    self.nodes[0].setmocktime(int(time.time() + 100))
    wait_until(lambda: len(self.nodes[0].getdsprooflist(False, True)) == 0,
               timeout=90)
def run_test(self):
    """Test enforcement of CHECKLOCKTIMEVERIFY (BIP65) at CLTV_HEIGHT.

    - Before activation, an invalid-according-to-CLTV tx can be mined.
    - At activation, blocks below version 4 are rejected as obsolete.
    - A version 4 block containing an invalid-according-to-CLTV tx is
      invalid, while one with a valid-according-to-CLTV tx is accepted.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())
    network_thread_start()
    # wait_for_verack ensures that the P2P connection is fully up.
    self.nodes[0].p2p.wait_for_verack()
    self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2))
    self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
    self.nodeaddress = self.nodes[0].getnewaddress()
    self.log.info(
        "Test that an invalid-according-to-CLTV transaction can still appear in a block"
    )
    fundtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[0],
                                 self.nodeaddress, 49.99)
    fundtx, spendtx = cltv_lock_to_height(self.nodes[0], fundtx,
                                          self.nodeaddress, 49.98)
    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                         block_time)
    block.nVersion = 3
    block.vtx.append(fundtx)
    # include the -1 CLTV in block
    block.vtx.append(spendtx)
    make_conform_to_ctor(block)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is valid
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)
    self.log.info("Test that blocks must now be at least version 4")
    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
    block.nVersion = 3
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The obsolete-version block must not advance the tip.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert_equal(self.nodes[0].p2p.last_message["reject"].code,
                     REJECT_OBSOLETE)
        assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                     b'bad-version(0x00000003)')
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        del self.nodes[0].p2p.last_message["reject"]
    self.log.info(
        "Test that invalid-according-to-cltv transactions cannot appear in a block"
    )
    block.nVersion = 4
    fundtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[1],
                                 self.nodeaddress, 49.99)
    fundtx, spendtx = cltv_lock_to_height(self.nodes[0], fundtx,
                                          self.nodeaddress, 49.98)
    # The funding tx only has unexecuted bad CLTV, in scriptpubkey; this is
    # valid.
    self.nodes[0].p2p.send_and_ping(msg_tx(fundtx))
    assert fundtx.hash in self.nodes[0].getrawmempool()
    # Mine a block containing the funding transaction
    block.vtx.append(fundtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is valid
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)
    # We show that this tx is invalid due to CLTV by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal([{
        'txid': spendtx.hash,
        'allowed': False,
        'reject-reason':
        '64: non-mandatory-script-verify-flag (Negative locktime)'
    }], self.nodes[0].testmempoolaccept(
        rawtxs=[spendtx.serialize().hex()], allowhighfees=True))
    rejectedtx_signed = self.nodes[0].signrawtransactionwithwallet(
        ToHex(spendtx))
    # Couldn't complete signature due to CLTV
    assert (rejectedtx_signed['errors'][0]['error'] == 'Negative locktime')
    tip = block.hash
    block_time += 1
    block = create_block(block.sha256, create_coinbase(CLTV_HEIGHT + 1),
                         block_time)
    block.nVersion = 4
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is invalid
    assert_equal(self.nodes[0].getbestblockhash(), tip)
    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        # The reject code depends on whether script check threads ran; see
        # the parallel-validation note in the sibling DERSIG test.
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD
        ]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                         b'blk-bad-inputs')
        else:
            assert b'Negative locktime' in self.nodes[0].p2p.last_message[
                "reject"].reason
    self.log.info(
        "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted"
    )
    fundtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[2],
                                 self.nodeaddress, 49.99)
    fundtx, spendtx = cltv_lock_to_height(self.nodes[0], fundtx,
                                          self.nodeaddress, 49.98,
                                          CLTV_HEIGHT)
    # make sure sequence is nonfinal and locktime is good
    spendtx.vin[0].nSequence = 0xfffffffe
    spendtx.nLockTime = CLTV_HEIGHT
    # both transactions are fully valid
    self.nodes[0].sendrawtransaction(ToHex(fundtx))
    self.nodes[0].sendrawtransaction(ToHex(spendtx))
    # Modify the transactions in the block to be valid against CLTV
    block.vtx.pop(1)
    block.vtx.append(fundtx)
    block.vtx.append(spendtx)
    make_conform_to_ctor(block)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is now valid
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def run_test(self):
    """Exercise the preciousblock RPC on three nodes holding competing chains.

    preciousblock marks a block as preferred over same-work alternatives;
    this test checks that the preference takes effect, can be flipped back
    and forth, and is overridden once one chain gains more work.
    """
    # Fund all three nodes and give each a P2P connection.
    self.setup_stake_coins(self.nodes[0], self.nodes[1], self.nodes[2])
    for i in range(self.num_nodes):
        self.nodes[i].add_p2p_connection(P2PInterface())

    # Wait until every P2P handshake (verack) has completed before mining.
    wait_until(lambda: all(self.nodes[i].p2p.got_verack() for i in range(self.num_nodes)), timeout=10)

    self.log.info("Ensure submitblock can in principle reorg to a competing chain")
    self.nodes[0].generate(1)
    assert_equal(self.nodes[0].getblockcount(), 1)
    hashZ = self.nodes[1].generate(2)[-1]
    assert_equal(self.nodes[1].getblockcount(), 2)
    node_sync_via_rpc(self.nodes[0:3])
    # Node 1's 2-block chain has more work, so node 0 reorgs to it.
    assert_equal(self.nodes[0].getbestblockhash(), hashZ)

    # Build two equal-work forks of length 3 on disconnected nodes.
    self.log.info("Mine blocks A-B-C on Node 0")
    hashC = self.nodes[0].generate(3)[-1]
    assert_equal(self.nodes[0].getblockcount(), 5)
    self.log.info("Mine competing blocks E-F-G on Node 1")
    hashG = self.nodes[1].generate(3)[-1]
    assert_equal(self.nodes[1].getblockcount(), 5)
    assert hashC != hashG

    self.log.info("Connect nodes and check no reorg occurs")
    # Submit competing blocks via RPC so any reorg should occur before we
    # proceed (no way to wait on inaction for p2p sync)
    node_sync_via_rpc(self.nodes[0:2])
    connect_nodes_bi(self.nodes,0,1)
    # Equal work: each node keeps its own tip (first-seen rule).
    assert_equal(self.nodes[0].getbestblockhash(), hashC)
    assert_equal(self.nodes[1].getbestblockhash(), hashG)

    # Flip node 0's preference to G and back to C.
    self.log.info("Make Node0 prefer block G")
    self.nodes[0].preciousblock(hashG)
    assert_equal(self.nodes[0].getbestblockhash(), hashG)
    self.log.info("Make Node0 prefer block C again")
    self.nodes[0].preciousblock(hashC)
    assert_equal(self.nodes[0].getbestblockhash(), hashC)

    # Same exercise on node 1, in the opposite direction.
    self.log.info("Make Node1 prefer block C")
    self.nodes[1].preciousblock(hashC)
    sync_blocks(self.nodes[0:2])  # wait because node 1 may not have downloaded hashC
    assert_equal(self.nodes[1].getbestblockhash(), hashC)
    self.log.info("Make Node1 prefer block G again")
    self.nodes[1].preciousblock(hashG)
    assert_equal(self.nodes[1].getbestblockhash(), hashG)
    self.log.info("Make Node0 prefer block G again")
    self.nodes[0].preciousblock(hashG)
    assert_equal(self.nodes[0].getbestblockhash(), hashG)
    self.log.info("Make Node1 prefer block C again")
    self.nodes[1].preciousblock(hashC)
    assert_equal(self.nodes[1].getbestblockhash(), hashC)

    # Extend the G fork so it has strictly more work than C's fork.
    self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
    self.nodes[0].generate(1)
    assert_equal(self.nodes[0].getblockcount(), 6)
    sync_blocks(self.nodes[0:2])
    hashH = self.nodes[0].getbestblockhash()
    assert_equal(self.nodes[1].getbestblockhash(), hashH)

    # preciousblock cannot force a switch to a chain with less total work.
    self.log.info("Node1 should not be able to prefer block C anymore")
    self.nodes[1].preciousblock(hashC)
    assert_equal(self.nodes[1].getbestblockhash(), hashH)

    # Third node builds yet another equal-work fork in isolation.
    self.log.info("Mine competing blocks I-J-K-L on Node 2")
    self.nodes[2].generate(4)
    assert_equal(self.nodes[2].getblockcount(), 6)
    hashL = self.nodes[2].getbestblockhash()

    self.log.info("Connect nodes and check no reorg occurs")
    node_sync_via_rpc(self.nodes[1:3])
    connect_nodes_bi(self.nodes,1,2)
    connect_nodes_bi(self.nodes,0,2)
    assert_equal(self.nodes[0].getbestblockhash(), hashH)
    assert_equal(self.nodes[1].getbestblockhash(), hashH)
    assert_equal(self.nodes[2].getbestblockhash(), hashL)

    # preciousblock still works across the newly connected nodes.
    self.log.info("Make Node1 prefer block L")
    self.nodes[1].preciousblock(hashL)
    assert_equal(self.nodes[1].getbestblockhash(), hashL)
    self.log.info("Make Node2 prefer block H")
    self.nodes[2].preciousblock(hashH)
    assert_equal(self.nodes[2].getbestblockhash(), hashH)
def run_test(self):
    """Test enforcement of strict-DER signatures (BIP66) at DERSIG_HEIGHT.

    Checks that: (1) version-2 blocks are rejected as obsolete once DERSIG
    is active, (2) a block containing a non-DER-signed transaction is
    rejected, and (3) a compliant version-3 block is accepted.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())
    network_thread_start()

    # wait_for_verack ensures that the P2P connection is fully up.
    self.nodes[0].p2p.wait_for_verack()

    # Mine up to just before the enforcement height.
    self.log.info("Mining {} blocks".format(DERSIG_HEIGHT - 1))
    self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 1)
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info("Test that blocks must now be at least version 3")
    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT), block_time)
    block.nVersion = 2  # obsolete version at this height
    block.rehash()
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # Tip must be unchanged: the v2 block was rejected.
    assert_equal(self.nodes[0].getbestblockhash(), tip)

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
    with mininode_lock:
        assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
        assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)')
        assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
        # Clear so the next wait_until below sees a fresh reject message.
        del self.nodes[0].p2p.last_message["reject"]

    self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
    block.nVersion = 3

    spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0)
    unDERify(spendtx)  # mangle the signature encoding
    spendtx.rehash()

    # Now we verify that a block with this transaction is invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    assert_equal(self.nodes[0].getbestblockhash(), tip)

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
    with mininode_lock:
        # We can receive different reject messages depending on whether
        # bitcoind is running with multiple script check threads. If script
        # check threads are not in use, then transaction script validation
        # happens sequentially, and bitcoind produces more specific reject
        # reasons.
        assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'blk-bad-inputs')
        else:
            assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason

    self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
    # Replace the bad transaction with a properly signed one and re-solve.
    block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The compliant block must now be the best block.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Test that post-genesis blocks must be version >= 4 and that a v4
    block carrying a CLTV-valid transaction is accepted.

    On this chain CSV (and by extension CLTV enforcement) is active from
    genesis, so version checks apply to the very first mined block.
    """
    # First, quick check that CSV is ACTIVE at genesis
    assert_equal(self.nodes[0].getblockcount(), 0)
    assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')

    self.nodes[0].add_p2p_connection(P2PInterface())
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info("Test that blocks past the genesis block must be at least version 4")

    # Create a v3 block
    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(1), block_time)
    block.nVersion = 3
    block.solve()

    # The best block should not have changed, because...
    # NOTE(review): in the original ordering this assertion runs before the
    # block is sent below, so here it only confirms the starting tip — the
    # debug-log check inside the context manager is what verifies rejection.
    assert_equal(self.nodes[0].getbestblockhash(), tip)

    # ... we rejected it because it is v3
    with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]):
        # Send it to the node
        self.nodes[0].p2p.send_and_ping(msg_block(block))

    self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")

    # Generate 100 blocks so that first coinbase matures
    generated_blocks = self.nodes[0].generate(100)
    spendable_coinbase_txid = self.nodes[0].getblock(generated_blocks[0])['tx'][0]
    coinbase_value = self.nodes[0].decoderawtransaction(
        self.nodes[0].gettransaction(spendable_coinbase_txid)["hex"])["vout"][0]["value"]
    tip = generated_blocks[-1]

    # Construct a v4 block
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(len(generated_blocks) + 1), block_time)
    block.nVersion = 4

    # Create a CLTV transaction
    # fee=coinbase_value - 1 leaves exactly 1 coin for the 1.0 output.
    spendtx = create_transaction(self.nodes[0], spendable_coinbase_txid,
                                 self.nodeaddress, amount=1.0,
                                 fee=coinbase_value - 1)
    spendtx = cltv_validate(self.nodes[0], spendtx, 1)
    spendtx.rehash()

    # Add the CLTV transaction and prepare for sending
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    # Send block and check that it becomes new best block
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Test node startup behavior against missing, valid, and corrupted
    genesis blocks / genesis.dat files.

    Phase 1 constructs structurally invalid genesis blocks (bad coinbase,
    wrong height, extra tx, missing/insufficient proof, missing merkle
    roots) and expects specific init errors.  Phase 2 corrupts the
    genesis.dat file on disk in various ways.  Phase 3 corrupts the file
    after a chain already exists and verifies recovery from a backup.
    """
    # Start from a clean datadir with no genesis file at all.
    self.stop_node(0)
    shutil.rmtree(self.nodes[0].datadir)
    initialize_datadir(self.options.tmpdir, 0, self.signblockpubkeys, self.signblockthreshold)

    self.log.info("Test with no genesis file")
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: unable to read genesis file', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Phase 1: Tests using genesis block")
    self.log.info("Test correct genesis file")
    self.writeGenesisBlockToFile(self.nodes[0].datadir)
    self.start_node(0)
    self.stop_node(0)

    self.log.info("Restart with correct genesis file")
    self.start_node(0)
    self.stop_node(0)

    self.log.info("Test incorrect genesis block - No Coinbase")
    genesis_coinbase = createGenesisCoinbase(self.signblockthreshold, self.signblockpubkeys)
    # A coinbase input must spend the null prevout; point it elsewhere.
    genesis_coinbase.vin[0].prevout.hash = 111111
    genesis = createIncorectGenesisBlock(genesis_coinbase, self.signblockprivkeys)
    writeIncorrectGenesisBlockToFile(self.nodes[0].datadir, genesis)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid genesis block', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Test incorrect genesis block - Incorrect height")
    genesis_coinbase_height = createGenesisCoinbase(self.signblockthreshold, self.signblockpubkeys)
    # The coinbase prevout index encodes the height; 10 is wrong for genesis.
    genesis_coinbase_height.vin[0].prevout.n = 10
    genesis = createIncorectGenesisBlock(genesis_coinbase_height, self.signblockprivkeys)
    writeIncorrectGenesisBlockToFile(self.nodes[0].datadir, genesis)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid height in genesis block', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Test incorrect genesis block - Multiple transactions")
    genesis_coinbase = createGenesisCoinbase(self.signblockthreshold, self.signblockpubkeys)
    genesis = createIncorectGenesisBlock(genesis_coinbase, self.signblockprivkeys)
    # Genesis may contain only the coinbase; append an empty extra tx.
    genesis.vtx.append(CTransaction())
    genesis.hashMerkleRoot = genesis.calc_merkle_root()
    genesis.hashImMerkleRoot = genesis.calc_immutable_merkle_root()
    genesis.solve(self.signblockprivkeys)
    writeIncorrectGenesisBlockToFile(self.nodes[0].datadir, genesis)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid genesis block', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Test incorrect genesis block - No proof")
    genesis = createIncorectGenesisBlock(genesis_coinbase, self.signblockprivkeys)
    genesis.proof.clear()
    writeIncorrectGenesisBlockToFile(self.nodes[0].datadir, genesis)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid genesis block', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Test incorrect genesis block - Insufficient Proof")
    genesis = createIncorectGenesisBlock(genesis_coinbase, self.signblockprivkeys)
    # One signature short of the required threshold.
    genesis.proof = genesis.proof[:self.signblockthreshold - 1]
    writeIncorrectGenesisBlockToFile(self.nodes[0].datadir, genesis)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid genesis block', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Test incorrect genesis block - No hashMerkleRoot")
    genesis_coinbase = createGenesisCoinbase(self.signblockthreshold, self.signblockpubkeys)
    genesis = CBlock()
    genesis.nTime = int(time.time() + 600)
    genesis.hashPrevBlock = 0
    genesis.vtx.append(genesis_coinbase)
    # not populating hashMerkleRoot and hashImMerkleRoot
    genesis.solve(self.signblockprivkeys)
    writeIncorrectGenesisBlockToFile(self.nodes[0].datadir, genesis)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid MerkleRoot in genesis block', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Test incorrect genesis block - No hashImMerkleRoot")
    genesis_coinbase = createGenesisCoinbase(self.signblockthreshold, self.signblockpubkeys)
    genesis = CBlock()
    genesis.nTime = int(time.time() + 600)
    genesis.hashPrevBlock = 0
    genesis.vtx.append(genesis_coinbase)
    genesis.hashMerkleRoot = genesis.calc_merkle_root()
    # not populating hashImMerkleRoot
    genesis.solve(self.signblockprivkeys)
    writeIncorrectGenesisBlockToFile(self.nodes[0].datadir, genesis)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid MerkleRoot in genesis block', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Phase 2: Tests using genesis.dat file")
    self.log.info("Test new genesis file")
    # Clearing the cached block forces writeGenesisBlockToFile to build a
    # new genesis with a different timestamp.
    self.genesisBlock = None
    self.writeGenesisBlockToFile(self.nodes[0].datadir, int(time.time()))  # different genesis file
    self.nodes[0].assert_start_raises_init_error([], 'Error: Incorrect or no genesis block found.', match=ErrorMatch.PARTIAL_REGEX)

    datadir = self.nodes[0].datadir
    genesisFile = os.path.join(datadir, "genesis.dat")

    self.log.info("Test incorrect genesis file - append 2 bytes")
    self.writeGenesisBlockToFile(self.nodes[0].datadir)
    with open(genesisFile, 'a', encoding='utf8') as f:
        f.write("abcd")
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid genesis file', match=ErrorMatch.PARTIAL_REGEX)
    os.remove(genesisFile)

    self.log.info("Test incorrect genesis file - append many bytes")
    self.writeGenesisBlockToFile(self.nodes[0].datadir)
    with open(genesisFile, 'a', encoding='utf8') as f:
        s = "".join([str(i) for i in range(0, 16) for j in range(0, 100)])
        f.write(s)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid genesis file', match=ErrorMatch.PARTIAL_REGEX)
    os.remove(genesisFile)

    self.log.info("Test incorrect genesis file - replace 2 bytes")
    self.writeGenesisBlockToFile(self.nodes[0].datadir)
    with open(genesisFile, 'r+', encoding='utf8') as f:
        content = f.readline()
        clen = len(content)
        # Overwrite 4 hex chars (2 bytes) without changing the length.
        content = content[:1000] + "0000" + content[1004:]
        assert (len(content) == clen)
        f.write(content)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid genesis file', match=ErrorMatch.PARTIAL_REGEX)
    os.remove(genesisFile)

    self.log.info("Test incorrect genesis file - insert 2 bytes")
    content = ""
    self.writeGenesisBlockToFile(self.nodes[0].datadir)
    with open(genesisFile, 'r+', encoding='utf8') as f:
        content = f.readline()
        clen = len(content)
        content = content[:2000] + "1111" + content[2000:]
        assert (len(content) == clen + 4)
        f.write(content)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid genesis file', match=ErrorMatch.PARTIAL_REGEX)
    os.remove(genesisFile)

    self.log.info("Test incorrect genesis file - remove 2 bytes")
    content = ""
    self.writeGenesisBlockToFile(self.nodes[0].datadir)
    with open(genesisFile, 'r+', encoding='utf8') as f:
        content = f.readline()
        clen = len(content)
        content = content[:100] + content[104:]
        assert (len(content) == clen - 4)
        f.write(content)
    self.nodes[0].assert_start_raises_init_error([], 'ReadGenesisBlock: invalid genesis file', match=ErrorMatch.PARTIAL_REGEX)
    os.remove(genesisFile)

    self.log.info("Test incorrect genesis file - truncate file")
    self.writeGenesisBlockToFile(self.nodes[0].datadir)
    with open(genesisFile, 'r+', encoding='utf8') as f:
        f.truncate(500)
    self.nodes[0].assert_start_raises_init_error([], 'CDataStream::read().*end of data', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Phase 3: Edit genesis file after sarting the blockchain")
    self.stop_node(0)
    shutil.rmtree(self.nodes[0].datadir)
    initialize_datadir(self.options.tmpdir, 0, self.signblockpubkeys, self.signblockthreshold)

    self.log.info("Starting node")
    self.writeGenesisBlockToFile(self.nodes[0].datadir)
    self.start_node(0)
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.log.info("Generating 10 blocks")
    blocks = self.nodes[0].generate(10, self.signblockprivkeys)
    self.sync_all([self.nodes[0:1]])
    assert_equal(self.nodes[0].getbestblockhash(), blocks[-1])
    self.stop_node(0)
    # Keep a pristine copy of the datadir for the recovery check below.
    shutil.copytree(self.nodes[0].datadir, os.path.join(self.options.tmpdir, "backup"))

    self.log.info("Creating corrupt genesis file")
    with open(genesisFile, 'r+', encoding='utf8') as f:
        content = f.readline()
        clen = len(content)
        content = content[:1500] + "0000" + content[1504:]
        assert (len(content) == clen)
        f.write(content)
    self.nodes[0].assert_start_raises_init_error([])

    self.log.info("Starting node again")
    # A freshly generated (mismatching) genesis must also be refused once a
    # chain already exists on disk.
    self.genesisBlock = None
    self.writeGenesisBlockToFile(self.nodes[0].datadir)
    self.nodes[0].assert_start_raises_init_error([], 'Error: Incorrect or no genesis block found.', match=ErrorMatch.PARTIAL_REGEX)

    self.log.info("Recovering original blockchain")
    shutil.rmtree(self.nodes[0].datadir)
    shutil.copytree(os.path.join(self.options.tmpdir, "backup"), self.nodes[0].datadir)
    self.start_node(0)
    self.nodes[0].add_p2p_connection(P2PInterface())
    self.sync_all([self.nodes[0:1]])
    assert_equal(self.nodes[0].getbestblockhash(), blocks[-1])
    self.log.info("Blockchain intact!")
def run_test(self):
    """Test enforcement of the NULLDUMMY rule (BIP147).

    The extra stack element consumed by CHECKMULTISIG must be the empty
    vector; transactions with a non-null dummy are rejected from the
    mempool and from blocks, for both base and P2WSH multisig spends.
    """
    self.setup_stake_coins(self.nodes[0])

    # One key, reused as 1-of-1 multisig in base and p2sh-segwit form.
    self.address = self.nodes[0].getnewaddress()
    self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])['address']
    self.wit_address = self.nodes[0].addwitnessaddress(self.address)
    self.wit_ms_address = self.nodes[0].addmultisigaddress(1, [self.address], '', 'p2sh-segwit')['address']

    self.nodes[0].add_p2p_connection(P2PInterface())
    self.coinbase_blocks = self.nodes[0].generate(2)  # Block 2
    coinbase_txid = []
    for i in self.coinbase_blocks:
        coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
    # Tip state tracked for the send_block helper.
    self.lastblockhash = self.nodes[0].getbestblockhash()
    self.tip = int("0x" + self.lastblockhash, 0)
    self.lastblockheight = self.nodes[0].getblockcount()
    self.lastblocktime = int(time.time()) + 2

    # Fund both multisig addresses from the two coinbases and mine the
    # funding transactions.
    ms_tx = create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=PROPOSER_REWARD - 1)
    ms_txid = self.nodes[0].sendrawtransaction(bytes_to_hex_str(ms_tx.serialize_with_witness()), True)

    wit_ms_tx = create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=PROPOSER_REWARD - 1)
    wit_ms_txid = self.nodes[0].sendrawtransaction(bytes_to_hex_str(wit_ms_tx.serialize_with_witness()), True)

    self.send_block(self.nodes[0], [ms_tx, wit_ms_tx], True)

    self.log.info("Test 1: Non-NULLDUMMY base multisig transaction is invalid")
    test1tx = create_transaction(self.nodes[0], ms_txid, self.address, amount=PROPOSER_REWARD - 2)
    # Keep a pristine copy before mangling; it is re-sent in Test 3.
    test3txs = [CTransaction(test1tx)]
    trueDummy(test1tx)  # replace the null dummy with OP_TRUE
    assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test1tx.serialize_with_witness()), True)
    self.send_block(self.nodes[0], [test1tx])

    self.log.info("Test 2: Non-NULLDUMMY P2WSH multisig transaction invalid")
    test2tx = create_transaction(self.nodes[0], wit_ms_txid, self.wit_address, amount=PROPOSER_REWARD - 2)
    test3txs.append(CTransaction(test2tx))
    # Corrupt the witness dummy element (must be empty to be valid).
    test2tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
    assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
    self.send_block(self.nodes[0], [test2tx])

    self.log.info("Test 3: NULLDUMMY compliant base/witness transactions should be accepted to mempool")
    for i in test3txs:
        self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True)
    self.send_block(self.nodes[0], test3txs, True)
def run_test(self):
    """Test generation and relay of double-spend proofs (dsproof-beta).

    Walks through: proof well-formedness for a P2PKH double spend, no new
    proof for a triple spend, no proof for non-P2PKH spends, proofs for
    unconfirmed / mixed-ancestry / ANYONECANPAY cases, proof broadcast when
    the conflict arrives via RPC, and that a -doublespendproof=0 node
    ignores dsproof inventory messages.
    """
    # create a p2p receiver
    dspReceiver = P2PInterface()
    self.nodes[0].add_p2p_connection(dspReceiver)
    # workaround - nodes think they're in IBD unless one block is mined
    self.nodes[0].generate(1)
    self.sync_all()
    # Disconnect the third node, will be used later for triple-spend
    disconnect_nodes(self.nodes[1], self.nodes[2])
    # Put fourth node (the non-dsproof-enabled node) with the connected group
    # (we will check its log at the end to ensure it ignored dsproof inv's)
    non_dsproof_node = self.nodes[3]
    disconnect_nodes(self.nodes[2], non_dsproof_node)
    connect_nodes(self.nodes[1], non_dsproof_node)

    # Create and mine a regular non-coinbase transaction for spending
    fundingtxid = self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0]
    fundingtx = FromHex(CTransaction(), self.nodes[0].getrawtransaction(fundingtxid))

    # Create three conflicting transactions. They are only signed, but not
    # yet submitted to the mempool
    firstDSTx = create_raw_transaction(self.nodes[0], fundingtxid, self.nodes[0].getnewaddress(), 49.95)
    secondDSTx = create_raw_transaction(self.nodes[0], fundingtxid, self.nodes[0].getnewaddress(), 49.95)
    thirdDSTx = create_raw_transaction(self.nodes[0], fundingtxid, self.nodes[0].getnewaddress(), 49.95)

    # Send the two conflicting transactions to the network
    # Submit to two different nodes, because a node would simply reject
    # a double spend submitted through RPC
    firstDSTxId = self.nodes[0].sendrawtransaction(firstDSTx)
    self.nodes[1].call_rpc('sendrawtransaction', secondDSTx, ignore_error='txn-mempool-conflict')
    wait_until(
        lambda: dspReceiver.message_count["dsproof-beta"] == 1,
        lock=mininode_lock,
        timeout=25
    )

    # 1. The DSP message is well-formed and contains all fields
    # If the message arrived and was deserialized successfully, then 1. is satisfied
    dsp = dspReceiver.last_message["dsproof-beta"].dsproof
    # Collect serialized proofs to later assert each case produced a
    # distinct proof.
    dsps = set()
    dsps.add(dsp.serialize())

    # Check that it is valid, both spends are signed with the same key
    # NB: pushData is made of the sig + one last byte for hashtype
    pubkey = self.getpubkey()
    sighash1 = getSighashes(dsp.getPrevOutput(), dsp.spender1, fundingtx)
    sighash2 = getSighashes(dsp.getPrevOutput(), dsp.spender2, fundingtx)
    assert(pubkey.verify_ecdsa(dsp.spender1.pushData[0][:-1], sighash1))
    assert(pubkey.verify_ecdsa(dsp.spender2.pushData[0][:-1], sighash2))

    # 2. For p2pkh there is exactly one pushdata per spender
    assert_equal(1, len(dsp.spender1.pushData))
    assert_equal(1, len(dsp.spender2.pushData))

    # 3. The two spenders are different, specifically the signature (push data) has to be different.
    assert(dsp.spender1.pushData != dsp.spender2.pushData)

    # 4. The first & double spenders are sorted with two hashes as keys.
    assert(dsp.spender1.hashOutputs < dsp.spender2.hashOutputs)

    # 5. The double spent output is still available in the UTXO database,
    # implying no spending transaction has been mined.
    assert_equal(self.nodes[0].gettransaction(firstDSTxId)["confirmations"], 0)

    # The original fundingtx is the same as the transaction being spent reported by the DSP
    assert_equal(hex(dsp.prevTxId)[2:], fundingtxid)
    assert_equal(dsp.prevOutIndex, 0)

    # 6. No other valid proof is known.
    # IE if a valid proof is known, no new proofs will be constructed
    # We submit a _triple_ spend transaction to the third node
    connect_nodes(self.nodes[0], self.nodes[2])
    self.nodes[2].call_rpc('sendrawtransaction', thirdDSTx, ignore_error='txn-mempool-conflict')
    # Await for a new dsp to be relayed to the node
    # if such a dsp (or the double or triple spending tx) arrives, the test fails
    assert_raises(
        AssertionError,
        wait_until,
        lambda: dspReceiver.message_count["dsproof-beta"] == 2 or dspReceiver.message_count["tx"] == 2,
        lock=mininode_lock,
        timeout=5
    )

    # Only P2PKH inputs are protected
    # Check that a non-P2PKH output is not protected
    self.nodes[0].generate(1)
    fundingtxid = self.nodes[0].getblock(self.nodes[0].getblockhash(2))['tx'][0]
    fundingtx = FromHex(CTransaction(), self.nodes[0].getrawtransaction(fundingtxid))
    fundingtx.rehash()
    nonP2PKHTx = create_tx_with_script(fundingtx, 0, b'', int(49.95 * COIN), CScript([OP_TRUE]))
    signedNonP2PKHTx = self.nodes[0].signrawtransactionwithwallet(ToHex(nonP2PKHTx))
    self.nodes[0].sendrawtransaction(signedNonP2PKHTx['hex'])
    self.sync_all()
    tx = FromHex(CTransaction(), signedNonP2PKHTx['hex'])
    tx.rehash()
    # Two conflicting spends of the anyone-can-spend output.
    firstDSTx = create_tx_with_script(tx, 0, b'', int(49.90 * COIN), CScript([OP_TRUE]))
    secondDSTx = create_tx_with_script(tx, 0, b'', int(49.90 * COIN), CScript([OP_FALSE]))
    self.nodes[0].sendrawtransaction(ToHex(firstDSTx))
    self.nodes[1].call_rpc('sendrawtransaction', ToHex(secondDSTx), ignore_error='txn-mempool-conflict')
    # No proof should be produced for the non-P2PKH double spend.
    assert_raises(
        AssertionError,
        wait_until,
        lambda: dspReceiver.message_count["dsproof-beta"] == 2,
        lock=mininode_lock,
        timeout=5
    )

    # Check that unconfirmed outputs are also protected
    self.nodes[0].generate(1)
    unconfirmedtx = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 25)
    self.sync_all()
    firstDSTx = create_raw_transaction(self.nodes[0], unconfirmedtx, self.nodes[0].getnewaddress(), 24.9)
    secondDSTx = create_raw_transaction(self.nodes[0], unconfirmedtx, self.nodes[0].getnewaddress(), 24.9)
    self.nodes[0].sendrawtransaction(firstDSTx)
    self.nodes[1].call_rpc('sendrawtransaction', secondDSTx, ignore_error='txn-mempool-conflict')
    wait_until(
        lambda: dspReceiver.message_count["dsproof-beta"] == 2,
        lock=mininode_lock,
        timeout=5
    )
    dsp2 = dspReceiver.last_message["dsproof-beta"].dsproof
    dsps.add(dsp2.serialize())
    assert(len(dsps) == 2)

    # Check that a double spent tx, which has some non-P2PKH inputs
    # in its ancestor, still results in a dsproof being emitted.
    self.nodes[0].generate(1)
    # Create a 1-of-2 multisig address which will be an in-mempool
    # ancestor to a double-spent tx
    pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
    pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
    p2sh = self.nodes[0].addmultisigaddress(1, [pubkey0, pubkey1], "")['address']
    # Fund the p2sh address
    fundingtxid = self.nodes[0].sendtoaddress(p2sh, 49)
    vout = find_output(self.nodes[0], fundingtxid, Decimal('49'))
    self.sync_all()
    # Spend from the P2SH to a P2PKH, which we will double spend from
    # in the next step.
    p2pkh1 = self.nodes[0].getnewaddress()
    rawtx1 = create_raw_transaction(self.nodes[0], fundingtxid, p2pkh1, 48.999, vout)
    signed_tx1 = self.nodes[0].signrawtransactionwithwallet(rawtx1)
    txid1 = self.nodes[0].sendrawtransaction(signed_tx1['hex'])
    vout1 = find_output(self.nodes[0], txid1, Decimal('48.999'))
    self.sync_all()
    # Now double spend the P2PKH which has a P2SH ancestor.
    firstDSTx = create_raw_transaction(self.nodes[0], txid1, self.nodes[0].getnewaddress(), 48.9, vout1)
    secondDSTx = create_raw_transaction(self.nodes[0], txid1, self.nodes[1].getnewaddress(), 48.9, vout1)
    self.nodes[0].sendrawtransaction(firstDSTx)
    self.nodes[1].call_rpc('sendrawtransaction', secondDSTx, ignore_error='txn-mempool-conflict')
    # We still get a dsproof, showing that not all ancestors have
    # to be P2PKH.
    wait_until(
        lambda: dspReceiver.message_count["dsproof-beta"] == 3,
        lock=mininode_lock,
        timeout=5
    )
    dsp3 = dspReceiver.last_message["dsproof-beta"].dsproof
    dsps.add(dsp3.serialize())
    assert(len(dsps) == 3)

    # Check that a double spent tx, which has some unconfirmed ANYONECANPAY
    # transactions in its ancestry, still results in a dsproof being emitted.
    self.nodes[0].generate(1)
    fundingtxid = self.nodes[0].getblock(self.nodes[0].getblockhash(5))['tx'][0]
    vout1 = find_output(self.nodes[0], fundingtxid, Decimal('50'))
    addr = self.nodes[1].getnewaddress()
    pubkey = self.nodes[1].getaddressinfo(addr)['pubkey']
    inputs = [{'txid': fundingtxid, 'vout': vout1, 'amount': 49.99, 'scriptPubKey': pubkey}]
    outputs = {addr: 49.99}
    rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
    signed = self.nodes[0].signrawtransactionwithwallet(rawtx, None, "NONE|FORKID|ANYONECANPAY")
    assert 'complete' in signed
    assert_equal(signed['complete'], True)
    assert 'errors' not in signed
    txid = self.nodes[0].sendrawtransaction(signed['hex'])
    self.sync_all()
    # The ANYONECANPAY is still unconfirmed, but let's create some
    # double spends from it.
    vout2 = find_output(self.nodes[0], txid, Decimal('49.99'))
    firstDSTx = create_raw_transaction(self.nodes[1], txid, self.nodes[0].getnewaddress(), 49.98, vout2)
    secondDSTx = create_raw_transaction(self.nodes[1], txid, self.nodes[1].getnewaddress(), 49.98, vout2)
    self.nodes[0].sendrawtransaction(firstDSTx)
    self.nodes[1].call_rpc('sendrawtransaction', secondDSTx, ignore_error='txn-mempool-conflict')
    # We get a dsproof.
    wait_until(
        lambda: dspReceiver.message_count["dsproof-beta"] == 4,
        lock=mininode_lock,
        timeout=5
    )
    dsp4 = dspReceiver.last_message["dsproof-beta"].dsproof
    dsps.add(dsp4.serialize())
    assert(len(dsps) == 4)

    # Create a P2SH to double-spend directly (1-of-1 multisig)
    self.nodes[0].generate(1)
    self.sync_all()
    pubkey2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
    p2sh = self.nodes[0].addmultisigaddress(1, [pubkey2,], "")['address']
    fundingtxid = self.nodes[0].sendtoaddress(p2sh, 49)
    vout = find_output(self.nodes[0], fundingtxid, Decimal('49'))
    self.sync_all()
    # Now double spend it
    firstDSTx = create_raw_transaction(self.nodes[0], fundingtxid, self.nodes[0].getnewaddress(), 48.9, vout)
    secondDSTx = create_raw_transaction(self.nodes[0], fundingtxid, self.nodes[1].getnewaddress(), 48.9, vout)
    self.nodes[0].sendrawtransaction(firstDSTx)
    self.nodes[1].call_rpc('sendrawtransaction', secondDSTx, ignore_error='txn-mempool-conflict')
    # No dsproof is generated.
    assert_raises(
        AssertionError,
        wait_until,
        lambda: dspReceiver.message_count["dsproof-beta"] == 5,
        lock=mininode_lock,
        timeout=5
    )
    # Check end conditions - still only 4 DSPs
    last_dsp = dspReceiver.last_message["dsproof-beta"].dsproof
    dsps.add(last_dsp.serialize())
    assert(len(dsps) == 4)

    # Next, test that submitting a double-spend via the RPC interface also
    # results in a broadcasted dsproof
    self.nodes[0].generate(1)
    self.sync_all()
    fundingtxid = self.nodes[0].getblock(self.nodes[0].getblockhash(6))['tx'][0]
    # Create 2 new double-spends
    firstDSTx = create_raw_transaction(self.nodes[0], fundingtxid, self.nodes[0].getnewaddress(), 49.95)
    secondDSTx = create_raw_transaction(self.nodes[0], fundingtxid, self.nodes[0].getnewaddress(), 49.95)
    # Send the two conflicting transactions to the same node via RPC
    assert_equal(dspReceiver.message_count["dsproof-beta"], 4)
    self.nodes[0].sendrawtransaction(firstDSTx)
    # send second tx to same node via RPC
    # -- it's normal for it to reject the tx, but it should still generate
    # a dsproof broadcast
    assert_raises_rpc_error(
        -26, "txn-mempool-conflict (code 18)",
        self.nodes[0].sendrawtransaction,
        secondDSTx
    )
    wait_until(
        lambda: dspReceiver.message_count["dsproof-beta"] == 5,
        lock=mininode_lock,
        timeout=5
    )

    # Finally, ensure that the non-dsproof node has the messages we expect in its log
    # (this checks that dsproof was disabled for this node)
    debug_log = os.path.join(non_dsproof_node.datadir, 'regtest', 'debug.log')
    dsp_inv_ctr = 0
    with open(debug_log, encoding='utf-8') as dl:
        for line in dl.readlines():
            if "Got DSProof INV" in line:
                # Ensure that if this node did see a dsproof inv, it explicitly ignored it
                assert "(ignored, -doublespendproof=0)" in line
                dsp_inv_ctr += 1
            else:
                # Ensure this node is not processing dsproof messages and
                # not requesting them via getdata
                assert ("received: dsproof-beta" not in line
                        and "Good DSP" not in line
                        and "DSP broadcasting" not in line
                        and "bad-dsproof" not in line)
    # We expect it to have received at least some DSP inv broadcasts
    assert_greater_than(dsp_inv_ctr, 0)
def run_test(self):
    """Check that OP_CHECKLOCKTIMEVERIFY is enforced as a mandatory script flag.

    Scenario (all against node0 through a mininode P2P peer):
      1. A CLTV-invalidated transaction is rejected inside a block at any
         height (blocks here are proof-of-signature: solved with
         ``self.signblockprivkeys``).
      2. The same kind of transaction is rejected from the mempool with a
         mandatory-script-verify-flag failure, and again inside a block.
      3. Once made CLTV-valid via ``cltv_validate``, the transaction is
         accepted in a block and the chain tip advances.
    """
    # NOTE(review): SCHEME is assigned but never used in this method —
    # confirm whether it is leftover from an earlier revision.
    SCHEME = self.options.scheme
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.log.info("Mining %d blocks", CLTV_HEIGHT - 1)
    # Collect the coinbase txids of the mined blocks so they can be spent.
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(CLTV_HEIGHT - 1,
                                        self.signblockprivkeys)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that an invalid-according-to-CLTV transaction cannot appear in a block at any height"
    )
    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                                 self.nodeaddress, amount=1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT),
                         block_time)
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.hashImMerkleRoot = block.calc_immutable_merkle_root()
    block.solve(self.signblockprivkeys)

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The invalid block must not have been connected; tip is unchanged.
    assert_equal(self.nodes[0].getbestblockhash(), tip)

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD
        ]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                         b'block-validation-failed')
        else:
            assert b'Negative locktime' in self.nodes[0].p2p.last_message[
                "reject"].reason

    self.log.info("Test that blocks must now be at least version 4")
    #tip = block.sha256
    block_time += 1
    # Build a fresh block on the (unchanged) tip for the next scenario.
    block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT),
                         block_time)
    block.solve(self.signblockprivkeys)

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                 self.nodeaddress, amount=1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for CLTV by getting it
    # rejected from the mempool for exactly that reason.
    # now CLTV is mandatory script flag.
    assert_equal([{
        'txid': spendtx.hashMalFix,
        'allowed': False,
        'reject-reason':
        '16: mandatory-script-verify-flag-failed (Negative locktime)'
    }], self.nodes[0].testmempoolaccept(
        rawtxs=[bytes_to_hex_str(spendtx.serialize())], allowhighfees=True))

    # Now we verify that a block with this transaction is also invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.hashImMerkleRoot = block.calc_immutable_merkle_root()
    block.solve(self.signblockprivkeys)

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # Still rejected: tip must be unchanged.
    assert_equal(self.nodes[0].getbestblockhash(), tip)

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD
        ]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                         b'block-validation-failed')
        else:
            assert b'Negative locktime' in self.nodes[0].p2p.last_message[
                "reject"].reason

    self.log.info(
        "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted"
    )
    spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
    spendtx.rehash()

    # Replace the invalid tx (vtx[1]) with the now-valid one and re-solve.
    block.vtx.pop(1)
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.hashImMerkleRoot = block.calc_immutable_merkle_root()
    block.solve(self.signblockprivkeys)

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This time the block is accepted and becomes the new tip.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Check the CLTV (BIP65) soft-fork transition at version-5 blocks.

    Scenario:
      1. Before activation, a CLTV-invalid transaction can still appear in
         a (version 4) block.
      2. After mining past the activation point, version-4 blocks are
         rejected as obsolete (``bad-version``).
      3. A version-5 block containing a CLTV-invalid transaction is
         rejected.
      4. The same block with a CLTV-validated transaction is accepted.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())
    # wait_for_verack ensures the P2P connection is fully established.
    self.nodes[0].p2p.wait_for_verack()

    self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
    self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that an invalid-according-to-CLTV transaction can still appear in a block"
    )
    spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                 self.nodeaddress, 1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                         block_time)
    block.nVersion = 4
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # Pre-activation the block is accepted despite the CLTV-invalid tx.
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    # Advance the chain past the activation point.
    self.nodes[0].generate(205)

    self.log.info("Test that blocks must now be at least version 5")
    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT + 205),
                         block_time)
    block.nVersion = 4
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert_equal(self.nodes[0].p2p.last_message["reject"].code,
                     REJECT_OBSOLETE)
        assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                     b'bad-version')
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        # Clear the message so the next reject can be detected.
        del self.nodes[0].p2p.last_message["reject"]

    self.log.info(
        "Test that invalid-according-to-cltv transactions cannot appear in a block"
    )
    # Re-use the same block object with a now-acceptable version.
    block.nVersion = 5

    spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                 self.nodeaddress, 1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    # Verify that a block with this transaction is invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD
        ]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                         b'block-validation-failed')
        else:
            assert b'Negative locktime' in self.nodes[0].p2p.last_message[
                "reject"].reason

    self.log.info(
        "Test that a version 5 block with a valid-according-to-CLTV transaction is accepted"
    )
    spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
    spendtx.rehash()

    # Replace the invalid tx (vtx[1]) with the validated one.
    block.vtx.pop(1)
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The corrected block becomes the new tip.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Check the CLTV soft-fork transition using wallet-built transactions.

    Scenario:
      1. Pre-activation, a tx whose output script is CLTV-locked can appear
         in a version-3 block.
      2. Post-activation, version-3 blocks are rejected as obsolete.
      3. A block spending a CLTV-locked utxo with a negative locktime is
         rejected; the wallet also refuses to sign such a spend.
      4. With the lock set to a valid height (``CLTV_HEIGHT - 1``), both
         the funding block and a block spending the utxo are accepted.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())
    network_thread_start()
    # wait_for_verack ensures that the P2P connection is fully up.
    self.nodes[0].p2p.wait_for_verack()

    self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2))
    self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that an invalid-according-to-CLTV transaction can still appear in a block"
    )
    spendtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[0],
                                  self.nodeaddress, 50.0)
    spendtx = cltv_lock_to_height(self.nodes[0], spendtx)

    # Make sure the tx is valid
    self.nodes[0].sendrawtransaction(ToHex(spendtx))

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                         block_time)
    block.nVersion = 3
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # Pre-activation the version-3 block is accepted.
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    self.log.info("Test that blocks must now be at least version 4")
    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
    block.nVersion = 3
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The obsolete-version block must not connect.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert_equal(self.nodes[0].p2p.last_message["reject"].code,
                     REJECT_OBSOLETE)
        assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                     b'bad-version(0x00000003)')
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        # Clear the message so later rejects can be detected.
        del self.nodes[0].p2p.last_message["reject"]

    self.log.info(
        "Test that invalid-according-to-cltv transactions cannot appear in a block"
    )
    # Re-use the same block object with a now-acceptable version.
    block.nVersion = 4

    spendtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[1],
                                  self.nodeaddress, 49.99)
    spendtx = cltv_lock_to_height(self.nodes[0], spendtx)

    # First we show that this tx is valid except for CLTV by getting it
    # accepted to the mempool (which we can achieve with
    # -promiscuousmempoolflags).
    self.nodes[0].p2p.send_and_ping(msg_tx(spendtx))
    assert spendtx.hash in self.nodes[0].getrawmempool()

    # Mine a block containing the funding transaction
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is valid
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    # But a block containing a transaction spending this utxo is not
    rawspendtx = self.nodes[0].decoderawtransaction(ToHex(spendtx))
    inputs = [{
        "txid": rawspendtx['txid'],
        "vout": rawspendtx['vout'][0]['n']
    }]
    output = {self.nodeaddress: 49.98}

    rejectedtx_raw = self.nodes[0].createrawtransaction(inputs, output)
    rejectedtx_signed = self.nodes[0].signrawtransaction(rejectedtx_raw)

    # Couldn't complete signature due to CLTV
    assert (rejectedtx_signed['errors'][0]['error'] == 'Negative locktime')

    rejectedtx = FromHex(CTransaction(), rejectedtx_signed['hex'])
    pad_tx(rejectedtx)
    rejectedtx.rehash()

    tip = block.hash
    block_time += 1
    block = create_block(block.sha256, create_coinbase(CLTV_HEIGHT + 1),
                         block_time)
    block.nVersion = 4
    block.vtx.append(rejectedtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is invalid
    assert_equal(self.nodes[0].getbestblockhash(), tip)

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD
        ]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                         b'blk-bad-inputs')
        else:
            assert b'Negative locktime' in self.nodes[0].p2p.last_message[
                "reject"].reason

    self.log.info(
        "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted"
    )
    spendtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[2],
                                  self.nodeaddress, 49.99)
    spendtx = cltv_lock_to_height(self.nodes[0], spendtx, CLTV_HEIGHT - 1)

    # Modify the transaction in the block to be valid against CLTV
    block.vtx.pop(1)
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is now valid
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    # A block containing a transaction spending this utxo is also valid
    # Build this transaction
    rawspendtx = self.nodes[0].decoderawtransaction(ToHex(spendtx))
    inputs = [{
        "txid": rawspendtx['txid'],
        "vout": rawspendtx['vout'][0]['n'],
        "sequence": 0
    }]
    output = {self.nodeaddress: 49.98}

    # Locktime set to CLTV_HEIGHT so the CLTV check passes.
    validtx_raw = self.nodes[0].createrawtransaction(inputs, output,
                                                     CLTV_HEIGHT)
    validtx = FromHex(CTransaction(), validtx_raw)

    # Signrawtransaction won't sign a non standard tx.
    # But the prevout being anyone can spend, scriptsig can be left empty
    validtx.vin[0].scriptSig = CScript()
    pad_tx(validtx)
    validtx.rehash()

    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(CLTV_HEIGHT + 3), block_time)
    block.nVersion = 4
    block.vtx.append(validtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is valid
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def run_test(self):
    """Check the BIP65 (CLTV) transition at the version-4 block boundary.

    Scenario:
      1. Pre-activation, a CLTV-invalid transaction can appear in a
         version-3 block.
      2. At activation height, version-3 blocks are rejected as obsolete.
      3. The CLTV-invalid transaction is rejected from the mempool and
         from a version-4 block.
      4. After ``cltv_validate`` the transaction is accepted in a
         version-4 block.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
    # Collect the coinbase txids of the mined blocks so they can be spent.
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(CLTV_HEIGHT - 2)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that an invalid-according-to-CLTV transaction can still appear in a block")

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                                 self.nodeaddress, amount=1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                         block_time)
    block.nVersion = 3
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # Pre-activation the version-3 block is accepted.
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    self.log.info("Test that blocks must now be at least version 4")
    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
    block.nVersion = 3
    block.solve()
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The obsolete-version block must not connect.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert_equal(self.nodes[0].p2p.last_message["reject"].code,
                     REJECT_OBSOLETE)
        assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                     b'bad-version(0x00000003)')
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        # Clear the message so the next reject can be detected.
        del self.nodes[0].p2p.last_message["reject"]

    self.log.info(
        "Test that invalid-according-to-cltv transactions cannot appear in a block")
    # Re-use the same block object with a now-acceptable version.
    block.nVersion = 4

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                 self.nodeaddress, amount=1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for CLTV by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal(
        [{'txid': spendtx.hash,
          'allowed': False,
          'reject-reason': '64: non-mandatory-script-verify-flag (Negative locktime)'}],
        self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()],
                                        maxfeerate=0)
    )

    # Now we verify that a block with this transaction is also invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The invalid block must not connect.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
            # Generic rejection when a block is invalid
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                         b'block-validation-failed')
        else:
            assert b'Negative locktime' in self.nodes[0].p2p.last_message[
                "reject"].reason

    self.log.info(
        "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
    spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
    spendtx.rehash()

    # Replace the invalid tx (vtx[1]) with the validated one.
    block.vtx.pop(1)
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The corrected block becomes the new tip.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Check serving of BIP157 cfcheckpt messages.

    Covers:
      * cfcheckpt for a block on a chain about to be re-orged out;
      * cfcheckpt on the active chain (headers at each 1000-block interval
        up to the stop hash);
      * cfcheckpt for a stale (re-orged-out) block;
      * disconnection of peers sending cfcheckpt requests to a node
        without compact-filter support, and of peers sending requests
        with an unknown filter type or unknown stop hash.
    """
    # Node 0 supports COMPACT_FILTERS, node 1 does not.
    node0 = self.nodes[0].add_p2p_connection(P2PInterface())
    node1 = self.nodes[1].add_p2p_connection(P2PInterface())

    # Nodes 0 & 1 share the same first 999 blocks in the chain.
    self.nodes[0].generate(999)
    self.sync_blocks(timeout=600)

    # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
    disconnect_nodes(self.nodes[0], 1)

    self.nodes[0].generate(1)
    wait_until(lambda: self.nodes[0].getblockcount() == 1000)
    stale_block_hash = self.nodes[0].getblockhash(1000)

    # Node 1 mines a longer competing chain, so reconnecting will re-org
    # node 0 onto it.
    self.nodes[1].generate(1001)
    wait_until(lambda: self.nodes[1].getblockcount() == 2000)

    self.log.info("get cfcheckpt on chain to be re-orged out.")
    request = msg_getcfcheckpt(
        filter_type=FILTER_TYPE_BASIC,
        stop_hash=int(stale_block_hash, 16)
    )
    node0.send_and_ping(message=request)
    response = node0.last_message['cfcheckpt']
    assert_equal(response.filter_type, request.filter_type)
    assert_equal(response.stop_hash, request.stop_hash)
    # One checkpoint header at height 1000.
    assert_equal(len(response.headers), 1)

    self.log.info("Reorg node 0 to a new chain.")
    connect_nodes(self.nodes[0], 1)
    self.sync_blocks(timeout=600)

    main_block_hash = self.nodes[0].getblockhash(1000)
    assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"

    self.log.info("Check that peers can fetch cfcheckpt on active chain.")
    tip_hash = self.nodes[0].getbestblockhash()
    request = msg_getcfcheckpt(
        filter_type=FILTER_TYPE_BASIC,
        stop_hash=int(tip_hash, 16)
    )
    node0.send_and_ping(request)
    response = node0.last_message['cfcheckpt']
    assert_equal(response.filter_type, request.filter_type)
    assert_equal(response.stop_hash, request.stop_hash)

    # Cross-check the returned headers against the getblockfilter RPC.
    main_cfcheckpt = self.nodes[0].getblockfilter(
        main_block_hash, 'basic')['header']
    tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header']
    assert_equal(
        response.headers,
        [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)]
    )

    self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
    request = msg_getcfcheckpt(
        filter_type=FILTER_TYPE_BASIC,
        stop_hash=int(stale_block_hash, 16)
    )
    node0.send_and_ping(request)
    response = node0.last_message['cfcheckpt']

    stale_cfcheckpt = self.nodes[0].getblockfilter(
        stale_block_hash, 'basic')['header']
    assert_equal(
        response.headers,
        [int(header, 16) for header in (stale_cfcheckpt,)]
    )

    self.log.info(
        "Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection.")
    requests = [
        msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(main_block_hash, 16)
        ),
    ]
    for request in requests:
        # Fresh connection per request, since the node disconnects us.
        node1 = self.nodes[1].add_p2p_connection(P2PInterface())
        node1.send_message(request)
        node1.wait_for_disconnect()

    self.log.info("Check that invalid requests result in disconnection.")
    requests = [
        # Requesting unknown filter type results in disconnection.
        msg_getcfcheckpt(
            filter_type=255,
            stop_hash=int(main_block_hash, 16)
        ),
        # Requesting unknown hash results in disconnection.
        msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=123456789,
        ),
    ]
    for request in requests:
        # Fresh connection per request, since the node disconnects us.
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())
        node0.send_message(request)
        node0.wait_for_disconnect()
def run_test(self):
    """Check the BIP66 (strict DER signatures) transition at version-3 blocks.

    Scenario:
      1. Pre-activation, a transaction with a non-DER signature can appear
         in a version-2 block.
      2. At activation height, version-2 blocks are rejected as obsolete
         (verified via the debug log).
      3. The non-DER transaction is rejected from the mempool and from a
         version-3 block (reject P2P message checked).
      4. A DERSIG-compliant replacement transaction is accepted in the
         version-3 block.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
    # Collect the coinbase txids of the mined blocks so they can be spent.
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that a transaction with non-DER signature can still appear in a block")

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                                 self.nodeaddress, amount=1.0)
    unDERify(spendtx)
    spendtx.rehash()

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                         block_time)
    block.nVersion = 2
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # Pre-activation the version-2 block is accepted.
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    self.log.info("Test that blocks must now be at least version 3")
    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
    block.nVersion = 2
    block.rehash()
    block.solve()

    with self.nodes[0].assert_debug_log(
            expected_msgs=['{}, bad-version(0x00000002)'.format(block.hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # The obsolete-version block must not connect.
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
        self.nodes[0].p2p.sync_with_ping()

    self.log.info(
        "Test that transactions with non-DER signatures cannot appear in a block")
    # Re-use the same block object with a now-acceptable version.
    block.nVersion = 3

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                 self.nodeaddress, amount=1.0)
    unDERify(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for DERSIG by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal(
        [{'txid': spendtx.hash,
          'allowed': False,
          'reject-reason': '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'}],
        self.nodes[0].testmempoolaccept(
            rawtxs=[bytes_to_hex_str(spendtx.serialize())],
            allowhighfees=True)
    )

    # Now we verify that a block with this transaction is also invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    with self.nodes[0].assert_debug_log(
            expected_msgs=['CheckInputs on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'.format(block.vtx[-1].hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # The invalid block must not connect.
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
        self.nodes[0].p2p.sync_with_ping()

    wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
               lock=mininode_lock)
    with mininode_lock:
        assert self.nodes[0].p2p.last_message["reject"].code in [
            REJECT_INVALID, REJECT_NONSTANDARD]
        assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                     block.sha256)
        assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message[
            "reject"].reason

    self.log.info(
        "Test that a version 3 block with a DERSIG-compliant transaction is accepted")
    # Replace the invalid tx (vtx[1]) with a normally signed one.
    block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                      self.nodeaddress, amount=1.0)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The corrected block becomes the new tip.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Check the CLTV transition using a fund/spend transaction pair.

    ``cltv_lock_to_height`` produces a funding tx whose output script
    contains a CLTV and a tx spending it. Scenario:
      1. Pre-activation, both can appear in a version-3 block (ordered via
         ``make_conform_to_ctor`` for canonical tx ordering).
      2. Post-activation, version-3 blocks are rejected as obsolete
         (verified via the debug log).
      3. The spend with a negative locktime is rejected from the mempool
         and causes ``blk-bad-inputs`` when placed in a version-4 block.
      4. With the lock set to ``CLTV_HEIGHT``, both transactions are fully
         valid and the block is accepted.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2))
    # Collect the coinbase txids of the mined blocks so they can be spent.
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(CLTV_HEIGHT - 2)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that an invalid-according-to-CLTV transaction can still appear in a block")

    fundtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                                self.nodeaddress, 49.99)
    fundtx, spendtx = cltv_lock_to_height(self.nodes[0], fundtx,
                                          self.nodeaddress, 49.98)

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                         block_time)
    block.nVersion = 3
    block.vtx.append(fundtx)
    # include the -1 CLTV in block
    block.vtx.append(spendtx)
    make_conform_to_ctor(block)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is valid
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    self.log.info("Test that blocks must now be at least version 4")
    tip = block.sha256
    block_time += 1
    block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
    block.nVersion = 3
    block.solve()

    with self.nodes[0].assert_debug_log(expected_msgs=[
            '{}, bad-version(0x00000003)'.format(block.hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # The obsolete-version block must not connect.
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
        self.nodes[0].p2p.sync_with_ping()

    self.log.info(
        "Test that invalid-according-to-cltv transactions cannot appear in a block")
    # Re-use the same block object with a now-acceptable version.
    block.nVersion = 4

    fundtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                self.nodeaddress, 49.99)
    fundtx, spendtx = cltv_lock_to_height(self.nodes[0], fundtx,
                                          self.nodeaddress, 49.98)

    # The funding tx only has unexecuted bad CLTV, in scriptpubkey; this is
    # valid.
    self.nodes[0].p2p.send_and_ping(msg_tx(fundtx))
    assert fundtx.hash in self.nodes[0].getrawmempool()

    # Mine a block containing the funding transaction
    block.vtx.append(fundtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is valid
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    # We show that this tx is invalid due to CLTV by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal([{
        'txid': spendtx.hash,
        'allowed': False,
        'reject-reason': '64: non-mandatory-script-verify-flag (Negative locktime)'
    }], self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()],
                                        maxfeerate=0))

    rejectedtx_signed = self.nodes[0].signrawtransactionwithwallet(
        ToHex(spendtx))

    # Couldn't complete signature due to CLTV
    assert rejectedtx_signed['errors'][0]['error'] == 'Negative locktime'

    tip = block.hash
    block_time += 1
    block = create_block(block.sha256, create_coinbase(CLTV_HEIGHT + 1),
                         block_time)
    block.nVersion = 4
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    with self.nodes[0].assert_debug_log(expected_msgs=[
            'ConnectBlock {} failed, blk-bad-inputs'.format(block.hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # The invalid block must not connect.
        assert_equal(self.nodes[0].getbestblockhash(), tip)
        self.nodes[0].p2p.sync_with_ping()

    self.log.info(
        "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")

    fundtx = create_transaction(self.nodes[0], self.coinbase_txids[2],
                                self.nodeaddress, 49.99)
    fundtx, spendtx = cltv_lock_to_height(self.nodes[0], fundtx,
                                          self.nodeaddress, 49.98,
                                          CLTV_HEIGHT)

    # make sure sequence is nonfinal and locktime is good
    spendtx.vin[0].nSequence = 0xfffffffe
    spendtx.nLockTime = CLTV_HEIGHT

    # both transactions are fully valid
    self.nodes[0].sendrawtransaction(ToHex(fundtx))
    self.nodes[0].sendrawtransaction(ToHex(spendtx))

    # Modify the transactions in the block to be valid against CLTV
    block.vtx.pop(1)
    block.vtx.append(fundtx)
    block.vtx.append(spendtx)
    make_conform_to_ctor(block)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # This block is now valid
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def run_test(self):
    """Check the CLTV transition, including softfork-status reporting.

    Same structure as the standard BIP65 test, but additionally calls
    ``self.test_cltv_info`` around each state change to verify the node's
    reported activation status, and this variant's ``create_block`` takes
    the node as its first argument.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.test_cltv_info(is_active=False)

    self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
    # Collect the coinbase txids of the mined blocks so they can be spent.
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(CLTV_HEIGHT - 2)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info(
        "Test that an invalid-according-to-CLTV transaction can still appear in a block")

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                                 self.nodeaddress, amount=1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(self.nodes[0], int(tip, 16),
                         create_coinbase(CLTV_HEIGHT - 1), block_time)
    block.nVersion = 3
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.test_cltv_info(
        is_active=False
    )  # Not active as of current tip and next block does not need to obey rules
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    self.test_cltv_info(
        is_active=True
    )  # Not active as of current tip, but next block must obey rules
    assert_equal(self.nodes[0].getbestblockhash(), block.hash)

    self.log.info("Test that blocks must now be at least version 4")
    tip = block.sha256
    block_time += 1
    block = create_block(self.nodes[0], tip, create_coinbase(CLTV_HEIGHT),
                         block_time)
    block.nVersion = 3
    block.solve()

    with self.nodes[0].assert_debug_log(expected_msgs=[
            '{}, bad-version(0x00000003)'.format(block.hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # The obsolete-version block must not connect.
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
        self.nodes[0].p2p.sync_with_ping()

    self.log.info(
        "Test that invalid-according-to-cltv transactions cannot appear in a block")
    # Re-use the same block object with a now-acceptable version.
    block.nVersion = 4

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                 self.nodeaddress, amount=1.0)
    cltv_invalidate(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for CLTV by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal([{
        'txid': spendtx.hash,
        'allowed': False,
        'reject-reason': 'non-mandatory-script-verify-flag (Negative locktime)'
    }], self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()],
                                        maxfeerate=0))

    # Now we verify that a block with this transaction is also invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    with self.nodes[0].assert_debug_log(expected_msgs=[
            'CheckInputs on {} failed with non-mandatory-script-verify-flag (Negative locktime)'.format(block.vtx[-1].hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # The invalid block must not connect.
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
        self.nodes[0].p2p.sync_with_ping()

    self.log.info(
        "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
    spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
    spendtx.rehash()

    # Replace the invalid tx (vtx[1]) with the validated one.
    block.vtx.pop(1)
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()

    self.test_cltv_info(
        is_active=True
    )  # Not active as of current tip, but next block must obey rules
    self.nodes[0].p2p.send_and_ping(msg_block(block))
    self.test_cltv_info(is_active=True)  # Active as of current tip
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Check BIP66 (strict DER) enforcement from the start of the test chain.

    In this variant the chain is mined straight to ``DERSIG_HEIGHT - 1``,
    so there is no pre-activation block test. Scenario:
      1. A version-2 block at activation height is rejected as obsolete
         (verified via the debug log).
      2. A non-DER transaction is rejected from the mempool with a
         mandatory-script-verify-flag failure, and a version-3 block
         containing it fails with ``blk-bad-inputs``.
      3. With a compliant replacement transaction the block is accepted.
    """
    self.nodes[0].add_p2p_connection(P2PInterface())

    self.log.info("Mining {} blocks".format(DERSIG_HEIGHT - 1))
    # Collect the coinbase txids of the mined blocks so they can be spent.
    self.coinbase_txids = [
        self.nodes[0].getblock(b)['tx'][0]
        for b in self.nodes[0].generate(DERSIG_HEIGHT - 1)
    ]
    self.nodeaddress = self.nodes[0].getnewaddress()

    self.log.info("Test that blocks must now be at least version 3")
    tip = self.nodes[0].getbestblockhash()
    block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
    block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT),
                         block_time)
    block.nVersion = 2
    block.rehash()
    block.solve()

    with self.nodes[0].assert_debug_log(expected_msgs=[
            '{}, bad-version(0x00000002)'.format(block.hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # The obsolete-version block must not connect.
        assert_equal(self.nodes[0].getbestblockhash(), tip)
        self.nodes[0].p2p.sync_with_ping()

    self.log.info(
        "Test that transactions with non-DER signatures cannot appear in a block")
    # Re-use the same block object with a now-acceptable version.
    block.nVersion = 3

    spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                 self.nodeaddress, amount=1.0)
    unDERify(spendtx)
    spendtx.rehash()

    # First we show that this tx is valid except for DERSIG by getting it
    # rejected from the mempool for exactly that reason.
    assert_equal([{
        'txid': spendtx.hash,
        'allowed': False,
        'reject-reason': 'mandatory-script-verify-flag-failed (Non-canonical DER signature)'
    }], self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()],
                                        maxfeerate=0))

    # Now we verify that a block with this transaction is also invalid.
    block.vtx.append(spendtx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    with self.nodes[0].assert_debug_log(expected_msgs=[
            'ConnectBlock {} failed, blk-bad-inputs'.format(block.hash)]):
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        # The invalid block must not connect.
        assert_equal(self.nodes[0].getbestblockhash(), tip)
        self.nodes[0].p2p.sync_with_ping()

    self.log.info(
        "Test that a version 3 block with a DERSIG-compliant transaction is accepted")
    # Replace the invalid tx (vtx[1]) with a normally signed one.
    block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                      self.nodeaddress, amount=1.0)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
    block.solve()

    self.nodes[0].p2p.send_and_ping(msg_block(block))
    # The corrected block becomes the new tip.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Exercise processing of unrequested blocks (whitelisted vs non-whitelisted peers).

    Walks through numbered scenarios (logged as "1." .. "9."): leaving IBD,
    accepting/rejecting unrequested blocks at various work levels, the
    too-far-ahead limit, inv-triggered getdata recovery, a reorg to an
    invalid chain, and the disconnect on a known-invalid header chain.

    NOTE(review): this variant appears to target a proof-of-stake fork —
    coinbases are funded and signed via UTXOManager / sign_coinbase rather
    than plain create_coinbase; confirm against the project's framework.
    """
    self.setup_stake_coins(*self.nodes)

    # Setup the p2p connections
    # test_node connects to node0 (not whitelisted)
    test_node = self.nodes[0].add_p2p_connection(P2PInterface())
    # min_work_node connects to node1 (whitelisted)
    min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

    # Track spendable outputs for the fork we will build, starting from the
    # genesis coin so fork coinbases can be funded independently.
    fork_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
    utxo_manager = UTXOManager(self.nodes[0], fork_snapshot_meta)

    genesis_coin = get_unspent_coins(self.nodes[0], 1)[0]
    genesis_txout = CTxOut(int(genesis_coin['amount'] * UNIT),
                           CScript(hex_str_to_bytes(genesis_coin['scriptPubKey'])))
    genesis_utxo = [
        UTXO(0, TxType.COINBASE,
             COutPoint(int(genesis_coin['txid'], 16), genesis_coin['vout']),
             genesis_txout)
    ]
    utxo_manager.available_outputs = genesis_utxo

    self.log.info("1. Have nodes mine a block (leave IBD)")
    [n.generate(1) for n in self.nodes]
    tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
    tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])

    self.log.info("2. Send one block that builds on each tip. This should be accepted by node0.")
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    coin = get_unspent_coins(self.nodes[0], 1)[0]
    for i in range(2):
        coinbase = sign_coinbase(self.nodes[0], create_coinbase(2, coin, tip_snapshot_meta.hash))
        blocks_h2.append(create_block(tips[i], coinbase, block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_message(msg_block(blocks_h2[0]))
    min_work_node.send_message(msg_block(blocks_h2[1]))

    for x in [test_node, min_work_node]:
        x.sync_with_ping()
    # node0 accepts the unrequested block; node1 (min-work) ignores it.
    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 1)
    self.log.info("First height 2 block accepted by node0; correctly rejected by node1")

    self.log.info("3. Send another block that builds on genesis.")
    coinbase = utxo_manager.get_coinbase(1, n_pieces=300)
    block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), coinbase, block_time)
    block_time += 1
    block_h1f.solve()
    test_node.send_message(msg_block(block_h1f))
    utxo_manager.process(coinbase, 1)

    test_node.sync_with_ping()
    # The fork block is known by header only; its data is not stored.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h1f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_h1f.hash)

    self.log.info("4. Send another two block that build on the fork.")
    coinbase = utxo_manager.get_coinbase(2)
    block_h2f = create_block(block_h1f.sha256, coinbase, block_time)
    block_time += 1
    block_h2f.solve()
    test_node.send_message(msg_block(block_h2f))
    utxo_manager.process(coinbase, 2)

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h2f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found

    # But this block should be accepted by node since it has equal work.
    self.nodes[0].getblock(block_h2f.hash)
    self.log.info("Second height 2 block accepted, but not reorg'ed to")

    self.log.info("4b. Now send another block that builds on the forking chain.")
    coinbase = utxo_manager.get_coinbase(3)
    block_h3 = create_block(block_h2f.sha256, coinbase, block_h2f.nTime + 1)
    block_h3.solve()
    test_node.send_message(msg_block(block_h3))
    utxo_manager.process(coinbase, 3)

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h3.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    self.nodes[0].getblock(block_h3.hash)

    # But this block should be accepted by node since it has more work.
    self.nodes[0].getblock(block_h3.hash)
    self.log.info("Unrequested more-work block accepted")

    self.log.info("4c. Now mine 288 more blocks and deliver")
    # all should be processed but
    # the last (height-too-high) on node (as long as it is not missing any headers)
    tip = block_h3
    all_blocks = []
    for height in range(4, 292):
        coinbase = utxo_manager.get_coinbase(height)
        next_block = create_block(tip.sha256, coinbase, tip.nTime + 1)
        next_block.solve()
        all_blocks.append(next_block)
        tip = next_block
        utxo_manager.process(coinbase, height)

    # Now send the block at height 5 and check that it wasn't accepted (missing header)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    assert_raises_rpc_error(-5, "Block not found",
                            self.nodes[0].getblock, all_blocks[1].hash)
    assert_raises_rpc_error(-5, "Block not found",
                            self.nodes[0].getblockheader, all_blocks[1].hash)

    # The block at height 5 should be accepted if we provide the missing header, though
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(all_blocks[0]))
    test_node.send_message(headers_message)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    self.nodes[0].getblock(all_blocks[1].hash)

    # Now send the blocks in all_blocks
    for i in range(288):
        test_node.send_message(msg_block(all_blocks[i]))
    test_node.sync_with_ping()

    # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
    for x in all_blocks[:-1]:
        self.nodes[0].getblock(x.hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[-1].hash)

    self.log.info("5. Test handling of unrequested block on the node that didn't process")
    # Should still not be processed (even though it has a child that has more
    # work).

    # The node should have requested the blocks at some point, so
    # disconnect/reconnect first
    self.nodes[0].disconnect_p2ps()
    self.nodes[1].disconnect_p2ps()

    test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    test_node.send_message(msg_block(block_h1f))
    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    self.log.info("Unrequested block that would complete more-work chain was ignored")

    self.log.info("6. Try to get node to request the missing block.")
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with mininode_lock:
        # Clear state so we can check the getdata request
        test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))

    test_node.sync_with_ping()
    with mininode_lock:
        getdata = test_node.last_message["getdata"]

    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, block_h1f.sha256)
    self.log.info("Inv at tip triggered getdata for unprocessed block")

    self.log.info("7. Send the missing block for the third time (now it is requested)")
    test_node.send_message(msg_block(block_h1f))
    test_node.sync_with_ping()
    # Delivering the missing ancestor completes the more-work chain;
    # node reorgs to height 290, still withholding the too-far-ahead block.
    assert_equal(self.nodes[0].getblockcount(), 290)
    self.nodes[0].getblock(all_blocks[286].hash)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[287].hash)
    self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

    self.log.info("8. Create a chain which is invalid at a height longer than the")
    # current chain, but which has more blocks on top of that

    # Reset utxo managers to current state
    utxo_fork_manager = UTXOManager(self.nodes[0], get_tip_snapshot_meta(self.nodes[0]))
    utxo_fork_manager.available_outputs = utxo_manager.available_outputs
    utxo_manager = UTXOManager(self.nodes[0], get_tip_snapshot_meta(self.nodes[0]))
    utxo_manager.available_outputs = utxo_fork_manager.available_outputs

    # Create one block on top of the valid chain
    coinbase = utxo_manager.get_coinbase(291)
    valid_block = create_block(all_blocks[286].sha256, coinbase, all_blocks[286].nTime + 1)
    valid_block.solve()
    test_node.send_and_ping(msg_block(valid_block))
    assert_equal(self.nodes[0].getblockcount(), 291)

    # Create three blocks on a fork, but make the second one invalid
    coinbase = utxo_fork_manager.get_coinbase(291)
    block_291f = create_block(all_blocks[286].sha256, coinbase, all_blocks[286].nTime + 1)
    block_291f.solve()
    utxo_fork_manager.process(coinbase, 291)
    coinbase = utxo_fork_manager.get_coinbase(292)
    block_292f = create_block(block_291f.sha256, coinbase, block_291f.nTime + 1)
    # block_292f spends a coinbase below maturity!
    block_292f.vtx.append(create_tx_with_script(block_291f.vtx[0], 0,
                                                script_sig=b"42", amount=1))
    block_292f.compute_merkle_trees()
    block_292f.solve()
    utxo_fork_manager.process(coinbase, 292)
    utxo_fork_manager.process(block_292f.vtx[1], 292)
    coinbase = utxo_fork_manager.get_coinbase(293)
    block_293f = create_block(block_292f.sha256, coinbase, block_292f.nTime + 1)
    block_293f.solve()
    utxo_fork_manager.process(coinbase, 293)

    # Now send all the headers on the chain and enough blocks to trigger reorg
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_291f))
    headers_message.headers.append(CBlockHeader(block_292f))
    headers_message.headers.append(CBlockHeader(block_293f))
    test_node.send_message(headers_message)

    test_node.sync_with_ping()
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_293f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert tip_entry_found
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_293f.hash)

    test_node.send_message(msg_block(block_291f))
    test_node.sync_with_ping()
    self.nodes[0].getblock(block_291f.hash)

    test_node.send_message(msg_block(block_292f))

    # At this point we've sent an obviously-bogus block, wait for full processing
    # without assuming whether we will be disconnected or not
    try:
        # Only wait a short while so the test doesn't take forever if we do get
        # disconnected
        test_node.sync_with_ping(timeout=1)
    except AssertionError:
        test_node.wait_for_disconnect()
        self.nodes[0].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    # We should have failed reorg and switched back to 290 (but have block 291)
    assert_equal(self.nodes[0].getblockcount(), 291)
    assert_equal(self.nodes[0].getbestblockhash(), valid_block.hash)
    assert_equal(self.nodes[0].getblock(block_292f.hash)["confirmations"], -1)

    # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
    coinbase = utxo_fork_manager.get_coinbase(294)
    block_294f = create_block(block_293f.sha256, coinbase, block_293f.nTime + 1)
    block_294f.solve()
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_294f))
    test_node.send_message(headers_message)
    test_node.wait_for_disconnect()

    self.log.info("9. Connect node1 to node0 and ensure it is able to sync")
    connect_nodes(self.nodes[0], 1)
    sync_blocks([self.nodes[0], self.nodes[1]])
    self.log.info("Successfully synced nodes 1 and 0")
def test_service_flags(self):
    """Advertise two unassigned service bits and verify the node reports
    them as UNKNOWN in getpeerinfo's servicesnames."""
    node = self.nodes[0]
    # Bits 4 and 63 carry no assigned meaning, so the node can only
    # echo them back as UNKNOWN[2^n].
    unknown_service_bits = (1 << 4) | (1 << 63)
    node.add_p2p_connection(P2PInterface(), services=unknown_service_bits)
    newest_peer = node.getpeerinfo()[-1]
    assert_equal(['UNKNOWN[2^4]', 'UNKNOWN[2^63]'], newest_peer['servicesnames'])
    node.disconnect_p2ps()
def run_test(self):
    """Test processing of unrequested blocks.

    Numbered scenarios: leaving IBD (1), acceptance of an unrequested
    same-height block by a non-whitelisted peer's node vs rejection by the
    min-work-whitelisted node (2), headers-only handling of fork blocks with
    missing ancestors (3-4b), the too-far-ahead block limit (4c), recovery of
    a missing ancestor via inv/getdata (5-7), rejection of a reorg onto an
    invalid chain (8), and disconnect on a header extending a known-invalid
    chain, then final sync (9).
    """
    # Setup the p2p connections
    # test_node connects to node0 (not whitelisted)
    test_node = self.nodes[0].add_p2p_connection(P2PInterface())
    # min_work_node connects to node1 (whitelisted)
    min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

    # 1. Have nodes mine a block (leave IBD)
    [ n.generate(1) for n in self.nodes ]
    tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]

    # 2. Send one block that builds on each tip.
    # This should be accepted by node0
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    for i in range(2):
        blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_message(msg_block(blocks_h2[0]))
    min_work_node.send_message(msg_block(blocks_h2[1]))

    for x in [test_node, min_work_node]:
        x.sync_with_ping()
    # node0 takes the unrequested block; the min-work node does not.
    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 1)
    self.log.info("First height 2 block accepted by node0; correctly rejected by node1")

    # 3. Send another block that builds on genesis.
    block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0),
                             create_coinbase(1), block_time)
    block_time += 1
    block_h1f.solve()
    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    # The fork block should be tracked as a headers-only tip, with no
    # block data stored on disk.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h1f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert(tip_entry_found)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_h1f.hash)

    # 4. Send another two block that build on the fork.
    block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
    block_time += 1
    block_h2f.solve()
    test_node.send_message(msg_block(block_h2f))

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h2f.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert(tip_entry_found)

    # But this block should be accepted by node since it has equal work.
    self.nodes[0].getblock(block_h2f.hash)
    self.log.info("Second height 2 block accepted, but not reorg'ed to")

    # 4b. Now send another block that builds on the forking chain.
    block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
    block_h3.solve()
    test_node.send_message(msg_block(block_h3))

    test_node.sync_with_ping()
    # Since the earlier block was not processed by node, the new block
    # can't be fully validated.
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_h3.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert(tip_entry_found)
    self.nodes[0].getblock(block_h3.hash)

    # But this block should be accepted by node since it has more work.
    self.nodes[0].getblock(block_h3.hash)
    self.log.info("Unrequested more-work block accepted")

    # 4c. Now mine 288 more blocks and deliver; all should be processed but
    # the last (height-too-high) on node (as long as it is not missing any headers)
    tip = block_h3
    all_blocks = []
    for i in range(288):
        next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
        next_block.solve()
        all_blocks.append(next_block)
        tip = next_block

    # Now send the block at height 5 and check that it wasn't accepted (missing header)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    assert_raises_rpc_error(-5, "Block not found",
                            self.nodes[0].getblock, all_blocks[1].hash)
    assert_raises_rpc_error(-5, "Block not found",
                            self.nodes[0].getblockheader, all_blocks[1].hash)

    # The block at height 5 should be accepted if we provide the missing header, though
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(all_blocks[0]))
    test_node.send_message(headers_message)
    test_node.send_message(msg_block(all_blocks[1]))
    test_node.sync_with_ping()
    self.nodes[0].getblock(all_blocks[1].hash)

    # Now send the blocks in all_blocks
    for i in range(288):
        test_node.send_message(msg_block(all_blocks[i]))
    test_node.sync_with_ping()

    # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
    for x in all_blocks[:-1]:
        self.nodes[0].getblock(x.hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[-1].hash)

    # 5. Test handling of unrequested block on the node that didn't process
    # Should still not be processed (even though it has a child that has more
    # work).

    # The node should have requested the blocks at some point, so
    # disconnect/reconnect first
    self.nodes[0].disconnect_p2ps()
    self.nodes[1].disconnect_p2ps()

    test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    self.log.info("Unrequested block that would complete more-work chain was ignored")

    # 6. Try to get node to request the missing block.
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with mininode_lock:
        # Clear state so we can check the getdata request
        test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))

    test_node.sync_with_ping()
    with mininode_lock:
        getdata = test_node.last_message["getdata"]

    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, block_h1f.sha256)
    self.log.info("Inv at tip triggered getdata for unprocessed block")

    # 7. Send the missing block for the third time (now it is requested)
    test_node.send_message(msg_block(block_h1f))

    test_node.sync_with_ping()
    # Supplying the missing ancestor completes the more-work chain: the node
    # reorgs to height 290, still withholding the too-far-ahead last block.
    assert_equal(self.nodes[0].getblockcount(), 290)
    self.nodes[0].getblock(all_blocks[286].hash)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, all_blocks[287].hash)
    self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

    # 8. Create a chain which is invalid at a height longer than the
    # current chain, but which has more blocks on top of that
    block_289f = create_block(all_blocks[284].sha256, create_coinbase(289),
                              all_blocks[284].nTime+1)
    block_289f.solve()
    block_290f = create_block(block_289f.sha256, create_coinbase(290),
                              block_289f.nTime+1)
    block_290f.solve()
    block_291 = create_block(block_290f.sha256, create_coinbase(291),
                             block_290f.nTime+1)
    # block_291 spends a coinbase below maturity!
    block_291.vtx.append(create_tx_with_script(block_290f.vtx[0], 0,
                                               script_sig=b"42", amount=1))
    block_291.hashMerkleRoot = block_291.calc_merkle_root()
    block_291.solve()
    block_292 = create_block(block_291.sha256, create_coinbase(292),
                             block_291.nTime+1)
    block_292.solve()

    # Now send all the headers on the chain and enough blocks to trigger reorg
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_289f))
    headers_message.headers.append(CBlockHeader(block_290f))
    headers_message.headers.append(CBlockHeader(block_291))
    headers_message.headers.append(CBlockHeader(block_292))
    test_node.send_message(headers_message)

    test_node.sync_with_ping()
    tip_entry_found = False
    for x in self.nodes[0].getchaintips():
        if x['hash'] == block_292.hash:
            assert_equal(x['status'], "headers-only")
            tip_entry_found = True
    assert(tip_entry_found)
    assert_raises_rpc_error(-1, "Block not found on disk",
                            self.nodes[0].getblock, block_292.hash)

    test_node.send_message(msg_block(block_289f))
    test_node.send_message(msg_block(block_290f))

    test_node.sync_with_ping()
    self.nodes[0].getblock(block_289f.hash)
    self.nodes[0].getblock(block_290f.hash)

    test_node.send_message(msg_block(block_291))

    # At this point we've sent an obviously-bogus block, wait for full processing
    # without assuming whether we will be disconnected or not
    try:
        # Only wait a short while so the test doesn't take forever if we do get
        # disconnected
        test_node.sync_with_ping(timeout=1)
    except AssertionError:
        test_node.wait_for_disconnect()

        self.nodes[0].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

    # We should have failed reorg and switched back to 290 (but have block 291)
    assert_equal(self.nodes[0].getblockcount(), 290)
    assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
    assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)

    # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
    block_293 = create_block(block_292.sha256, create_coinbase(293),
                             block_292.nTime+1)
    block_293.solve()
    headers_message = msg_headers()
    headers_message.headers.append(CBlockHeader(block_293))
    test_node.send_message(headers_message)
    test_node.wait_for_disconnect()

    # 9. Connect node1 to node0 and ensure it is able to sync
    connect_nodes(self.nodes[0], 1)
    sync_blocks([self.nodes[0], self.nodes[1]])
    self.log.info("Successfully synced nodes 1 and 0")
def run_test(self):
    """Test that requests for stale blocks succeed while the stale tip is
    recent, then fail once the active chain has moved well past it, while
    old blocks on the ACTIVE chain always remain fetchable.

    Flow: rewind mock time 60 days, mine 10 blocks, reorg the node onto a
    longer fork built 2 blocks below its tip, then probe the stale branch
    and the active chain with getdata/getheaders.
    """
    # node0 is our mininode peer; self.nodes[0] is the real node under test.
    node0 = self.nodes[0].add_p2p_connection(P2PInterface())
    network_thread_start()
    node0.wait_for_verack()

    # Set node time to 60 days ago so the blocks we mine are old relative
    # to real time once mock time is cleared below.
    mocktime = int(time.time()) - 60 * 24 * 60 * 60
    self.nodes[0].setmocktime(mocktime)

    # Generating a chain of 10 blocks
    nblocks = 10
    block_hashes = self.nodes[0].generate(nblocks)
    # NOTE: renamed loop variable so it no longer shadows the builtin `hash`.
    for i, block_hash_hex in enumerate(block_hashes):
        self.log.info('[notice] [%d]:%s' % (i, block_hash_hex))
    self.log.info('[notice] generate node %d' % len(block_hashes))

    # Create longer chain starting 2 blocks before current tip
    height = len(block_hashes) - 2
    block_hash = block_hashes[height - 1]
    self.log.info('[notice] starting %d:%s' % (height, block_hash))
    # Fork blocks must have a time past the fork point's median time.
    block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
    new_blocks = self.build_chain(5, block_hash, height, block_time)
    for i, new_block in enumerate(new_blocks):
        self.log.info('[notice] n [%d]:%s' % (i, new_block.hash))

    # Force reorg to a longer chain: announce the fork headers, then feed
    # the node the blocks it requests.
    node0.send_message(msg_headers(new_blocks))
    node0.wait_for_getdata()
    for block in new_blocks:
        node0.send_and_ping(msg_block(block))

    # Check that reorg succeeded: 8 common blocks + 5 fork blocks.
    assert_equal(self.nodes[0].getblockcount(), 13)

    # The old tip is now stale; keep its hash (as an int) for probing.
    stale_hash = int(block_hashes[-1], 16)
    self.log.info('[notice] stale_hash:%s' % stale_hash)

    # Check that getdata request for stale block succeeds
    self.send_block_request(stale_hash, node0)
    test_function = lambda: self.last_block_equals(stale_hash, node0)
    wait_until(test_function, timeout=3)

    # Check that getheader request for stale block header succeeds
    self.send_header_request(stale_hash, node0)
    test_function = lambda: self.last_header_equals(stale_hash, node0)
    wait_until(test_function, timeout=3)

    # Longest chain is extended so stale is much older than chain tip
    self.nodes[0].setmocktime(0)
    tip = self.nodes[0].generate(nblocks=1)[0]
    assert_equal(self.nodes[0].getblockcount(), 14)

    # Send getdata & getheaders to refresh last received getheader message
    block_hash = int(tip, 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    # Request for very old stale block should now fail
    self.send_block_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_block_equals(stale_hash, node0)

    # Request for very old stale block header should now fail
    self.send_header_request(stale_hash, node0)
    time.sleep(3)
    assert not self.last_header_equals(stale_hash, node0)

    # Verify we can fetch very old blocks and headers on the active chain
    block_hash = int(block_hashes[2], 16)
    self.send_block_request(block_hash, node0)
    self.send_header_request(block_hash, node0)
    node0.sync_with_ping()

    self.send_block_request(block_hash, node0)
    test_function = lambda: self.last_block_equals(block_hash, node0)
    wait_until(test_function, timeout=3)

    self.send_header_request(block_hash, node0)
    test_function = lambda: self.last_header_equals(block_hash, node0)
    wait_until(test_function, timeout=3)