def node_for_block(self, height):
    """Build a ZcashMMRNode for the block at *height* on node 0 (Heartwood epoch)."""
    rpc = self.nodes[0]
    # Parse the raw (verbosity 0) block hex into a header object.
    header = CBlockHeader()
    raw_hex = rpc.getblock(str(height), 0)
    header.deserialize(BytesIO(hex_str_to_bytes(raw_hex)))
    # The RPC reports the Sapling root in display (big-endian) order,
    # so reverse the bytes before handing it to the MMR code.
    root_display = rpc.getblock(str(height))["finalsaplingroot"]
    sapling_root = hex_str_to_bytes(root_display)[::-1]
    return ZcashMMRNode.from_block(
        header, height, sapling_root, 0, HEARTWOOD_BRANCH_ID)
def test_compactblock_requests(self, node, test_node, version, segwit):
    """Check that announcing a new block (by inv or by header) makes the node
    request a compact block, and that the compact-block round trip
    (cmpctblock -> getblocktxn -> blocktxn) advances the tip.

    node: RPC handle of the node under test.
    test_node: p2p connection used to feed messages to the node.
    version: compact-block protocol version (2 selects witness hashing).
    segwit: whether the built block should include segwit data.
    """
    # Try announcing a block with an inv or header, expect a compactblock
    # request in response to either announcement style.
    for announce in ["inv", "header"]:
        block = self.build_block_on_tip(node, segwit=segwit)
        with mininode_lock:
            # Forget any previous getdata so we only see the new request.
            test_node.last_message.pop("getdata", None)
        if announce == "inv":
            # An inv for an unknown block first triggers a getheaders;
            # answer it with the header so the node can ask for the block.
            test_node.send_message(MsgInv([CInv(2, block.sha256)]))
            wait_until(lambda: "getheaders" in test_node.last_message,
                       timeout=30,
                       lock=mininode_lock,
                       err_msg="test_compactblock_requests")
            test_node.send_header_for_blocks([block])
        else:
            test_node.send_header_for_blocks([block])
        wait_until(lambda: "getdata" in test_node.last_message,
                   timeout=30,
                   lock=mininode_lock,
                   err_msg="test_nod.last_message getdata")
        assert_equal(len(test_node.last_message["getdata"].inv), 1)
        # Inv type 4 == MSG_CMPCT_BLOCK: the node asked for a compact block.
        assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
        assert_equal(test_node.last_message["getdata"].inv[0].hash,
                     block.sha256)
        # Send back a compactblock message that omits the coinbase
        # (only a shortid for it), forcing the node to request it.
        comp_block = HeaderAndShortIDs()
        comp_block.header = CBlockHeader(block)
        comp_block.nonce = 0
        [k0, k1] = comp_block.get_siphash_keys()
        coinbase_hash = block.vtx[0].sha256
        if version == 2:
            # Version 2 short ids are computed over the witness hash.
            coinbase_hash = block.vtx[0].calc_x16r(True)
        comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
        test_node.send_and_ping(MsgCmpctBlock(comp_block.to_p2p()))
        # Tip must not advance yet: the coinbase is still missing.
        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
        # Expect a getblocktxn message asking for the missing coinbase.
        with mininode_lock:
            assert ("getblocktxn" in test_node.last_message)
            absolute_indexes = test_node.last_message[
                "getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, [0])  # should be a coinbase request
        # Send the coinbase, and verify that the tip advances.
        if version == 2:
            msg = MsgWitnessBlocktxn()
        else:
            msg = MsgBlockTxn()
        msg.block_transactions.blockhash = block.sha256
        msg.block_transactions.transactions = [block.vtx[0]]
        test_node.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_invalid_cmpctblock_message(self):
    """A compact block with an out-of-range prefilled index must get the
    peer disconnected and must not advance the tip."""
    node = self.nodes[0]
    node.generate(101)
    tip_block = self.build_block_on_tip(node)

    bad_cmpct = P2PHeaderAndShortIDs()
    bad_cmpct.header = CBlockHeader(tip_block)
    bad_cmpct.prefilled_txn_length = 1
    # This index will be too high
    bad_cmpct.prefilled_txn = [PrefilledTransaction(1, tip_block.vtx[0])]

    self.test_node.send_await_disconnect(MsgCmpctBlock(bad_cmpct))
    # The invalid block must have been rejected: best hash is still the parent.
    assert_equal(int(node.getbestblockhash(), 16), tip_block.hashPrevBlock)
def createDsDetectedMessageFromBlockTree(self, branches):
    """Assemble a dsdetected message with one BlockDetails per branch."""
    details = []
    for branch in branches:
        tip = branch[-1]
        # Last block in every branch contains two transactions: a coinbase
        # (vtx[0]) and a double-spend transaction (vtx[1]); prove the latter
        # against the branch tip's merkle root.
        proof = DSMerkleProof(1, tip.vtx[1], tip.hashMerkleRoot,
                              [MerkleProofNode(tip.vtx[0].sha256)])
        # Block headers should be ordered from tip to the first block.
        headers = [CBlockHeader(b) for b in reversed(branch)]
        details.append(BlockDetails(headers, proof))
    return msg_dsdetected(blocksDetails=details)
def run_test(self): block_count = 0 # Create a P2P connections node0 = NodeConnCB() connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0) node0.add_connection(connection) NetworkThread().start() # wait_for_verack ensures that the P2P connection is fully up. node0.wait_for_verack() # send one to get out of IBD state self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16)) ancestor_block_hash = self.nodes[0].getbestblockhash() parent_block = self.chain.next_block(block_count) block_count += 1 headers_message = msg_headers() headers_message.headers = [CBlockHeader(parent_block)] connection.cb.send_message(headers_message) child_block = self.chain.next_block(block_count) node0.send_message(msg_block(child_block)) # wait till validation of block finishes node0.sync_with_ping() assert_equal(ancestor_block_hash, self.nodes[0].getbestblockhash()) self.stop_node(0) self.start_node(0) # Create a P2P connections node0 = NodeConnCB() connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0) node0.add_connection(connection) NetworkThread().start() # wait_for_verack ensures that the P2P connection is fully up. node0.wait_for_verack() assert_equal(ancestor_block_hash, self.nodes[0].getbestblockhash()) node0.send_message(msg_block(parent_block)) # wait till validation of block finishes node0.sync_with_ping() assert_equal(child_block.hash, self.nodes[0].getbestblockhash())
def node_for_block(self, height):
    """Build a ZcashMMRNode for the block at *height* on node 0, selecting
    the consensus branch id from the height and, for NU5 blocks, attaching
    the Orchard root as v2 data.

    height: block height on node 0's chain.
    Returns: ZcashMMRNode for that block.
    """
    # Epoch boundaries for this regtest chain: NU5 from 35, Canopy from 32,
    # Heartwood below that.
    if height >= 35:
        epoch = NU5_BRANCH_ID
    elif height >= 32:
        epoch = CANOPY_BRANCH_ID
    else:
        epoch = HEARTWOOD_BRANCH_ID
    block_header = CBlockHeader()
    block_header.deserialize(
        BytesIO(hex_str_to_bytes(self.nodes[0].getblock(str(height), 0))))
    # Fetch the verbose block once and reuse it, instead of issuing a
    # separate getblock RPC per field (the original called it twice for
    # NU5-era blocks).
    verbose_block = self.nodes[0].getblock(str(height))
    # Roots are reported in display (big-endian) order; reverse the bytes.
    sapling_root = hex_str_to_bytes(verbose_block["finalsaplingroot"])[::-1]
    if height >= 35:
        orchard_root = hex_str_to_bytes(
            verbose_block["finalorchardroot"])[::-1]
        v2_data = (orchard_root, 0)
    else:
        v2_data = None
    return ZcashMMRNode.from_block(block_header, height, sapling_root, 0,
                                   epoch, v2_data)
def send_header_for_blocks(self, new_blocks):
    """Announce *new_blocks* to the peer in a single headers message."""
    headers = [CBlockHeader(blk) for blk in new_blocks]
    announcement = msg_headers()
    announcement.headers = headers
    self.send_message(announcement)
def run_test(self):
    """Exercise acceptance of unrequested blocks from whitelisted and
    non-whitelisted peers across forks, far-ahead blocks and reorgs."""
    # Setup the p2p connections and start up the network thread.
    test_node = TestNode()  # connects to node0 (not whitelisted)
    white_node = TestNode()  # connects to node1 (whitelisted)
    connections = []
    connections.append(
        NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
    connections.append(
        NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
    test_node.add_connection(connections[0])
    white_node.add_connection(connections[1])
    NetworkThread().start()  # Start up network handling in another thread
    # Test logic begins here
    test_node.wait_for_verack()
    white_node.wait_for_verack()
    # 1. Have both nodes mine a block (leave IBD)
    [n.generate(1) for n in self.nodes]
    tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
    # 2. Send one block that builds on each tip.
    # This should be accepted.
    blocks_h2 = []  # the height 2 blocks on each node's chain
    block_time = int(time.time()) + 1
    for i in range(2):
        blocks_h2.append(
            create_block(tips[i], create_coinbase(2), block_time))
        blocks_h2[i].solve()
        block_time += 1
    test_node.send_message(msg_block(blocks_h2[0]))
    white_node.send_message(msg_block(blocks_h2[1]))
    [x.sync_with_ping() for x in [test_node, white_node]]
    assert_equal(self.nodes[0].getblockcount(), 2)
    assert_equal(self.nodes[1].getblockcount(), 2)
    print("First height 2 block accepted by both nodes")
    # 3. Send another block that builds on the original tip.
    blocks_h2f = []  # Blocks at height 2 that fork off the main chain
    for i in range(2):
        blocks_h2f.append(
            create_block(tips[i], create_coinbase(2),
                         blocks_h2[i].nTime + 1))
        blocks_h2f[i].solve()
    test_node.send_message(msg_block(blocks_h2f[0]))
    white_node.send_message(msg_block(blocks_h2f[1]))
    [x.sync_with_ping() for x in [test_node, white_node]]
    # Non-whitelisted peer: the fork block is headers-only (not processed).
    for x in self.nodes[0].getchaintips():
        if x['hash'] == blocks_h2f[0].hash:
            assert_equal(x['status'], "headers-only")
    # Whitelisted peer: the block was processed (valid-headers).
    for x in self.nodes[1].getchaintips():
        if x['hash'] == blocks_h2f[1].hash:
            assert_equal(x['status'], "valid-headers")
    print("Second height 2 block accepted only from whitelisted peer")
    # 4. Now send another block that builds on the forking chain.
    blocks_h3 = []
    for i in range(2):
        blocks_h3.append(
            create_block(blocks_h2f[i].sha256, create_coinbase(3),
                         blocks_h2f[i].nTime + 1))
        blocks_h3[i].solve()
    test_node.send_message(msg_block(blocks_h3[0]))
    white_node.send_message(msg_block(blocks_h3[1]))
    [x.sync_with_ping() for x in [test_node, white_node]]
    # Since the earlier block was not processed by node0, the new block
    # can't be fully validated.
    for x in self.nodes[0].getchaintips():
        if x['hash'] == blocks_h3[0].hash:
            assert_equal(x['status'], "headers-only")
    # But this block should be accepted by node0 since it has more work.
    try:
        self.nodes[0].getblock(blocks_h3[0].hash)
        print(
            "Unrequested more-work block accepted from non-whitelisted peer"
        )
    except:
        raise AssertionError(
            "Unrequested more work block was not processed")
    # Node1 should have accepted and reorged.
    assert_equal(self.nodes[1].getblockcount(), 3)
    print("Successfully reorged to length 3 chain from whitelisted peer")
    # 4b. Now mine 288 more blocks and deliver; all should be processed but
    # the last (height-too-high) on node0. Node1 should process the tip if
    # we give it the headers chain leading to the tip.
    tips = blocks_h3
    headers_message = msg_headers()
    all_blocks = []  # node0's blocks
    for j in range(2):
        for i in range(288):
            next_block = create_block(tips[j].sha256,
                                      create_coinbase(i + 4),
                                      tips[j].nTime + 1)
            next_block.solve()
            if j == 0:
                # node0 gets full blocks, unannounced.
                test_node.send_message(msg_block(next_block))
                all_blocks.append(next_block)
            else:
                # node1 gets only the headers chain (blocks withheld).
                headers_message.headers.append(CBlockHeader(next_block))
            tips[j] = next_block
    time.sleep(2)
    for x in all_blocks:
        # NOTE(review): the bare except also catches the AssertionError
        # raised inside the try when the far-ahead block is unexpectedly
        # accepted, so that failure would be silently reported as success —
        # confirm whether this is intended.
        try:
            self.nodes[0].getblock(x.hash)
            if x == all_blocks[287]:
                raise AssertionError(
                    "Unrequested block too far-ahead should have been ignored"
                )
        except:
            if x == all_blocks[287]:
                print("Unrequested block too far-ahead not processed")
            else:
                raise AssertionError(
                    "Unrequested block with more work should have been accepted"
                )
    headers_message.headers.pop()  # Ensure the last block is unrequested
    white_node.send_message(headers_message)  # Send headers leading to tip
    white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
    try:
        white_node.sync_with_ping()
        self.nodes[1].getblock(tips[1].hash)
        print(
            "Unrequested block far ahead of tip accepted from whitelisted peer"
        )
    except:
        raise AssertionError(
            "Unrequested block from whitelisted peer not accepted")
    # 5. Test handling of unrequested block on the node that didn't process
    # Should still not be processed (even though it has a child that has more
    # work).
    test_node.send_message(msg_block(blocks_h2f[0]))
    # Here, if the sleep is too short, the test could falsely succeed (if the
    # node hasn't processed the block by the time the sleep returns, and then
    # the node processes it and incorrectly advances the tip).
    # But this would be caught later on, when we verify that an inv triggers
    # a getdata request for this block.
    test_node.sync_with_ping()
    assert_equal(self.nodes[0].getblockcount(), 2)
    print(
        "Unrequested block that would complete more-work chain was ignored"
    )
    # 6. Try to get node to request the missing block.
    # Poke the node with an inv for block at height 3 and see if that
    # triggers a getdata on block 2 (it should if block 2 is missing).
    with mininode_lock:
        # Clear state so we can check the getdata request
        test_node.last_getdata = None
    test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
    test_node.sync_with_ping()
    with mininode_lock:
        getdata = test_node.last_getdata
    # Check that the getdata includes the right block
    assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
    print("Inv at tip triggered getdata for unprocessed block")
    # 7. Send the missing block for the third time (now it is requested)
    test_node.send_message(msg_block(blocks_h2f[0]))
    test_node.sync_with_ping()
    # 2 (main) + 288 far-ahead = 290 once the missing block connects the chain.
    assert_equal(self.nodes[0].getblockcount(), 290)
    print("Successfully reorged to longer chain from non-whitelisted peer")
    [c.disconnect_node() for c in connections]
def send_header(self, block):
    """Announce a single *block* to the peer as a headers message."""
    announcement = msg_headers()
    announcement.headers = [CBlockHeader(block)]
    self.send_message(announcement)
def _zmq_test(self):
    """Check that hashtx/rawtx/hashblock/rawblock ZMQ notifications arrive
    with the right payloads and monotonically increasing sequence numbers."""
    genhashes = self.nodes[0].generate(1)
    self.sync_all()
    self.log.info("Wait for tx")
    msg = self.zmqSubSocket.recv_multipart()
    topic = msg[0]
    assert_equal(topic, b"hashtx")
    txhash = msg[1]
    # Sequence number is a little-endian uint32 in the last frame.
    msgSequence = struct.unpack('<I', msg[-1])[-1]
    assert_equal(msgSequence, 0)  # must be sequence 0 on hashtx
    # rawtx
    msg = self.zmqSubSocket.recv_multipart()
    topic = msg[0]
    assert_equal(topic, b"rawtx")
    body = msg[1]
    msgSequence = struct.unpack('<I', msg[-1])[-1]
    assert_equal(msgSequence, 0)  # must be sequence 0 on rawtx
    # Check that the rawtx hashes to the hashtx
    assert_equal(self.get_hash_from_structure(CTransaction(), body),
                 bytes_to_hex_str(txhash))
    self.log.info("Wait for block")
    msg = self.zmqSubSocket.recv_multipart()
    topic = msg[0]
    assert_equal(topic, b"hashblock")
    body = msg[1]
    msgSequence = struct.unpack('<I', msg[-1])[-1]
    assert_equal(msgSequence, 0)  # must be sequence 0 on hashblock
    blkhash = bytes_to_hex_str(body)
    assert_equal(
        genhashes[0], blkhash
    )  # blockhash from generate must be equal to the hash received over zmq
    # rawblock
    msg = self.zmqSubSocket.recv_multipart()
    topic = msg[0]
    assert_equal(topic, b"rawblock")
    body = msg[1]
    msgSequence = struct.unpack('<I', msg[-1])[-1]
    assert_equal(msgSequence, 0)  # must be sequence 0 on rawblock
    # Check the hash of the rawblock's header matches generate
    assert_equal(self.get_hash_from_structure(CBlockHeader(), body),
                 genhashes[0])
    self.log.info("Generate 10 blocks (and 10 coinbase txes)")
    n = 10
    genhashes = self.nodes[1].generate(n)
    self.sync_all()
    zmqHashes = []
    zmqRawHashed = []
    blockcount = 0
    # Each block produces 4 notifications (hashtx, rawtx, hashblock, rawblock).
    for x in range(n * 4):
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        if topic == b"hashblock":
            zmqHashes.append(bytes_to_hex_str(body))
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            assert_equal(msgSequence, blockcount + 1)
            blockcount += 1
        if topic == b"rawblock":
            zmqRawHashed.append(
                self.get_hash_from_structure(CBlockHeader(), body))
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            assert_equal(msgSequence, blockcount)
    for x in range(n):
        assert_equal(
            genhashes[x], zmqHashes[x]
        )  # blockhash from generate must be equal to the hash received over zmq
        assert_equal(genhashes[x], zmqRawHashed[x])
    self.log.info("Wait for tx from second node")
    # test tx from a second node
    hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(),
                                          1.0)
    self.sync_all()
    # now we should receive a zmq msg because the tx was broadcast
    msg = self.zmqSubSocket.recv_multipart()
    topic = msg[0]
    assert_equal(topic, b"hashtx")
    body = msg[1]
    hashZMQ = bytes_to_hex_str(body)
    msgSequence = struct.unpack('<I', msg[-1])[-1]
    assert_equal(msgSequence, blockcount + 1)
    msg = self.zmqSubSocket.recv_multipart()
    topic = msg[0]
    assert_equal(topic, b"rawtx")
    body = msg[1]
    hashedZMQ = bytes_to_hex_str(hash256(body))
    msgSequence = struct.unpack('<I', msg[-1])[-1]
    assert_equal(msgSequence, blockcount + 1)
    assert_equal(
        hashRPC, hashZMQ
    )  # txid from sendtoaddress must be equal to the hash received over zmq
    assert_equal(hashRPC, hashedZMQ)
def get_tests(self):
    """Generator of TestInstances building a chain with forks, double-spends,
    bad coinbase values, out-of-order header/block delivery, and checksig
    limits; yields acceptance/rejection expectations for each tip."""
    self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    spendable_outputs = []

    # save the current tip so it can be spent by a later block
    def save_spendable_output():
        spendable_outputs.append(self.tip)

    # get an output that we previous marked as spendable
    def get_spendable_output():
        return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)

    # returns a test case that asserts that the current tip was accepted
    def accepted():
        return TestInstance([[self.tip, True]])

    # returns a test case that asserts that the current tip was rejected
    def rejected():
        return TestInstance([[self.tip, False]])

    # move the tip back to a previous block
    def tip(number):
        self.tip = self.blocks[number]

    # creates a new block and advances the tip to that block
    block = self.next_block

    # Create a new block
    block(0)
    save_spendable_output()
    yield accepted()

    # Now we need that block to mature so we can spend the coinbase.
    test = TestInstance(sync_every_block=False)
    for i in range(100):
        block(1000 + i)
        test.blocks_and_transactions.append([self.tip, True])
        save_spendable_output()
    yield test

    # Start by building a couple of blocks on top (which output is spent is
    # in parentheses):
    # genesis -> b1 (0) -> b2 (1)
    out0 = get_spendable_output()
    block(1, spend=out0)
    save_spendable_output()
    yield accepted()

    out1 = get_spendable_output()
    block(2, spend=out1)
    # Inv again, then deliver twice (shouldn't break anything).
    yield accepted()

    # so fork like this:
    #
    # genesis -> b1 (0) -> b2 (1)
    #                  \-> b3 (1)
    #
    # Nothing should happen at this point. We saw b2 first so it takes
    # priority.
    tip(1)
    block(3, spend=out1)
    # Deliver twice (should still not break anything)
    yield rejected()

    # Now we add another block to make the alternative chain longer.
    #
    # genesis -> b1 (0) -> b2 (1)
    #                  \-> b3 (1) -> b4 (2)
    out2 = get_spendable_output()
    block(4, spend=out2)
    yield accepted()

    # ... and back to the first chain.
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                  \-> b3 (1) -> b4 (2)
    tip(2)
    block(5, spend=out2)
    save_spendable_output()
    yield rejected()

    out3 = get_spendable_output()
    block(6, spend=out3)
    yield accepted()

    # Try to create a fork that double-spends
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                                      \-> b7 (2) -> b8 (4)
    #                  \-> b3 (1) -> b4 (2)
    tip(5)
    block(7, spend=out2)
    yield rejected()

    out4 = get_spendable_output()
    block(8, spend=out4)
    yield rejected()

    # Try to create a block that has too much fee
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                                                \-> b9 (4)
    #                  \-> b3 (1) -> b4 (2)
    tip(6)
    block(9, spend=out4, additional_coinbase_value=1)
    yield rejected()

    # Create a fork that ends in a block with too much fee (the one that
    # causes the reorg)
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                                      \-> b10 (3) -> b11 (4)
    #                  \-> b3 (1) -> b4 (2)
    tip(5)
    block(10, spend=out3)
    yield rejected()

    block(11, spend=out4, additional_coinbase_value=1)
    yield rejected()

    # Try again, but with a valid fork first
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                                      \-> b12 (3) -> b13 (4) -> b14 (5)
    #                                                     (b12 added last)
    #                  \-> b3 (1) -> b4 (2)
    tip(5)
    b12 = block(12, spend=out3)
    save_spendable_output()
    #yield TestInstance([[b12, False]])
    b13 = block(13, spend=out4)
    # Deliver the block header for b12, and the block b13.
    # b13 should be accepted but the tip won't advance until b12 is
    # delivered.
    yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
    save_spendable_output()
    out5 = get_spendable_output()
    # b14 is invalid, but the node won't know that until it tries to connect
    # Tip still can't advance because b12 is missing
    block(14, spend=out5, additional_coinbase_value=1)
    yield rejected()

    yield TestInstance([[b12, True, b13.sha256]])  # New tip should be b13.

    # Test that a block with a lot of checksigs is okay
    lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50 - 1))
    tip(13)
    block(15, spend=out5, script=lots_of_checksigs)
    yield accepted()

    # Test that a block with too many checksigs is rejected
    out6 = get_spendable_output()
    too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50))
    block(16, spend=out6, script=too_many_checksigs)
    yield rejected()
def run_test(self):
    """Validate dsdetected P2P message handling: serialization round-trip,
    13 malformed/invalid variants that must be rejected (and banscored),
    valid messages that trigger webhook notifications, duplicate and stale
    message suppression, and random block-tree fuzzing."""
    # Turn on a webhook server
    self.start_webhook_server()
    # Create a P2P connection
    node = self.nodes[0]
    peer = NodeConnCB()
    connection = NodeConn('127.0.0.1', p2p_port(0), node, peer)
    peer.add_connection(connection)
    NetworkThread().start()
    peer.wait_for_verack()
    # Create an initial block with a coinbase we will split into multiple
    # utxos
    initialBlock, _ = make_block(connection)
    coinbaseTx = initialBlock.vtx[0]
    send_by_headers(connection, [initialBlock], do_send_blocks=True)
    wait_for_tip(connection, initialBlock.hash)
    # Mature the coinbase so it can be spent.
    node.generate(101)
    block101hex = node.getblock(node.getbestblockhash(), False)
    block101dict = node.getblock(node.getbestblockhash(), 2)
    block101 = FromHex(CBlock(), block101hex)
    block101.height = block101dict['height']
    block101.rehash()
    # Create a block with a transaction spending coinbaseTx of a previous
    # block and making multiple outputs for future transactions to spend
    utxoBlock, _ = make_block(connection, parent_block=block101)
    utxoTx = create_tx(coinbaseTx, 0, 1 * COIN)
    # Create additional 48 outputs (we let 1 COIN as fee)
    for _ in range(48):
        utxoTx.vout.append(CTxOut(1 * COIN, CScript([OP_TRUE])))
    # Add to block
    utxoTx.rehash()
    utxoBlock.vtx.append(utxoTx)
    utxoBlock.hashMerkleRoot = utxoBlock.calc_merkle_root()
    utxoBlock.solve()
    send_by_headers(connection, [utxoBlock], do_send_blocks=True)
    wait_for_tip(connection, utxoBlock.hash)
    # Make sure serialization/deserialization works as expected
    # Create dsdetected message. The content is not important here.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(utxoBlock), CBlockHeader(initialBlock)],
            DSMerkleProof(1, utxoTx, utxoBlock.hashMerkleRoot,
                          [MerkleProofNode(utxoBlock.vtx[0].sha256)]))
    ])
    dsdBytes = dsdMessage.serialize()
    dsdMessageDeserialized = msg_dsdetected()
    dsdMessageDeserialized.deserialize(BytesIO(dsdBytes))
    assert_equal(str(dsdMessage), str(dsdMessageDeserialized))
    # Send a message containing random bytes. Webhook should not receive
    # the notification.
    peer.send_and_ping(fake_msg_dsdetected())
    assert_equal(self.get_JSON_notification(), None)
    # Create two blocks with transactions spending the same utxo
    blockA, _ = make_block(connection, parent_block=utxoBlock)
    blockB, _ = make_block(connection, parent_block=utxoBlock)
    blockF, _ = make_block(connection, parent_block=utxoBlock)
    txA = create_tx(utxoBlock.vtx[1], 0, int(0.8 * COIN))
    txB = create_tx(utxoBlock.vtx[1], 0, int(0.9 * COIN))
    txF = create_tx(utxoBlock.vtx[1], 0, int(0.7 * COIN))
    txA.rehash()
    txB.rehash()
    txF.rehash()
    blockA.vtx.append(txA)
    blockB.vtx.append(txB)
    blockF.vtx.append(txF)
    blockA.hashMerkleRoot = blockA.calc_merkle_root()
    blockB.hashMerkleRoot = blockB.calc_merkle_root()
    blockF.hashMerkleRoot = blockF.calc_merkle_root()
    blockA.calc_sha256()
    blockB.calc_sha256()
    blockF.calc_sha256()
    blockA.solve()
    blockB.solve()
    blockF.solve()
    # Baseline banscore; each rejected dsdetected below should add to it.
    start_banscore = node.getpeerinfo()[0]['banscore']
    # Webhook should not receive the notification if we send dsdetected
    # message with only one block detail.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message with two block details and one is containing no headers.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message where last headers in block details do not have a common
    # previous block hash.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(utxoBlock)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message where block details does not have headers in proper order.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(utxoBlock), CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message with the empty merkle proof.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails([CBlockHeader(blockB)], DSMerkleProof())
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message with the wrong index in the merkle proof (merkle root
    # validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(0, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message with the wrong transaction in the merkle proof (merkle root
    # validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txA, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message with the wrong merkle root (merkle root validation should
    # fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message with the wrong merkle proof (merkle root validation should
    # fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockA.hashMerkleRoot)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message with the merkle proof having an additional unexpected node
    # (merkle root validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails([CBlockHeader(blockB)],
                     DSMerkleProof(1, txB, blockB.hashMerkleRoot, [
                         MerkleProofNode(blockB.vtx[0].sha256),
                         MerkleProofNode(blockA.hashMerkleRoot)
                     ]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message with the valid proof, but transaction is a coinbase
    # transaction
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(0, blockB.vtx[0], blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[1].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if we send dsdetected
    # message with transactions that are not double spending
    # Create a block similar as before, but with a transaction spending a
    # different utxo
    blockC, _ = make_block(connection, parent_block=utxoBlock)
    txC = create_tx(utxoBlock.vtx[1], 1, int(0.7 * COIN))
    blockC.vtx.append(txC)
    blockC.hashMerkleRoot = blockC.calc_merkle_root()
    blockC.solve()
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockC)],
            DSMerkleProof(1, txC, blockC.hashMerkleRoot,
                          [MerkleProofNode(blockC.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if the two double
    # spending transactions are actually the same transaction (having same
    # txid)
    # Create a block similar as before, but with a transaction spending a
    # different utxo
    blockD, _ = make_block(connection, parent_block=utxoBlock)
    blockD.vtx.append(txA)
    blockD.hashMerkleRoot = blockD.calc_merkle_root()
    blockD.solve()
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockD)],
            DSMerkleProof(1, txA, blockD.hashMerkleRoot,
                          [MerkleProofNode(blockD.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Webhook should not receive the notification if header cannot pow
    # note that pow is so easy in regtest that nonce can often be valid,
    # hence we have to select the nonce carefully.
    # NOTE(review): this loop searches for a nonce value that does NOT
    # satisfy PoW (solve() finds the next valid nonce; if it skipped past
    # our candidate, the candidate is invalid) — confirm intended semantics.
    blockE, _ = make_block(connection, parent_block=utxoBlock)
    blockE.vtx.append(txB)
    blockE.hashMerkleRoot = blockE.calc_merkle_root()
    nonce = blockE.nNonce
    while True:
        blockE.solve()
        if blockE.nNonce > nonce:
            blockE.nNonce = nonce
            break
        nonce += 1
        blockE.nNonce = nonce
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockE)],
            DSMerkleProof(1, txB, blockE.hashMerkleRoot,
                          [MerkleProofNode(blockE.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Each rejected message above adds 10 to the banscore.
    end_banscore = node.getpeerinfo()[0]['banscore']
    assert ((end_banscore - start_banscore) / 10 ==
            13)  # because we have 13 negative tests so far
    # Finally, webhook should receive the notification if we send a proper
    # dsdetected message
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    json_notification = self.get_JSON_notification()
    # remove divergentBlockHash so we can compare with the ds-message
    assert (json_notification != None)
    for e in json_notification['blocks']:
        del e['divergentBlockHash']
    assert_equal(str(dsdMessage),
                 str(msg_dsdetected(json_notification=json_notification)))
    # Repeat previous test but change the order of the BlockDetails, the
    # node should identify this as a duplicate
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # repeat previous test but generate many blocks in the node to age the
    # notification message. Very old notification messages shall be
    # ignored. We use the same thresholds as safe mode.
    # We will hardcode this threshold for now until branch we depend on is
    # merged
    node.generate(289)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockF)],
            DSMerkleProof(1, txF, blockF.hashMerkleRoot,
                          [MerkleProofNode(blockF.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)
    # Create number of random valid block trees and send dsdetected P2P
    # message for each
    maxNumberOfBranches = 10
    maxNumberOfBlocksPerBranch = 30
    for _ in range(10):
        blockTree = self.createRandomBlockTree(maxNumberOfBranches,
                                               maxNumberOfBlocksPerBranch,
                                               utxoBlock,
                                               [utxoBlock.vtx[1]])
        dsdMessage = self.createDsDetectedMessageFromBlockTree(blockTree)
        peer.send_and_ping(dsdMessage)
        # Notification should be received as generated dsdetected message
        # is valid
        json_notification = self.get_JSON_notification()
        # remove divergentBlockHash so we can compare with the ds-message
        assert (json_notification != None)
        for e in json_notification['blocks']:
            del e['divergentBlockHash']
        assert_equal(
            str(dsdMessage),
            str(msg_dsdetected(json_notification=json_notification)))
    self.stop_webhook_server()
def run_test(self):
    """Exercise header acceptance rules: a header whose parent is unknown is
    rejected, headers arriving in order are accepted, and an alternative
    genesis block (no previous block) is rejected outright."""
    self.stop_node(0)
    with self.run_node_with_connections(
            "reject headers if previous block is missing", 0, [],
            self.num_peers) as p2p_connections:
        peer = p2p_connections[0].cb
        base_height = 1

        def announce_header(blk):
            # Wrap blk's header in a HEADERS message and send it.
            hdr_msg = msg_headers()
            hdr_msg.headers = [CBlockHeader(blk)]
            peer.send_message(hdr_msg)

        def wait_getdata_for(blk):
            # Wait until the most recent GETDATA requests exactly blk.
            wait_until(lambda: peer.last_message["getdata"].inv[0].hash == blk.sha256)

        # 1. Create first block.
        block_0 = prepareBlock(base_height, self.nodes[0].getbestblockhash())

        # 2. Send HEADERS to bitcoind and wait for the matching GETDATA.
        announce_header(block_0)
        peer.wait_for_getdata()
        wait_getdata_for(block_0)

        # 3. Deliver the block itself.
        peer.send_message(msg_block(block_0))

        # 4. Bitcoind adds the block to its active chain.
        wait_for_tip(self.nodes[0], block_0.hash)

        # 5. Create two chained blocks on top of block_0.
        block_1 = prepareBlock(base_height + 1, block_0.hash)
        block_2 = prepareBlock(base_height + 2, block_1.hash)

        # 6. Header of the second block alone must be rejected: its parent
        #    (block_1) has never been announced.
        announce_header(block_2)
        wait_until(lambda: check_for_log_msg(
            self, "received header " + block_2.hash + ": missing prev block",
            "/node0"))

        # 7. Header of the first block is accepted.
        announce_header(block_1)
        wait_getdata_for(block_1)

        # 8. Header of the second block is accepted now that its parent is known.
        announce_header(block_2)
        wait_getdata_for(block_2)

        # 9. An alternative Genesis block (hashprev=0, so no previous block)
        #    must be rejected.
        genesis_block = create_block(hashprev=0,
                                     coinbase=create_coinbase(height=0,
                                                              outputValue=25))
        genesis_block.solve()
        peer.send_message(msg_block(genesis_block))
        wait_until(lambda: check_for_log_msg(
            self, "ERROR: FindPreviousBlockIndex: prev block not found",
            "/node0"))
def CheckForDoubleSpends(self, nodes): spent_inputs = [] seen_transactions = [] ds_counter = 0 for node in nodes: for height in range(node.getblockcount() + 1): blockhash = node.getblockhash(height) block = node.getblock(blockhash, 2) blockHex = node.getblock(blockhash, False) for txraw in block['tx'][1:]: # exclude coinbase # skip the identical transactions in the two chains, they are no double spends if txraw['txid'] in seen_transactions: continue else: seen_transactions.append(txraw['txid']) for i in txraw['vin']: utxoA = (i['txid'], i['vout']) blockA = FromHex(CBlock(), blockHex) txA = FromHex(CTransaction(), txraw['hex']) foundB = [ j for j in spent_inputs if j['utxo'] == utxoA ] if foundB: ds_counter += 1 foundB = foundB[0] blockB = foundB['block'] txB = foundB['tx'] txA.rehash() txB.rehash() blockA.vtx[0].rehash() blockB.vtx[0].rehash() sha256_A = blockA.vtx[0].sha256 sha256_B = blockB.vtx[0].sha256 dsdMessage = msg_dsdetected(blocksDetails=[ BlockDetails([CBlockHeader(blockA)], DSMerkleProof( 1, txA, blockA.hashMerkleRoot, [MerkleProofNode(sha256_A)])), BlockDetails([CBlockHeader(blockB)], DSMerkleProof( 1, txB, blockB.hashMerkleRoot, [MerkleProofNode(sha256_B)])) ]) self.message = dsdMessage dsdBytes = dsdMessage.serialize() dsdMessageDeserialized = msg_dsdetected() dsdMessageDeserialized.deserialize( BytesIO(dsdBytes)) assert_equal(str(dsdMessage), str(dsdMessageDeserialized)) break else: spent_inputs.append({ 'txid': txraw['txid'], 'tx': txA, 'utxo': utxoA, 'block': blockA }) return ds_counter
def run_test(self):
    """Check that a branch violating TTOR (topological transaction order)
    cannot reorg out a valid active chain.

    Builds a valid chain, then a competing branch of equal height whose
    blocks order transactions so an output is spent before the transaction
    creating it, and verifies the node invalidates that branch (no
    "Disconnect block" in the log, a TTOR-violation message is logged).
    """
    block_count = 0

    # Create a P2P connection to node0.
    node0 = NodeConnCB()
    connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
    node0.add_connection(connection)
    NetworkThread().start()
    # wait_for_verack ensures that the P2P connection is fully up.
    node0.wait_for_verack()

    self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

    getDataMessages = []

    def on_getdata(conn, message):
        getDataMessages.append(message)
    node0.on_getdata = on_getdata

    def node0_log_contains(text):
        """Return True if node0's debug log contains *text*.

        Uses a context manager so the log file handle is closed (the
        original left it open).  Logs the matching line when found.
        """
        log_path = glob.glob(
            self.options.tmpdir + "/node0" + "/regtest/bitcoind.log")[0]
        with open(log_path) as log_file:
            for line in log_file:
                if text in line:
                    self.log.info("Found line: %s", line.strip())
                    return True
        return False

    # ***** 1. *****
    # starting_blocks are needed to provide spendable outputs
    starting_blocks = MIN_TTOR_VALIDATION_DISTANCE + 1
    for i in range(starting_blocks):
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))
    out = []
    for i in range(starting_blocks):
        out.append(self.chain.get_spendable_output())
    self.nodes[0].waitforblockheight(starting_blocks)

    tip_block_index = block_count - 1
    self.log.info("Block tip height: %d " % block_count)

    # ***** 2. *****
    # branch with blocks that do not violate TTOR
    valid_ttor_branch_height = MIN_TTOR_VALIDATION_DISTANCE + 1
    for i in range(0, valid_ttor_branch_height):
        block = self.chain.next_block(block_count, spend=out[i], extra_txns=8)
        block_count += 1
        node0.send_message(msg_block(block))
    chaintip_valid_branch = block
    self.nodes[0].waitforblockheight(starting_blocks + valid_ttor_branch_height)
    self.log.info("Node's active chain height: %d " %
                  (starting_blocks + valid_ttor_branch_height))

    # ***** 3. *****
    # branch with invalid transaction order that will try to cause a reorg
    self.chain.set_tip(tip_block_index)
    blocks_invalid_ttor = []
    headers_message = msg_headers()
    headers_message.headers = []
    invalid_ttor_branch_height = MIN_TTOR_VALIDATION_DISTANCE + 1
    for i in range(0, invalid_ttor_branch_height):
        spend = out[i]
        block = self.chain.next_block(block_count)
        add_txns = self.get_chained_transactions(spend, num_of_transactions=10)
        # Swap transactions 1 and 2 so an output is used by a transaction
        # that comes before the one creating it (makes block violate TTOR).
        add_txns[1], add_txns[2] = add_txns[2], add_txns[1]
        self.chain.update_block(block_count, add_txns)
        blocks_invalid_ttor.append(block)
        block_count += 1
        if i == 0:
            first_block = block
        # wait with sending header for the last block
        if i != MIN_TTOR_VALIDATION_DISTANCE:
            headers_message.headers.append(CBlockHeader(block))

    self.log.info("Sending %d headers..." % MIN_TTOR_VALIDATION_DISTANCE)
    node0.send_message(headers_message)
    # Wait to make sure we do not receive GETDATA messages yet.
    time.sleep(1)
    # Check that getData is not received until this chain is long at least
    # as the active chain.
    assert_equal(len(getDataMessages), 0)

    self.log.info("Sending 1 more header...")
    # Send HEADERS message for the last block.
    headers_message.headers = [CBlockHeader(block)]
    node0.send_message(headers_message)
    node0.wait_for_getdata()
    self.log.info("Received GETDATA.")
    assert_equal(len(getDataMessages), 1)

    # Send the first block on invalid chain. Chain should be invalidated.
    node0.send_message(msg_block(first_block))

    def wait_to_invalidate_fork():
        # True once the node reports two chain tips: the active one and an
        # invalid one whose hash belongs to our TTOR-violating branch.
        chaintips = self.nodes[0].getchaintips()
        if len(chaintips) <= 1:
            return False
        chaintips_status = [chaintips[0]["status"], chaintips[1]["status"]]
        if "active" not in chaintips_status or "invalid" not in chaintips_status:
            return False
        active_chain_tip_hash = (chaintips[0]["hash"]
                                 if chaintips[0]["status"] == "active"
                                 else chaintips[1]["hash"])
        invalid_fork_tip_hash = (chaintips[0]["hash"]
                                 if chaintips[0]["status"] == "invalid"
                                 else chaintips[1]["hash"])
        assert (active_chain_tip_hash != invalid_fork_tip_hash)
        return any(blk.hash == invalid_fork_tip_hash
                   for blk in blocks_invalid_ttor)
    wait_until(wait_to_invalidate_fork)

    # chaintip of valid branch should be active
    assert_equal(self.nodes[0].getbestblockhash(), chaintip_valid_branch.hash)

    # we should not find information about disconnecting blocks (no reorg)
    assert_equal(node0_log_contains("Disconnect block"), False)
    # we should find information about TTOR being violated
    assert_equal(node0_log_contains("violates TTOR order"), True)