def create_and_send_transactions(self, conn, spendtx, num_of_transactions, money_to_spend=5000000000):
    """Create a chain of very large transactions, each spending the previous one,
    and submit them one by one over *conn*, waiting for each to reach the mempool.

    conn: node connection providing send_message() and an rpc handle.
    spendtx: transaction whose output 0 funds the first created transaction.
    num_of_transactions: how many chained transactions to create and send.
    money_to_spend: starting amount; reduced by 5 BSV per txn to leave a large fee.
    """
    parent = spendtx
    for count in range(1, num_of_transactions + 1):
        # Large fee required for big txns
        money_to_spend -= 500000000
        big_tx = create_tx(parent, 0, money_to_spend, script=CScript([OP_DROP, OP_TRUE]))
        # Pad the transaction with an unspendable ~880 MB OP_RETURN output
        padding = bytearray([0x00] * (ONE_MEGABYTE * 880))
        big_tx.vout.append(CTxOut(0, CScript([OP_FALSE, OP_RETURN, padding])))
        self.sign_tx(big_tx, parent, 0)
        big_tx.rehash()
        conn.send_message(msg_tx(big_tx))
        # Block until the node has accepted this transaction into its mempool
        wait_until(lambda: big_tx.hash in conn.rpc.getrawmempool(),
                   timeout=int(360 * self.options.timeoutfactor))
        logger.info("Submitted txn {} of {}".format(count, num_of_transactions))
        # Mempool must contain exactly the transactions sent so far
        assert conn.rpc.getmempoolinfo()['size'] == count
        # Next transaction spends this one
        parent = big_tx
def createRandomBlockTree(self, maxNumberOfBranches, maxNumberOfBlocksPerBranch, commonBlock, txsWithUtxos):
    """Build a random tree of block branches forking off *commonBlock*.

    Every branch ends in a block containing a transaction that double-spends
    the same randomly selected output, each spending a different amount so
    that the double-spend transactions are all unique.

    Returns a list of branches; each branch is a list of blocks from (but not
    including) its fork point up to its tip.
    """
    # Select a random transaction output that every branch will double-spend
    dsParentTx = txsWithUtxos[random.randrange(len(txsWithUtxos))]
    dsOutputIdx = random.randrange(len(dsParentTx.vout))

    # Need at least 2 branches for an actual double-spend to exist
    branchCount = random.randint(2, maxNumberOfBranches)
    # Each branch spends a different fraction of the output value
    fractionStep = 1.0 / (branchCount + 1.0)
    remainingFactor = 1.0

    branches = []
    last_block_time = commonBlock.nTime
    for _ in range(branchCount):
        newBranch = []
        if len(branches) < 2:
            # First two branches start directly from the common block
            parentBlock = commonBlock
        else:
            # Later branches fork off a random existing branch at a random block
            forkBranch = branches[random.randrange(len(branches))]
            if len(forkBranch) == 1:
                # Forking at a one-block branch's last block would merely extend
                # it (no real fork), so start from the common block instead
                parentBlock = commonBlock
            else:
                # Carry over the blocks up to the fork point: dsdetected messages
                # need the header chain from the double-spending block back to
                # the common block
                newBranch = forkBranch[:random.randrange(1, len(forkBranch))]
                parentBlock = newBranch[-1]

        # Grow the branch by a random number of fresh blocks; advancing
        # last_block_time keeps each generated block unique
        for _ in range(random.randint(1, maxNumberOfBlocksPerBranch)):
            parentBlock, last_block_time = make_block(
                None, parent_block=parentBlock, last_block_time=last_block_time)
            newBranch.append(parentBlock)

        # Tip block carries the double-spend; intentionally spend a different
        # amount per branch to make each double-spend transaction unique
        remainingFactor -= fractionStep
        doubleSpend = create_tx(
            dsParentTx,
            dsOutputIdx,
            int(remainingFactor * dsParentTx.vout[dsOutputIdx].nValue))
        tipBlock = newBranch[-1]
        tipBlock.vtx.append(doubleSpend)
        tipBlock.hashMerkleRoot = tipBlock.calc_merkle_root()
        tipBlock.solve()
        branches.append(newBranch)

    return branches
def run_test(self):
    """Exercise the 'verifyscript' RPC: argument parsing, script verification
    with and without explicit TXO/flags, stopOnFirstInvalid semantics,
    mempool TXO lookup, genesis-activation flag selection, and timeouts."""
    node = self.nodes[0]

    # Create spendable coinbase transaction
    coinbase_tx = make_coinbase(node)
    node.generate(99)

    # Create, send and mine test transaction
    tx_test = self.create_test_tx(coinbase_tx)
    node.sendrawtransaction(ToHex(tx_test), False, True)  # disable fee check
    assert_equal(node.getrawmempool(), [tx_test.hash])
    node.generate(1)
    assert_equal(node.getrawmempool(), [])
    tip_hash = node.getbestblockhash()

    #
    # Check parameter parsing
    #
    tx0 = create_tx(tx_test, 0, 1 * COIN)
    assert_raises_rpc_error(
        -8, "Missing required argument", node.verifyscript
    )  # 1st parameter scripts is required and must be JSON array of objects
    assert_raises_rpc_error(-1, None, node.verifyscript, "abc")
    assert_raises_rpc_error(-1, None, node.verifyscript, 123)
    assert_raises_rpc_error(-1, None, node.verifyscript, True)
    assert_raises_rpc_error(-1, None, node.verifyscript, {})
    assert_raises_rpc_error(-1, None, node.verifyscript, ["abc"])
    assert_raises_rpc_error(-1, None, node.verifyscript, [123])
    assert_raises_rpc_error(-1, None, node.verifyscript, [True])

    assert_raises_rpc_error(-1, None, node.verifyscript, [{ "tx": ToHex(tx0), "n": 0 }], "abc")  # 2nd parameter stopOnFirstInvalid is boolean
    assert_raises_rpc_error(-1, None, node.verifyscript, [{ "tx": ToHex(tx0), "n": 0 }], 0)
    assert_raises_rpc_error(-1, None, node.verifyscript, [{ "tx": ToHex(tx0), "n": 0 }], [])
    assert_raises_rpc_error(-1, None, node.verifyscript, [{ "tx": ToHex(tx0), "n": 0 }], {})

    assert_raises_rpc_error(
        -8, "Invalid value for totalTimeout", node.verifyscript,
        [{ "tx": ToHex(tx0), "n": 0 }], True, -1)  # 3rd parameter totalTimeout is non-negative integer
    assert_raises_rpc_error(-1, None, node.verifyscript, [{ "tx": ToHex(tx0), "n": 0 }], True, "abc")
    assert_raises_rpc_error(-1, None, node.verifyscript, [{ "tx": ToHex(tx0), "n": 0 }], True, True)
    assert_raises_rpc_error(-1, None, node.verifyscript, [{ "tx": ToHex(tx0), "n": 0 }], True, [])
    assert_raises_rpc_error(-1, None, node.verifyscript, [{ "tx": ToHex(tx0), "n": 0 }], True, {})

    assert_raises_rpc_error(-8, "Too many arguments", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0 }], True, 100, "abc")  # max 3 arguments

    #
    # Check scripts parameter parsing
    #
    assert_raises_rpc_error(-8, "Missing", node.verifyscript, [{}])  # tx and n fields are required
    assert_raises_rpc_error(-8, "Missing scripts[0].n", node.verifyscript, [{ "tx": ToHex(tx0) }])
    assert_raises_rpc_error(-8, "Missing scripts[0].tx", node.verifyscript, [{ "n": 0 }])
    assert_raises_rpc_error(-8, "Missing scripts[1].n", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0 }, { "tx": ToHex(tx0) }])
    assert_raises_rpc_error(-8, "Missing scripts[1].tx", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0 }, { "n": 0 }])

    assert_raises_rpc_error(-22, "TX decode failed for scripts[0].tx", node.verifyscript,
                            [{ "tx": "", "n": 0 }])  # tx must be a hex string of a transaction
    assert_raises_rpc_error(-22, "TX decode failed for scripts[0].tx", node.verifyscript,
                            [{ "tx": "01abc", "n": 0 }])
    assert_raises_rpc_error(-22, "TX decode failed for scripts[0].tx", node.verifyscript,
                            [{ "tx": "00", "n": 0 }])

    assert_raises_rpc_error(-8, "Invalid value for n in scripts[0]", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": -1 }])  # n must be non-negative integer

    assert_raises_rpc_error(
        -8, "Both flags and prevblockhash specified in scripts[0]", node.verifyscript,
        [{ "tx": ToHex(tx0), "n": 0, "flags": 0, "prevblockhash": tip_hash }])  # both flags and prevblockhash are not allowed
    assert_raises_rpc_error(-8, "Unknown block", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0,
                               "prevblockhash": "0000000000000000000000000000000000000000000000000000000000000000" }])  # invalid block hash

    assert_raises_rpc_error(-3, None, node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0, "txo": 0 }])  # txo must be JSON object with three fields
    assert_raises_rpc_error(-3, None, node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0, "txo": "abc" }])
    assert_raises_rpc_error(-3, None, node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0, "txo": True }])
    assert_raises_rpc_error(-8, "Missing scripts[0].txo.lock", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0, "txo": { "value": 1, "height": 0 } }])
    assert_raises_rpc_error(-8, "Missing scripts[0].txo.value", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0, "txo": { "lock": "00", "height": 0 } }])
    assert_raises_rpc_error(-8, "Missing scripts[0].txo.height", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0, "txo": { "lock": "00", "value": 1 } }])
    assert_raises_rpc_error(-8, "must be hexadecimal string", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0,
                               "txo": { "lock": "01abc", "value": 1, "height": 0 } }])  # lock must be hexstring
    self.verifyscript_check_ok(node, [{
        "tx": ToHex(create_transaction(tx_test, 0, CScript([OP_TRUE]), 1 * COIN)),
        "n": 0,
        "txo": { "lock": "", "value": 1 * COIN, "height": 0 }
    }])  # empty lock script is valid
    assert_raises_rpc_error(-8, "Invalid value for scripts[0].txo.value", node.verifyscript,
                            [{ "tx": ToHex(tx0), "n": 0,
                               "txo": { "lock": "00", "value": -1, "height": 0 } }])  # value must be non-negative integer
    assert_raises_rpc_error(
        -8, "Invalid value for scripts[0].txo.height", node.verifyscript,
        [{ "tx": ToHex(tx0), "n": 0,
           "txo": { "lock": "00", "value": 1, "height": -2 } }])  # height must be non-negative integer or -1

    assert_raises_rpc_error(
        -8, "Unable to find TXO spent by transaction scripts[0].tx", node.verifyscript,
        [{ "tx": ToHex(create_tx(tx0, 0, 1 * COIN)), "n": 0 }])  # Check that non-existent coin is detected

    #
    # Check verification of a valid P2PKH script
    #
    tx1 = create_tx(tx_test, 1, 1 * COIN)
    self.sign_tx(tx1, tx_test, 1)
    expected_flags = 81931  # this is the expected value for automatically determined script verification flags
    res = self.verifyscript_check_ok(
        node,
        [
            # Automatically find TXO and block
            {
                "tx": ToHex(tx1),
                "n": 0,
                "reportflags": True  # report actual flags used by script verification
            },
            # Explicitly provide TXO and block
            {
                "tx": ToHex(tx1),
                "n": 0,
                "reportflags": True,
                "prevblockhash": tip_hash,
                "txo": {
                    "lock": bytes_to_hex_str(tx_test.vout[0].scriptPubKey),
                    "value": tx_test.vout[0].nValue,
                    "height": node.getblockcount()
                }
            },
            # Explicitly provide script verification flags
            {
                "tx": ToHex(tx1),
                "n": 0,
                "flags": expected_flags,
                "reportflags": True,
                "txo": {
                    "lock": bytes_to_hex_str(tx_test.vout[0].scriptPubKey),
                    "value": tx_test.vout[0].nValue
                }
            },
            # Explicitly provide script verification flags and automatically determine TXO flags
            {
                "tx": ToHex(tx1),
                "n": 0,
                "flags": expected_flags ^ ( 1 << 19 ),  # mess up value of SCRIPT_UTXO_AFTER_GENESIS flag that is always set from TXO
                "reportflags": True,
                "txo": {
                    "lock": bytes_to_hex_str(tx_test.vout[0].scriptPubKey),
                    "value": tx_test.vout[0].nValue,
                    "height": node.getblockcount()
                }
            },
            # Once more without reporting flags
            {
                "tx": ToHex(tx1),
                "n": 0
            }
        ])
    # Check that automatically determined script flags are as expected
    assert_equal(res[0]["flags"], expected_flags)
    assert_equal(res[1]["flags"], expected_flags)
    assert_equal(res[2]["flags"], expected_flags)
    assert_equal(res[3]["flags"], expected_flags)
    assert (not "flags" in res[4])

    # Changing the output value must make the script invalid
    tx2 = create_tx(tx_test, 1, 1 * COIN)
    self.sign_tx(tx2, tx_test, 1)
    tx2.vout[0].nValue = int(0.9 * COIN)
    self.verifyscript_check_error(node, [{"tx": ToHex(tx2), "n": 0}])

    #
    # Check working of stopOnFirstInvalid
    #
    self.verifyscript_check(node, ["error", "ok"],
                            [{ "tx": ToHex(tx2), "n": 0 }, { "tx": ToHex(tx1), "n": 0 }])
    self.verifyscript_check(node, ["error", "ok"],
                            [{ "tx": ToHex(tx2), "n": 0 }, { "tx": ToHex(tx1), "n": 0 }],
                            False)  # default for stopOnFirstInvalid is False
    self.verifyscript_check(node, ["error", "skipped"],
                            [{ "tx": ToHex(tx2), "n": 0 }, { "tx": ToHex(tx1), "n": 0 }],
                            True)

    #
    # Check that TXO is also found in mempool
    #
    tx3 = create_tx(tx_test, 0, 1 * COIN)
    node.sendrawtransaction(ToHex(tx3), False, True)
    assert_equal(node.getrawmempool(), [tx3.hash])
    tx4 = create_tx(tx3, 0, 1 * COIN)
    self.verifyscript_check_ok(node, [{"tx": ToHex(tx4), "n": 0}])

    #
    # Check that genesis related script flags are selected after some height
    #

    # Generating one more block should place us one block below genesis activation
    # but mempool should be already be at genesis height.
    node.generate(1)
    assert_equal(node.getblockcount(), self.genesisactivationheight - 1)

    # Flags should now also include SCRIPT_GENESIS and SCRIPT_VERIFY_SIGPUSHONLY
    # but not SCRIPT_UTXO_AFTER_GENESIS, because TXO is still before genesis.
    res = self.verifyscript_check_ok(node, [{ "tx": ToHex(tx4), "n": 0, "reportflags": True }])
    assert_equal(res[0]["flags"], expected_flags + 262144 + 32)

    # Send this transaction so that we have a spendable coin created after genesis
    node.sendrawtransaction(ToHex(tx4), False, True)
    assert_equal(node.getrawmempool(), [tx4.hash])
    node.generate(1)
    assert_equal(node.getrawmempool(), [])
    assert_equal(node.getblockcount(), self.genesisactivationheight )  # tip should now be at genesis height

    # Transaction spending coin that was created after genesis
    tx5 = create_tx(tx4, 0, 1 * COIN)

    # Now flags should (besides SCRIPT_GENESIS and SCRIPT_VERIFY_SIGPUSHONLY) also
    # include SCRIPT_UTXO_AFTER_GENESIS, because TXO is also after genesis.
    res = self.verifyscript_check_ok(node, [{ "tx": ToHex(tx5), "n": 0, "reportflags": True }])
    assert_equal(res[0]["flags"], expected_flags + 524288 + 262144 + 32)

    #
    # Check timeout detection
    #
    self.verifyscript_check(node, ["skipped", "skipped"],
                            [{ "tx": ToHex(tx1), "n": 0 }, { "tx": ToHex(tx1), "n": 0 }],
                            True, 0)  # everything must be skipped if timeout is 0
    self.verifyscript_check(node, ["skipped", "skipped"],
                            [{ "tx": ToHex(tx1), "n": 0 }, { "tx": ToHex(tx1), "n": 0 }],
                            False, 0)

    # Restart the node to allow unlimited script size
    self.restart_node(0, self.extra_args[0] + ["-maxscriptsizepolicy=0"])

    # Create, send and mine transaction with large anyone-can-spend lock script
    tx6 = create_tx(tx_test, 2, 1 * COIN)
    tx6.vout[0] = CTxOut(
        int(1 * COIN),
        CScript([
            bytearray([42] * DEFAULT_SCRIPT_NUM_LENGTH_POLICY_AFTER_GENESIS),
            bytearray([42] * 200 * 1000), OP_MUL, OP_DROP, OP_TRUE
        ]))
    tx6.rehash()
    node.sendrawtransaction(ToHex(tx6), False, True)
    assert_equal(node.getrawmempool(), [tx6.hash])
    node.generate(1)
    assert_equal(node.getrawmempool(), [])

    # This transaction should take more than 100ms and less than 2000ms to verify
    # NOTE: If verification takes more or less time than this, some of the checks below will fail.
    #       This can, for example, happen on a very fast, very slow or busy machine.
    tx7 = create_tx(tx6, 0, 1 * COIN)

    # First tx is small and should be successfully verified.
    # Second tx is big and its verification should timeout.
    # Verification of third tx should be skipped even if stopOnFirstInvalid is false because maximum allowed total verification time was already exceeded.
    self.verifyscript_check(node, ["ok", "timeout", "skipped"],
                            [{ "tx": ToHex(tx1), "n": 0 },
                             { "tx": ToHex(tx7), "n": 0 },
                             { "tx": ToHex(tx1), "n": 0 }],
                            False, 100)

    # If we allow enough time, verification of second tx should still timeout because of maxstdtxvalidationduration.
    self.verifyscript_check(node, ["ok", "timeout", "ok"],
                            [{ "tx": ToHex(tx1), "n": 0 },
                             { "tx": ToHex(tx7), "n": 0 },
                             { "tx": ToHex(tx1), "n": 0 }],
                            False, 2000)

    # Restart the node with larger value for maxstdtxvalidationduration so that its
    # default value does not limit maximum execution time of single script.
    self.restart_node(
        0, self.extra_args[0] + [
            "-maxstdtxvalidationduration=2000",
            "-maxnonstdtxvalidationduration=2001",
            "-maxscriptsizepolicy=0"
        ])

    # Verification of all three scripts should now succeed if total timeout is large enough ...
    self.verifyscript_check(node, ["ok", "ok", "ok"],
                            [{ "tx": ToHex(tx1), "n": 0 },
                             { "tx": ToHex(tx7), "n": 0 },
                             { "tx": ToHex(tx1), "n": 0 }],
                            False, 2000)

    # ... and timeout as before if it is not
    self.verifyscript_check(node, ["ok", "timeout", "skipped"],
                            [{ "tx": ToHex(tx1), "n": 0 },
                             { "tx": ToHex(tx7), "n": 0 },
                             { "tx": ToHex(tx1), "n": 0 }],
                            False, 100)
def get_tests(self): # shorthand for functions block = lambda *a, **kw: self.chain.next_block( *a, coinbase_key=self.coinbase_key, simple_output=True, **kw) create_and_sign_tx = lambda *a, **kw: create_and_sign_transaction( *a, private_key=self.coinbase_key, **({k: v for k, v in kw.items() if not k == 'private_key'})) update_block = self.chain.update_block tip = self.chain.set_tip accepted = self.accepted rejected = self.rejected self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16)) save_spendable_output = self.chain.save_spendable_output get_spendable_output = self.chain.get_spendable_output # Create a new block block(0) yield accepted() test, out, _ = prepare_init_chain(self.chain, 99, 33) yield test # Start by building a couple of blocks on top (which output is spent is # in parentheses): # genesis -> b1 (0) -> b2 (1) block(1, spend=out[0]) save_spendable_output() yield accepted() block(2, spend=out[1]) yield accepted() save_spendable_output() # so fork like this: # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) # # Nothing should happen at this point. We saw b2 first so it takes # priority. tip(1) b3 = block(3, spend=out[1]) txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0) yield rejected() # Now we add another block to make the alternative chain longer. # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) -> b4 (2) block(4, spend=out[2]) yield accepted() # ... and back to the first chain. 
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b3 (1) -> b4 (2) tip(2) block(5, spend=out[2]) save_spendable_output() yield rejected() block(6, spend=out[3]) yield accepted() # Try to create a fork that double-spends # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) tip(5) block(7, spend=out[2]) yield rejected() block(8, spend=out[4]) yield rejected() # Try to create a block that has too much fee # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) tip(6) block(9, spend=out[4], additional_coinbase_value=1) yield rejected(RejectResult(16, b'bad-cb-amount')) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) tip(5) block(10, spend=out[3]) yield rejected() block(11, spend=out[4], additional_coinbase_value=1) yield rejected(RejectResult(16, b'bad-cb-amount')) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) # (b12 added last) # \-> b3 (1) -> b4 (2) tip(5) b12 = block(12, spend=out[3]) save_spendable_output() b13 = block(13, spend=out[4]) # Deliver the block header for b12, and the block b13. # b13 should be accepted but the tip won't advance until b12 is # delivered. yield TestInstance([[CBlockHeader(b12), None], [b13, False]]) save_spendable_output() # b14 is invalid, but the node won't know that until it tries to connect # Tip still can't advance because b12 is missing block(14, spend=out[5], additional_coinbase_value=1) yield rejected() yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13. 
# Add a block with MAX_BLOCK_SIGOPS_PER_MB and one with one more sigop # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) # \-> b3 (1) -> b4 (2) # Test that a block with a lot of checksigs is okay lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) tip(13) block(15, spend=out[5], script=lots_of_checksigs) yield accepted() save_spendable_output() # Test that a block with too many checksigs is rejected too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB)) block(16, spend=out[6], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Attempt to spend a transaction created on a different fork # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) # \-> b3 (1) -> b4 (2) tip(15) block(17, spend=txout_b3) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) tip(13) block(18, spend=txout_b3) yield rejected() block(19, spend=out[6]) yield rejected() # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) # \-> b3 (1) -> b4 (2) tip(15) block(20, spend=out[7]) yield rejected( RejectResult(16, b'bad-txns-premature-spend-of-coinbase')) # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) tip(13) block(21, spend=out[6]) yield rejected() block(22, spend=out[5]) yield rejected() # Create a block on either side of LEGACY_MAX_BLOCK_SIZE and make sure its accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 
(5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) tip(15) b23 = block(23, spend=out[6]) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b23.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) b23 = update_block(23, [tx]) # Make sure the math above worked out to produce a max-sized block assert_equal(len(b23.serialize()), LEGACY_MAX_BLOCK_SIZE) yield accepted() save_spendable_output() # Create blocks with a coinbase input script size out of range # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... (7) # \-> b3 (1) -> b4 (2) tip(15) b26 = block(26, spend=out[6]) b26.vtx[0].vin[0].scriptSig = b'\x00' b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. b26 = update_block(26, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b26 chain to make sure bitcoind isn't accepting b26 b27 = block(27, spend=out[7]) yield rejected(False) # Now try a too-large-coinbase script tip(15) b28 = block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() b28 = update_block(28, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b28 chain to make sure bitcoind isn't accepting b28 b29 = block(29, spend=out[7]) yield rejected(False) # b30 has a max-sized coinbase scriptSig. tip(23) b30 = block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() b30 = update_block(30, []) yield accepted() save_spendable_output() # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY # # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b36 (11) # \-> b34 (10) # \-> b32 (9) # # MULTISIG: each op code counts as 20 sigops. To create the edge case, # pack another 19 sigops at the end. 
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS_PER_MB - 1) // 20) + [OP_CHECKSIG] * 19) b31 = block(31, spend=out[8], script=lots_of_multisigs) assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS_PER_MB) yield accepted() save_spendable_output() # this goes over the limit because the coinbase has one sigop too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS_PER_MB // 20)) b32 = block(32, spend=out[9], script=too_many_multisigs) assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS_PER_MB + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKMULTISIGVERIFY tip(31) lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS_PER_MB - 1) // 20) + [OP_CHECKSIG] * 19) block(33, spend=out[9], script=lots_of_multisigs) yield accepted() save_spendable_output() too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB // 20)) block(34, spend=out[10], script=too_many_multisigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKSIGVERIFY tip(33) lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) b35 = block(35, spend=out[10], script=lots_of_checksigs) yield accepted() save_spendable_output() too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB)) block(36, spend=out[11], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Check spending of a transaction in a block which failed to connect # # b6 (3) # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b37 (11) # \-> b38 (11/37) # # save 37's spendable output, but then double-spend out11 to invalidate # the block tip(35) b37 = block(37, spend=out[11]) txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) tx = create_and_sign_tx(out[11].tx, out[11].n, 0) b37 = update_block(37, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # attempt to spend b37's first non-coinbase tx, at 
which point b37 was # still considered valid tip(35) block(38, spend=txout_b37) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Check P2SH SigOp counting # # # 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12) # \-> b40 (12) # # b39 - create some P2SH outputs that will require 6 sigops to spend: # # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL # tip(35) b39 = block(39) b39_outputs = 0 b39_sigops_per_output = 6 # Build the redeem script, hash it, use hash to create the p2sh script redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG]) redeem_script_hash = hash160(redeem_script) p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE # This must be signed because it is spending a coinbase spend = out[11] tx = create_tx(spend.tx, spend.n, 1, p2sh_script) tx.vout.append( CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE]))) sign_tx(tx, spend.tx, spend.n, self.coinbase_key) tx.rehash() b39 = update_block(39, [tx]) b39_outputs += 1 # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest # to OP_TRUE tx_new = None tx_last = tx total_size = len(b39.serialize()) while (total_size < LEGACY_MAX_BLOCK_SIZE): tx_new = create_tx(tx_last, 1, 1, p2sh_script) tx_new.vout.append( CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE]))) tx_new.rehash() total_size += len(tx_new.serialize()) if total_size >= LEGACY_MAX_BLOCK_SIZE: break b39.vtx.append(tx_new) # add tx to block tx_last = tx_new b39_outputs += 1 b39 = update_block(39, []) yield accepted() save_spendable_output() # Test sigops in P2SH redeem scripts # # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops. 
# The first tx has one sigop and then at the end we add 2 more to put us just over the max. # # b41 does the same, less one, so it has the maximum sigops permitted. # tip(39) b40 = block(40, spend=out[12]) sigops = get_legacy_sigopcount_block(b40) numTxes = (MAX_BLOCK_SIGOPS_PER_MB - sigops) // b39_sigops_per_output assert_equal(numTxes <= b39_outputs, True) lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) lastAmount = b40.vtx[1].vout[0].nValue new_txs = [] for i in range(1, numTxes + 1): tx = CTransaction() tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) tx.vin.append(CTxIn(lastOutpoint, b'')) # second input is corresponding P2SH output from b39 tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) # Note: must pass the redeem_script (not p2sh_script) to the # signature hash function sighash = SignatureHashForkId(redeem_script, tx, 1, SIGHASH_ALL | SIGHASH_FORKID, lastAmount) sig = self.coinbase_key.sign(sighash) + bytes( bytearray([SIGHASH_ALL | SIGHASH_FORKID])) scriptSig = CScript([sig, redeem_script]) tx.vin[1].scriptSig = scriptSig tx.rehash() new_txs.append(tx) lastOutpoint = COutPoint(tx.sha256, 0) lastAmount = tx.vout[0].nValue b40_sigops_to_fill = MAX_BLOCK_SIGOPS_PER_MB - \ (numTxes * b39_sigops_per_output + sigops) + 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) tx.rehash() new_txs.append(tx) update_block(40, new_txs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # same as b40, but one less sigop tip(39) b41 = block(41, spend=None) update_block(41, b40.vtx[1:-1]) b41_sigops_to_fill = b40_sigops_to_fill - 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) tx.rehash() update_block(41, [tx]) yield accepted() # Fork off of b39 to create a constant base again # # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) # \-> b41 (12) # tip(39) block(42, 
spend=out[12]) yield rejected() save_spendable_output() block(43, spend=out[13]) yield accepted() save_spendable_output() # Test a number of really invalid scenarios # # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) # \-> ??? (15) # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to # make sure this works. height = self.chain.block_heights[self.chain.tip.sha256] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() b44.nTime = self.chain.tip.nTime + 1 b44.hashPrevBlock = self.chain.tip.sha256 b44.nBits = 0x207fffff b44.vtx.append(coinbase) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() self.chain.tip = b44 self.chain.block_heights[b44.sha256] = height self.chain.blocks[44] = b44 yield accepted() # A block with a non-coinbase as the first tx non_coinbase = create_tx(out[15].tx, out[15].n, 1) b45 = CBlock() b45.nTime = self.chain.tip.nTime + 1 b45.hashPrevBlock = self.chain.tip.sha256 b45.nBits = 0x207fffff b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() self.chain.block_heights[ b45.sha256] = self.chain.block_heights[self.chain.tip.sha256] + 1 self.chain.tip = b45 self.chain.blocks[45] = b45 yield rejected(RejectResult(16, b'bad-cb-missing')) # A block with no txns tip(44) b46 = CBlock() b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() self.chain.block_heights[ b46.sha256] = self.chain.block_heights[b44.sha256] + 1 self.chain.tip = b46 assert 46 not in self.chain.blocks self.chain.blocks[46] = b46 s = ser_uint256(b46.hashMerkleRoot) yield rejected(RejectResult(16, b'bad-cb-missing')) # A block with invalid work tip(44) b47 = block(47, do_solve_block=False) target = uint256_from_compact(b47.nBits) while b47.sha256 < target: # changed > to < b47.nNonce 
+= 1 b47.rehash() yield rejected(RejectResult(16, b'high-hash')) # A block with timestamp > 2 hrs in the future tip(44) b48 = block(48, do_solve_block=False) b48.nTime = int(time.time()) + 60 * 60 * 3 b48.solve() yield rejected(RejectResult(16, b'time-too-new')) # A block with an invalid merkle hash tip(44) b49 = block(49) b49.hashMerkleRoot += 1 b49.solve() yield rejected(RejectResult(16, b'bad-txnmrklroot')) # A block with an incorrect POW limit tip(44) b50 = block(50) b50.nBits = b50.nBits - 1 b50.solve() yield rejected(RejectResult(16, b'bad-diffbits')) # A block with two coinbase txns tip(44) b51 = block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) b51 = update_block(51, [cb2]) yield rejected(RejectResult(16, b'bad-tx-coinbase')) # A block w/ duplicate txns # Note: txns have to be in the right position in the merkle tree to # trigger this error tip(44) b52 = block(52, spend=out[15]) tx = create_tx(b52.vtx[1], 0, 1) b52 = update_block(52, [tx, tx]) yield rejected(RejectResult(16, b'bad-txns-duplicate')) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) # tip(43) block(53, spend=out[14]) yield rejected() # rejected since b44 is at same height save_spendable_output() # invalid timestamp (b35 is 5 blocks back, so its time is # MedianTimePast) b54 = block(54, spend=out[15]) b54.nTime = b35.nTime - 1 b54.solve() yield rejected(RejectResult(16, b'time-too-old')) # valid timestamp tip(53) b55 = block(55, spend=out[15]) b55.nTime = b35.nTime update_block(55, []) yield accepted() save_spendable_output() # Test CVE-2012-2459 # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) # \-> b57 (16) # \-> b56p2 (16) # \-> b56 (16) # # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without # affecting the merkle root of a block, while still invalidating it. # See: src/consensus/merkle.h # # b57 has three txns: coinbase, tx, tx1. 
The merkle root computation will duplicate tx. # Result: OK # # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle # root but duplicate transactions. # Result: Fails # # b57p2 has six transactions in its merkle tree: # - coinbase, tx, tx1, tx2, tx3, tx4 # Merkle root calculation will duplicate as necessary. # Result: OK. # # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates # that the error was caught early, avoiding a DOS vulnerability.) # b57 - a good block with 2 txs, don't submit until end tip(55) b57 = block(57) tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) b57 = update_block(57, [tx, tx1]) # b56 - copy b57, add a duplicate tx tip(55) b56 = copy.deepcopy(b57) self.chain.blocks[56] = b56 assert_equal(len(b56.vtx), 3) b56 = update_block(56, [tx1]) assert_equal(b56.hash, b57.hash) yield rejected(RejectResult(16, b'bad-txns-duplicate')) # b57p2 - a good block with 6 tx'es, don't submit until end tip(55) b57p2 = block("57p2") tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) tx2 = create_tx(tx1, 0, 1) tx3 = create_tx(tx2, 0, 1) tx4 = create_tx(tx3, 0, 1) b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4]) # b56p2 - copy b57p2, duplicate two non-consecutive tx's tip(55) b56p2 = copy.deepcopy(b57p2) self.chain.blocks["b56p2"] = b56p2 assert_equal(b56p2.hash, b57p2.hash) assert_equal(len(b56p2.vtx), 6) b56p2 = update_block("b56p2", [tx3, tx4]) yield rejected(RejectResult(16, b'bad-txns-duplicate')) tip("57p2") yield accepted() tip(57) yield rejected() # rejected because 57p2 seen first save_spendable_output() # Test a few invalid tx types # # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> ??? 
(17) # # tx with prevout.n out of range tip(57) b58 = block(58, spend=out[17]) tx = CTransaction() assert (len(out[17].tx.vout) < 42) tx.vin.append( CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) tx.calc_sha256() b58 = update_block(58, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # tx with output value > input value out of range tip(57) b59 = block(59) tx = create_and_sign_tx(out[17].tx, out[17].n, 51 * COIN) b59 = update_block(59, [tx]) yield rejected(RejectResult(16, b'bad-txns-in-belowout')) # reset to good chain tip(57) b60 = block(60, spend=out[17]) yield accepted() save_spendable_output() # Test BIP30 # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b61 (18) # # Blocks are not allowed to contain a transaction whose id matches that of an earlier, # not-fully-spent transaction in the same chain. To test, make identical coinbases; # the second one should be rejected. 
# tip(60) b61 = block(61, spend=out[18]) b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[ 0].scriptSig # equalize the coinbases b61.vtx[0].rehash() b61 = update_block(61, []) assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) yield rejected(RejectResult(16, b'bad-txns-BIP30')) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b62 (18) # tip(60) b62 = block(62) tx = CTransaction() tx.nLockTime = 0xffffffff # this locktime is non-final assert (out[18].n < len(out[18].tx.vout)) tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert (tx.vin[0].nSequence < 0xffffffff) tx.calc_sha256() b62 = update_block(62, [tx]) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) # Test a non-final coinbase is also rejected # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b63 (-) # tip(60) b63 = block(63) b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() b63 = update_block(63, []) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) # This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > LEGACY_MAX_BLOCK_SIZE with the bloated varint, but <= LEGACY_MAX_BLOCK_SIZE without the bloated varint, # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. # # What matters is that the receiving node should not reject the bloated block, and then reject the canonical # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) 
# # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) # \ # b64a (18) # b64a is a bloated block (non-canonical varint) # b64 is a good block (same as b64 but w/ canonical varint) # tip(60) regular_block = block("64a", spend=out[18]) # make it a "broken_block," with non-canonical serialization b64a = CBrokenBlock(regular_block) b64a.initialize(regular_block) self.chain.blocks["64a"] = b64a self.chain.tip = b64a tx = CTransaction() # use canonical serialization to calculate size script_length = LEGACY_MAX_BLOCK_SIZE - \ len(b64a.normal_serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) b64a = update_block("64a", [tx]) assert_equal(len(b64a.serialize()), LEGACY_MAX_BLOCK_SIZE + 8) yield TestInstance([[self.chain.tip, None]]) # comptool workaround: to make sure b64 is delivered, manually erase # b64a from blockstore self.test.block_store.erase(b64a.sha256) tip(60) b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) assert_equal(len(b64.serialize()), LEGACY_MAX_BLOCK_SIZE) self.chain.blocks[64] = b64 update_block(64, []) yield accepted() save_spendable_output() # Spend an output created in the block itself # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # tip(64) b65 = block(65) tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 0) update_block(65, [tx1, tx2]) yield accepted() save_spendable_output() # Attempt to spend an output created later in the same block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b66 (20) tip(65) b66 = block(66) tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 1) update_block(66, [tx2, tx1]) yield rejected(RejectResult(16, 
b'bad-txns-inputs-missingorspent')) # Attempt to double-spend a transaction created in a block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b67 (20) # # tip(65) b67 = block(67) tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 1) tx3 = create_and_sign_tx(tx1, 0, 2) update_block(67, [tx1, tx2, tx3]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # More tests of block subsidy # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b68 (20) # # b68 - coinbase with an extra 10 satoshis, # creates a tx that has 9 satoshis from out[20] go to fees # this fails because the coinbase is trying to claim 1 satoshi too much in fees # # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee # this succeeds # tip(65) b68 = block(68, additional_coinbase_value=10) tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 9) update_block(68, [tx]) yield rejected(RejectResult(16, b'bad-cb-amount')) tip(65) b69 = block(69, additional_coinbase_value=10) tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 10) update_block(69, [tx]) yield accepted() save_spendable_output() # Test spending the outpoint of a non-existent transaction # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b70 (21) # tip(69) block(70, spend=out[21]) bogus_tx = CTransaction() bogus_tx.sha256 = uint256_from_str( b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c" ) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) tx.vout.append(CTxOut(1, b"")) update_block(70, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> 
b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b71 (21) # # b72 is a good block. # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b71. # tip(69) b72 = block(72) tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2) tx2 = create_and_sign_tx(tx1, 0, 1) b72 = update_block(72, [tx1, tx2]) # now tip is 72 b71 = copy.deepcopy(b72) b71.vtx.append(tx2) # add duplicate tx2 self.chain.block_heights[b71.sha256] = self.chain.block_heights[ b69.sha256] + 1 # b71 builds off b69 self.chain.blocks[71] = b71 assert_equal(len(b71.vtx), 4) assert_equal(len(b72.vtx), 3) assert_equal(b72.sha256, b71.sha256) tip(71) yield rejected(RejectResult(16, b'bad-txns-duplicate')) tip(72) yield accepted() save_spendable_output() # Test some invalid scripts and MAX_BLOCK_SIGOPS_PER_MB # # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b** (22) # # b73 - tx with excessive sigops that are placed after an excessively large script element. # The purpose of the test is to make sure those sigops are counted. 
# # script is a bytearray of size 20,526 # # bytearray[0-19,998] : OP_CHECKSIG # bytearray[19,999] : OP_PUSHDATA4 # bytearray[20,000-20,003]: 521 (max_script_element_size_before_genesis+1, in little-endian format) # bytearray[20,004-20,525]: unread data (script_element) # bytearray[20,526] : OP_CHECKSIG (this puts us over the limit) # tip(72) b73 = block(73) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + \ MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 1 + 5 + 1 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = int("4e", 16) # OP_PUSHDATA4 element_size = MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 1 a[MAX_BLOCK_SIGOPS_PER_MB] = element_size % 256 a[MAX_BLOCK_SIGOPS_PER_MB + 1] = element_size // 256 a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0 a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0 tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b73 = update_block(73, [tx]) assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS_PER_MB + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # b74/75 - if we push an invalid script element, all prevous sigops are counted, # but sigops after the element are not counted. # # The invalid script element is that the push_data indicates that # there will be a large amount of data (0xffffff bytes), but we only # provide a much smaller number. These bytes are CHECKSIGS so they would # cause b75 to fail for excessive sigops, if those bytes were counted. 
# # b74 fails because we put MAX_BLOCK_SIGOPS_PER_MB+1 before the element # b75 succeeds because we put MAX_BLOCK_SIGOPS_PER_MB before the element # # tip(72) b74 = block(74) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + \ MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 42 # total = 20,561 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB] = 0x4e a[MAX_BLOCK_SIGOPS_PER_MB + 1] = 0xfe a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 4] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b74 = update_block(74, [tx]) yield rejected(RejectResult(16, b'bad-blk-sigops')) tip(72) b75 = block(75) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 42 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = 0x4e a[MAX_BLOCK_SIGOPS_PER_MB] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 1] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b75 = update_block(75, [tx]) yield accepted() save_spendable_output() # Check that if we push an element filled with CHECKSIGs, they are not # counted tip(75) b76 = block(76) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + MAX_SCRIPT_ELEMENT_SIZE_BEFORE_GENESIS + 1 + 5 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) b76 = update_block(76, [tx]) yield accepted() save_spendable_output() # Test transaction resurrection # # -> b77 (24) -> b78 (25) -> b79 (26) # \-> b80 (25) -> b81 (26) -> b82 (27) # # b78 creates a tx, which is spent in b79. After b82, both should be in mempool # # The tx'es must be unsigned and pass the node's mempool policy. 
It is unsigned for the # rather obscure reason that the Python signature code does not distinguish between # Low-S and High-S values (whereas the bitcoin code has custom code which does so); # as a result of which, the odds are 50% that the python code will use the right # value and the transaction will be accepted into the mempool. Until we modify the # test framework to support low-S signing, we are out of luck. # # To get around this issue, we construct transactions which are not signed and which # spend to OP_TRUE. If the standard-ness rules change, this test would need to be # updated. (Perhaps to spend to a P2SH OP_TRUE script) # tip(76) block(77) tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10 * COIN) update_block(77, [tx77]) yield accepted() save_spendable_output() block(78) tx78 = create_tx(tx77, 0, 9 * COIN) update_block(78, [tx78]) yield accepted() block(79) tx79 = create_tx(tx78, 0, 8 * COIN) update_block(79, [tx79]) yield accepted() # mempool should be empty assert_equal(len(self.nodes[0].getrawmempool()), 0) tip(77) block(80, spend=out[25]) yield rejected() save_spendable_output() block(81, spend=out[26]) yield rejected() # other chain is same length save_spendable_output() block(82, spend=out[27]) yield accepted() # now this chain is longer, triggers re-org save_spendable_output() # now check that tx78 and tx79 have been put back into the peer's # mempool mempool = self.nodes[0].getrawmempool() assert_equal(len(mempool), 2) assert (tx78.hash in mempool) assert (tx79.hash in mempool) # Test invalid opcodes in dead execution paths. 
# # -> b81 (26) -> b82 (27) -> b83 (28) # b83 = block(83) op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] script = CScript(op_codes) tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) tx2.vin[0].scriptSig = CScript([OP_FALSE]) tx2.rehash() update_block(83, [tx1, tx2]) yield accepted() save_spendable_output() # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) # # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) # \-> b85 (29) -> b86 (30) \-> b89a (32) # # b84 = block(84) tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.calc_sha256() sign_tx(tx1, out[29].tx, out[29].n, self.coinbase_key) tx1.rehash() tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN])) tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN])) tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE])) tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN])) update_block(84, [tx1, tx2, tx3, tx4, tx5]) yield accepted() save_spendable_output() tip(83) block(85, spend=out[29]) yield rejected() block(86, spend=out[30]) yield accepted() tip(84) block(87, spend=out[30]) yield rejected() save_spendable_output() block(88, spend=out[31]) yield accepted() save_spendable_output() # trying to spend the OP_RETURN output is rejected block("89a", spend=out[32]) tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) update_block("89a", [tx]) yield rejected() # Test re-org of a week's worth of blocks (1088 blocks) # This test takes a minute or two and can be accomplished in memory # if self.options.runbarelyexpensive: tip(88) LARGE_REORG_SIZE = 1088 test1 = 
TestInstance(sync_every_block=False, sync_timeout=300, timeout_to_requested_block=600) spend = out[32] for i in range(89, LARGE_REORG_SIZE + 89): b = block(i, spend) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) b = update_block(i, [tx]) assert_equal(len(b.serialize()), LEGACY_MAX_BLOCK_SIZE) test1.blocks_and_transactions.append([self.chain.tip, True]) save_spendable_output() spend = self.chain.get_spendable_output() yield test1 chain1_tip = i # now create alt chain of same length tip(88) test2 = TestInstance(sync_every_block=False) for i in range(89, LARGE_REORG_SIZE + 89): block("alt" + str(i)) test2.blocks_and_transactions.append([self.chain.tip, False]) yield test2 # extend alt chain to trigger re-org block("alt" + str(chain1_tip + 1)) yield accepted() # ... and re-org back to the first chain tip(chain1_tip) block(chain1_tip + 1) yield rejected() block(chain1_tip + 2) yield accepted() chain1_tip += 2
def run_test(self):
    """Exercise the 'dsdetected' P2P message and its webhook notification.

    Outline:
      1. Build a chain containing a transaction (utxoTx) with many spendable
         outputs, to be double-spent in later steps.
      2. Check msg_dsdetected serialization round-trips.
      3. Send a series of malformed/invalid dsdetected messages; the webhook
         must NOT be notified and the peer's banscore must increase.
      4. Send valid dsdetected messages; the webhook must be notified, and a
         reordered duplicate must be ignored.
      5. Send dsdetected for randomly generated valid block trees; each must
         produce a webhook notification.
    """
    # Turn on a webhook server
    self.start_webhook_server()

    # Create a P2P connection
    node = self.nodes[0]
    peer = NodeConnCB()
    connection = NodeConn('127.0.0.1', p2p_port(0), node, peer)
    peer.add_connection(connection)
    NetworkThread().start()
    peer.wait_for_verack()

    # Create an initial block with a coinbase we will split into multiple utxos
    initialBlock, _ = make_block(connection)
    coinbaseTx = initialBlock.vtx[0]

    send_by_headers(connection, [initialBlock], do_send_blocks=True)
    wait_for_tip(connection, initialBlock.hash)

    # Mine 101 blocks so the coinbase above matures and becomes spendable,
    # then reconstruct the tip block object from RPC (we need its height
    # and hash to build on top of it via P2P).
    node.generate(101)
    block101hex = node.getblock(node.getbestblockhash(), False)
    block101dict = node.getblock(node.getbestblockhash(), 2)
    block101 = FromHex(CBlock(), block101hex)
    block101.height = block101dict['height']
    block101.rehash()

    # Create a block with a transaction spending coinbaseTx of a previous block
    # and making multiple outputs for future transactions to spend
    utxoBlock, _ = make_block(connection, parent_block=block101)
    utxoTx = create_tx(coinbaseTx, 0, 1 * COIN)
    # Create additional 48 outputs (we leave 1 COIN as fee)
    for _ in range(48):
        utxoTx.vout.append(CTxOut(1 * COIN, CScript([OP_TRUE])))
    # Add to block
    utxoTx.rehash()
    utxoBlock.vtx.append(utxoTx)
    utxoBlock.hashMerkleRoot = utxoBlock.calc_merkle_root()
    utxoBlock.solve()

    send_by_headers(connection, [utxoBlock], do_send_blocks=True)
    wait_for_tip(connection, utxoBlock.hash)

    # Make sure serialization/deserialization works as expected.
    # Create dsdetected message. The content is not important here.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(utxoBlock), CBlockHeader(initialBlock)],
            DSMerkleProof(1, utxoTx, utxoBlock.hashMerkleRoot,
                          [MerkleProofNode(utxoBlock.vtx[0].sha256)]))
    ])
    dsdBytes = dsdMessage.serialize()
    dsdMessageDeserialized = msg_dsdetected()
    dsdMessageDeserialized.deserialize(BytesIO(dsdBytes))
    assert_equal(str(dsdMessage), str(dsdMessageDeserialized))

    # Send a message containing random bytes. Webhook should not receive the notification.
    peer.send_and_ping(fake_msg_dsdetected())
    assert_equal(self.get_JSON_notification(), None)

    # Create two blocks with transactions spending the same utxo
    # (blockF/txF is held back for the "too old notification" test later).
    blockA, _ = make_block(connection, parent_block=utxoBlock)
    blockB, _ = make_block(connection, parent_block=utxoBlock)
    blockF, _ = make_block(connection, parent_block=utxoBlock)
    txA = create_tx(utxoBlock.vtx[1], 0, int(0.8 * COIN))
    txB = create_tx(utxoBlock.vtx[1], 0, int(0.9 * COIN))
    txF = create_tx(utxoBlock.vtx[1], 0, int(0.7 * COIN))
    txA.rehash()
    txB.rehash()
    txF.rehash()
    blockA.vtx.append(txA)
    blockB.vtx.append(txB)
    blockF.vtx.append(txF)
    blockA.hashMerkleRoot = blockA.calc_merkle_root()
    blockB.hashMerkleRoot = blockB.calc_merkle_root()
    blockF.hashMerkleRoot = blockF.calc_merkle_root()
    blockA.calc_sha256()
    blockB.calc_sha256()
    blockF.calc_sha256()
    blockA.solve()
    blockB.solve()
    blockF.solve()

    # Banscore before the invalid-message series; each rejected dsdetected
    # message is expected to add 10 to it (checked below).
    start_banscore = node.getpeerinfo()[0]['banscore']

    # Webhook should not receive the notification if we send dsdetected message with only one block detail.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message with two block details and one is containing no headers.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message where last headers in block details do not have a common previous block hash.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(utxoBlock)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message where block details does not have headers in proper order.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(utxoBlock), CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message with the empty merkle proof.
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails([CBlockHeader(blockB)], DSMerkleProof())
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message with the wrong index in the merkle proof (merkle root validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(0, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message with the wrong transaction in the merkle proof (merkle root validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txA, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message with the wrong merkle root (merkle root validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message with the wrong merkle proof (merkle root validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockA.hashMerkleRoot)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message with the merkle proof having an additional unexpected node (merkle root validation should fail)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails([CBlockHeader(blockB)],
                     DSMerkleProof(1, txB, blockB.hashMerkleRoot, [
                         MerkleProofNode(blockB.vtx[0].sha256),
                         MerkleProofNode(blockA.hashMerkleRoot)
                     ]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message with the valid proof, but transaction is a coinbase transaction
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(0, blockB.vtx[0], blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[1].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if we send dsdetected message with transactions that are not double spending
    # Create a block similar as before, but with a transaction spending a different utxo
    blockC, _ = make_block(connection, parent_block=utxoBlock)
    txC = create_tx(utxoBlock.vtx[1], 1, int(0.7 * COIN))
    blockC.vtx.append(txC)
    blockC.hashMerkleRoot = blockC.calc_merkle_root()
    blockC.solve()
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockC)],
            DSMerkleProof(1, txC, blockC.hashMerkleRoot,
                          [MerkleProofNode(blockC.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if the two double spending transactions are actually the same transaction (having same txid)
    # Create a block similar as before, but containing the very same txA
    blockD, _ = make_block(connection, parent_block=utxoBlock)
    blockD.vtx.append(txA)
    blockD.hashMerkleRoot = blockD.calc_merkle_root()
    blockD.solve()
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockD)],
            DSMerkleProof(1, txA, blockD.hashMerkleRoot,
                          [MerkleProofNode(blockD.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Webhook should not receive the notification if the header cannot pass PoW.
    # Note that PoW is so easy in regtest that a random nonce can often satisfy
    # it, hence we have to select the nonce carefully: keep incrementing our
    # candidate until solve() skips past it, which proves the candidate itself
    # does NOT satisfy PoW, then force that failing nonce into the header.
    blockE, _ = make_block(connection, parent_block=utxoBlock)
    blockE.vtx.append(txB)
    blockE.hashMerkleRoot = blockE.calc_merkle_root()
    nonce = blockE.nNonce
    while True:
        blockE.solve()
        if blockE.nNonce > nonce:
            blockE.nNonce = nonce
            break
        nonce += 1
    blockE.nNonce = nonce
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockE)],
            DSMerkleProof(1, txB, blockE.hashMerkleRoot,
                          [MerkleProofNode(blockE.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Each rejected dsdetected message adds 10 to banscore.
    # NOTE(review): 14 invalid messages were sent after start_banscore was
    # sampled but only 13 banscore increments are expected here — presumably
    # one of the rejections (e.g. the not-actually-double-spending case) does
    # not penalize the peer; confirm against the node's rejection handling.
    end_banscore = node.getpeerinfo()[0]['banscore']
    assert ((end_banscore - start_banscore) / 10 == 13)  # because we have 13 negative tests so far

    # Finally, webhook should receive the notification if we send a proper dsdetected message
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    json_notification = self.get_JSON_notification()
    # remove divergentBlockHash so we can compare with the ds-message
    assert (json_notification != None)
    for e in json_notification['blocks']:
        del e['divergentBlockHash']
    assert_equal(str(dsdMessage), str(msg_dsdetected(json_notification=json_notification)))

    # Repeat previous test but change the order of the BlockDetails, the node should identify this as a duplicate
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockB)],
            DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                          [MerkleProofNode(blockB.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Repeat previous test but generate many blocks in the node to age the notification message.
    # Very old notification messages shall be ignored. We use the same thresholds as safe mode.
    # We will hardcode this threshold for now until the branch we depend on is merged.
    node.generate(289)
    dsdMessage = msg_dsdetected(blocksDetails=[
        BlockDetails(
            [CBlockHeader(blockA)],
            DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                          [MerkleProofNode(blockA.vtx[0].sha256)])),
        BlockDetails(
            [CBlockHeader(blockF)],
            DSMerkleProof(1, txF, blockF.hashMerkleRoot,
                          [MerkleProofNode(blockF.vtx[0].sha256)]))
    ])
    peer.send_and_ping(dsdMessage)
    assert_equal(self.get_JSON_notification(), None)

    # Create number of random valid block trees and send dsdetected P2P message for each
    maxNumberOfBranches = 10
    maxNumberOfBlocksPerBranch = 30
    for _ in range(10):
        blockTree = self.createRandomBlockTree(maxNumberOfBranches, maxNumberOfBlocksPerBranch, utxoBlock, [utxoBlock.vtx[1]])
        dsdMessage = self.createDsDetectedMessageFromBlockTree(blockTree)
        peer.send_and_ping(dsdMessage)
        # Notification should be received as generated dsdetected message is valid
        json_notification = self.get_JSON_notification()
        # remove divergentBlockHash so we can compare with the ds-message
        assert (json_notification != None)
        for e in json_notification['blocks']:
            del e['divergentBlockHash']
        assert_equal(
            str(dsdMessage),
            str(msg_dsdetected(json_notification=json_notification)))

    self.stop_webhook_server()