Example 1
    def check_ds_enabled(self, utxo):
        # tx1 is dsnt-enabled
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [0]).serialize()
                ]))
        ]
        tx1 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        # tx2 spends the same output as tx1 (double spend)
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [CTxOut(25, CScript([OP_TRUE]))]
        tx2 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx2.hash),
            "/node0"))
        wait_until(lambda: check_for_log_msg(
            self, "Script verification for double-spend passed", "/node0"))

        return tx1.hash
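
These examples repeatedly call check_for_log_msg to scan a node's log for a substring. A minimal sketch of such a helper is shown below for orientation only; the directory layout and log file name used here ("bitcoind.log" under the node's datadir) are assumptions and may not match the real test framework.

import os

def check_for_log_msg(test, log_msg, node_dir):
    # Sketch only: grep the node's log under the test's tmpdir for a substring.
    # The log file name ("bitcoind.log") and layout are assumptions.
    log_path = os.path.join(test.options.tmpdir + node_dir, "regtest", "bitcoind.log")
    if not os.path.isfile(log_path):
        return False
    with open(log_path, "r", encoding="utf-8", errors="ignore") as f:
        return log_msg in f.read()
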
Example 2
    def check_ipv6(self, utxo):

        # tx1 is dsnt-enabled
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(129, [LOCAL_HOST_IPV6], [0]).serialize()
                ]))
        ]
        tx1 = self.create_and_send_transaction(vin, vout)
        tx1.rehash()
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        # spend the same output as tx1 (double spend)
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [CTxOut(25, CScript([OP_TRUE]))]
        tx2 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx2.hash),
            "/node0"))
        wait_until(lambda: check_for_log_msg(self, "Submitted proof ok to ::1",
                                             "/node0"))
        wait_until(lambda: self.check_tx_received(tx1.hash))
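
The dsnt-enabled outputs embed a CallbackMessage after the 0x746e7364 ("dsnt") protocol marker. The layout sketched below is only inferred from how the tests construct it (a version byte whose high bit selects IPv6, a list of endpoint addresses and a list of protected input indices); the actual wire format and field widths are assumptions.

import socket
from struct import pack

class CallbackMessage:
    # Hypothetical serialization, inferred from CallbackMessage(1, [ip], [0]) and
    # CallbackMessage(129, [ipv6], [0]) above; field widths are assumptions.
    IPV6_FLAG = 0x80

    def __init__(self, version, addresses, inputs):
        self.version = version      # high bit set => addresses are IPv6
        self.addresses = addresses  # endpoint IPs to notify
        self.inputs = inputs        # indices of the protected inputs

    def serialize(self):
        family = socket.AF_INET6 if self.version & self.IPV6_FLAG else socket.AF_INET
        data = pack("<B", self.version)
        data += pack("<B", len(self.addresses))
        for addr in self.addresses:
            data += socket.inet_pton(family, addr)
        data += pack("<B", len(self.inputs))
        for n in self.inputs:
            data += pack("<I", n)
        return data
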
Example 3
    def check_long_lasting_transactions(self):

        assert(not check_for_log_msg(self, "Script verification for double-spend cancelled", "/node0"))

        # Create a funding transaction that will provide funds for other transactions
        ftx = CTransaction()
        ftx.vout.append(CTxOut(1000000, CScript([bytearray([42] * DEFAULT_SCRIPT_NUM_LENGTH_POLICY_AFTER_GENESIS), bytearray([42] * 200 * 1000), OP_MUL, OP_DROP, OP_TRUE])))
        ftxHex = self.nodes[0].fundrawtransaction(ToHex(ftx),{ 'changePosition' : len(ftx.vout)})['hex']
        ftxHex = self.nodes[0].signrawtransaction(ftxHex)['hex']
        ftx = FromHex(CTransaction(), ftxHex)
        ftx.rehash()
        self.node0.send_message(msg_tx(ftx))
        wait_until(lambda: ftx.hash in self.nodes[0].getrawmempool())
        self.nodes[0].generate(1)

        # Create a transaction that depends on the funding transaction that has just been submitted
        vin = [
            CTxIn(COutPoint(ftx.sha256, 0), b'')
        ]
        vout = [
            CTxOut(25, CScript([OP_FALSE, OP_RETURN, 0x746e7364, CallbackMessage(1, [LOCAL_HOST_IP], [0]).serialize()]))
        ]
        tx1 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        self.stop_node(0)
        # Restart bitcoind with parameters that reduce the transaction validation time. Also set dsnotifylevel to 1, which means non-standard transactions will not even be validated.
        self.start_node(0, extra_args=['-dsendpointport=8080', '-banscore=100000', '-genesisactivationheight=1', '-maxscriptsizepolicy=0', "-maxnonstdtxvalidationduration=11", "-dsnotifylevel=1"])

        self.createConnection()
        # Create double spend of tx1
        vin = [
            CTxIn(COutPoint(ftx.sha256, 0), b'')
        ]
        vout = [
            CTxOut(25, CScript([OP_TRUE]))
        ]
        tx2 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(self, "txn= {} rejected txn-mempool-conflict".format(tx2.hash), "/node0"))
        wait_until(lambda: check_for_log_msg(self, "Ignoring txn {} conflicting input {} because it is non-standard".format(tx2.hash, 0), "/node0"))

        self.stop_node(0)
        # Restart bitcoind with parameters that reduce the transaction validation time. Also set dsnotifylevel to 2, which means non-standard transactions will be validated.
        self.start_node(0, extra_args=['-dsendpointport=8080', '-banscore=100000', '-genesisactivationheight=1', '-maxscriptsizepolicy=0', "-maxnonstdtxvalidationduration=11", "-dsnotifylevel=2"])

        self.createConnection()
        vin = [
            CTxIn(COutPoint(ftx.sha256, 0), b'')
        ]
        vout = [
            CTxOut(25, CScript([OP_TRUE]))
        ]
        tx3 = self.create_and_send_transaction(vin, vout)
        
        wait_until(lambda: check_for_log_msg(self, "Script verification for double-spend was cancelled", "/node0"))

        # Wait for the callback service to process requests
        self.check_tx_not_received(tx3.hash)
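
create_and_send_transaction is assumed to do no more than assemble a CTransaction from the given inputs and outputs and push it over the test's P2P connection; a rough sketch under that assumption:

    def create_and_send_transaction(self, vin, vout):
        # Sketch only: build the transaction, compute its hash, send it via P2P.
        tx = CTransaction()
        tx.vin = vin
        tx.vout = vout
        tx.rehash()
        self.node0.send_message(msg_tx(tx))
        return tx
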
Example 4
    def check_doublespend_queue_size(self, utxo):
        self.stop_node(0)
        # Restart bitcoind with low limit on double-spend queue length
        self.start_node(0,
                        extra_args=[
                            '-dsendpointport=8080', '-whitelist=127.0.0.1',
                            '-genesisactivationheight=1',
                            '-maxscriptsizepolicy=0',
                            '-maxnonstdtxvalidationduration=15000',
                            '-maxtxnvalidatorasynctasksrunduration=15001',
                            '-dsattemptqueuemaxmemory=1KB', '-dsnotifylevel=2'
                        ])
        self.createConnection()

        assert (not check_for_log_msg(
            self, "Dropping new double-spend because the queue is full",
            "/node0"))

        # tx1 is dsnt-enabled
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [0]).serialize()
                ]))
        ]
        tx1 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        # tx2 spends the same output as tx1 (double spend)
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            # Larger size than the queue is configured for
            CTxOut(25, CScript([OP_TRUE] * 1100))
        ]
        tx2 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx2.hash),
            "/node0"))
        wait_until(lambda: check_for_log_msg(
            self, "Dropping new double-spend because the queue is full",
            "/node0"))
Example 5
    def check_invalid_transactions(self, utxo):

        assert (not check_for_log_msg(
            self, "Script verification for double-spend failed", "/node0"))

        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff)
        ]
        vout = [
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [0]).serialize()
                ]))
        ]
        tx1 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        # Create tx2 manually: the signrawtransaction RPC call would rewrite the input script.
        # Tweak the input script so it is invalid (non-push data). The double-spend will still be detected and the script validated.
        tx2 = CTransaction()
        tx2.vin.append(
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_ADD]), 0xffffffff))
        tx2.vout.append(CTxOut(25, CScript([OP_FALSE])))
        tx2.calc_sha256()
        self.node0.send_message(msg_tx(tx2))

        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx2.hash),
            "/node0"))
        wait_until(lambda: check_for_log_msg(
            self, "Script verification for double-spend failed", "/node0"))

        self.check_tx_not_received(tx1.hash)

        # Check that another correct double-spend for tx1 does trigger a notification
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff)
        ]
        vout = [CTxOut(25, CScript([OP_TRUE]))]
        tx2 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx2.hash),
            "/node0"))
        wait_until(lambda: self.check_tx_received(tx1.hash))
    def run_test(self):
        # Start by creating some coinbases we can spend later
        self.nodes[0].generate(150)
        sync_blocks(self.nodes[0:3])

        # Connect node2 just to node1 so that it is forced to request the next blocks
        # from a slow sending peer
        disconnect_nodes_bi(self.nodes, 0, 2)
        disconnect_nodes_bi(self.nodes, 1, 2)
        connect_nodes(self.nodes, 2, 1)

        # Extend the chain with a big block and some more small blocks
        utxos = []
        mine_large_block(self.nodes[0], utxos)
        large_block_hash = self.nodes[0].getbestblockhash()
        self.nodes[0].generate(5)
        sync_blocks(self.nodes[0:2])

        # Ensure node2 has started to request the big block from slow node1
        def blockInFlight(blockNum):
            inflight = self.nodes[2].getpeerinfo()[0]["inflight"]
            return blockNum in inflight

        wait_until(lambda: blockInFlight(151), check_interval=1)

        # Reconnect node2 to node0 so that it has another option from which to fetch blocks
        connect_nodes(self.nodes, 2, 0)
        sync_blocks(self.nodes[0:3])

        # Check that a parallel fetch to node0 was triggered from node2
        assert (check_for_log_msg(
            self, "Triggering parallel block download for {}".format(
                large_block_hash), "/node2"))
Example 7
    def check_ds_enabled_no_proof(self, utxo):

        # tx1 is dsnt-enabled
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff)
        ]
        vout = [
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [0]).serialize()
                ]))
        ]
        tx1 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        # spend the same output as tx1 (double spend)
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff)
        ]
        vout = [CTxOut(25, CScript([OP_TRUE]))]
        self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "Endpoint doesn't want proof", "/node0"))
        self.check_tx_not_received(tx1.hash)
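
check_tx_received and check_tx_not_received are assumed to query the callback service for a submitted proof; the endpoint path and response handling below are assumptions made purely for illustration.

    def check_tx_received(self, tx_hash):
        # Sketch only: ask the callback service whether it holds a proof for tx_hash.
        # The query path ("/dsnt/1/query/<txid>") is an assumption.
        self.conn.request('GET', "/dsnt/1/query/{}".format(tx_hash))
        response = self.conn.getresponse()
        body = response.read().decode()
        return response.status == 200 and body not in ("", "0")

    def check_tx_not_received(self, tx_hash):
        assert not self.check_tx_received(tx_hash)
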
    def __test_getdata(self, node0, block_count):
        block = self.chain.next_block(block_count)
        block_count += 1
        self.log.info(f"block hash: {block.hash}")

        self.__send_blocking_validation_block(block, node0)

        receivedBlock = False

        def on_block(conn, message):
            nonlocal receivedBlock
            message.block.calc_sha256()
            if message.block.sha256 == block.sha256:
                receivedBlock = True
        node0.on_block = on_block
        node0.send_message(msg_getdata([CInv(CInv.BLOCK, int(block.hash, 16))]))

        wait_until(lambda: check_for_log_msg(self, block.hash + " is still waiting as a candidate", "/node0"))

        # remove block validating status to finish validation
        self.nodes[0].waitaftervalidatingblock(block.hash, "remove")

        def wait_for_getdata_reply():
            return receivedBlock
        wait_until(wait_for_getdata_reply)

        return block_count
Example 9
        def wait_for_log():
            nonsensitive_parameters = [
                "debug", "regtest=1", "excessiveblocksize=300MB"
            ]
            sensitive_parameters = [
                "rpcuser=user", "rpcpassword=password", "rpcauth=user:salt"
            ]

            for nonsensitive in nonsensitive_parameters:
                if not check_for_log_msg(self, "[main] " + nonsensitive,
                                         "/node0"):
                    return False

            for sensitive in sensitive_parameters:
                if check_for_log_msg(self, "[main] " + sensitive, "/node0"):
                    return False
            return True
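
The predicate above is meant to be polled; a typical driver, assuming the framework's wait_until helper and an illustrative timeout:

# Keep polling the node0 log until every non-sensitive parameter has appeared
# and no sensitive parameter has leaked (timeout value is illustrative).
wait_until(wait_for_log, timeout=30)
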
Example 10
    def run_test(self):
        # Get out of IBD
        self.nodes[0].generate(1)
        self.sync_all()

        # Stop node so we can restart it with our connections
        self.stop_node(0)

        # Disconnect node1 and node2 for now
        disconnect_nodes_bi(self.nodes, 1, 2)

        connArgs = [ { "versionNum":MY_VERSION }, { "versionNum":70015 } ]
        with self.run_node_with_connections("Test old and new protocol versions", 0, self.nodeArgs, number_of_connections=2,
                                            connArgs=connArgs, cb_class=MyConnCB) as (newVerConn,oldVerConn):
            assert newVerConn.connected
            assert oldVerConn.connected

            # Generate small block, verify we get it over both connections
            self.nodes[0].generate(1)
            wait_until(lambda: newVerConn.cb.block_count == 1, timeout=int(30 * self.options.timeoutfactor))
            wait_until(lambda: oldVerConn.cb.block_count == 1, timeout=int(30 * self.options.timeoutfactor))

            # Get us a spendable output
            coinbase_tx = self.make_coinbase(newVerConn)
            self.nodes[0].generate(100)

            # Put some large txns into the node's mempool until it exceeds 4GB in size
            self.create_and_send_transactions(newVerConn, coinbase_tx, 5)

            # Reconnect node0 and node2 and sync their blocks. Node2 will end up receiving the
            # large block via compact blocks
            connect_nodes(self.nodes, 0, 2)
            sync_blocks(itemgetter(0,2)(self.nodes))

            # Mine a >4GB block, verify we only get it over the new connection
            old_block_count = newVerConn.cb.block_count
            logger.info("Mining a big block")
            self.nodes[0].generate(1)
            assert(self.nodes[0].getmempoolinfo()['size'] == 0)
            logger.info("Waiting for block to arrive at test")
            wait_until(lambda: newVerConn.cb.block_count == old_block_count+1, timeout=int(1200 * self.options.timeoutfactor))

            # Look for log message saying we won't send to old peer
            wait_until(lambda: check_for_log_msg(self, "cannot be sent because it exceeds max P2P message limit", "/node0"))

            # Verify node2 gets the big block via a (not very) compact block
            wait_until(lambda: self.nodes[0].getbestblockhash() == self.nodes[2].getbestblockhash())
            peerinfo = self.nodes[2].getpeerinfo()
            assert(peerinfo[0]['bytesrecv_per_msg']['cmpctblock'] > 0)
            assert(peerinfo[0]['bytesrecv_per_msg']['blocktxn'] > 0)

            # Reconnect node0 to node1
            logger.info("Syncing bitcoind nodes to big block")
            connect_nodes(self.nodes, 0, 1)
            self.sync_all(timeout=int(1200 * self.options.timeoutfactor))

            # Verify node1 also got the big block
            assert(self.nodes[0].getbestblockhash() == self.nodes[1].getbestblockhash())
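
The test reads newVerConn.cb.block_count and oldVerConn.cb.block_count, so MyConnCB presumably only needs to count received block messages per connection. A minimal sketch under that assumption:

class MyConnCB(NodeConnCB):
    # Hypothetical connection callback: count the block messages received.
    def __init__(self):
        super().__init__()
        self.block_count = 0

    def on_block(self, conn, message):
        self.block_count += 1
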
Example 11
    def check_multiple_callback_services(self, utxo):

        # tx1 is dsnt-enabled
        vin = [
            CTxIn(COutPoint(int(utxo[0]["txid"], 16), utxo[0]["vout"]), CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(25, CScript([OP_FALSE, OP_RETURN, 0x746e7364, CallbackMessage(1, [LOCAL_HOST_IP,WRONG_IP1,SKIP_IP,WRONG_IP2], [0]).serialize()]))
        ]
        tx1 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        # tx2 spends the same output as tx1 (double spend)
        vin = [
            CTxIn(COutPoint(int(utxo[0]["txid"], 16), utxo[0]["vout"]), CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(25, CScript([OP_TRUE]))
        ]
        tx2 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(self, "txn= {} rejected txn-mempool-conflict".format(tx2.hash), "/node0"))
        wait_until(lambda: check_for_log_msg(self, "Submitted proof ok to 127.0.0.1 for double-spend enabled txn {}".format(tx1.hash), "/node0"))
        if(os.name == "nt"):
            wait_until(lambda: check_for_log_msg(self, "Error sending notification to endpoint 127.0.0.2", "/node0"))
        else:
            wait_until(lambda: check_for_log_msg(self, "Timeout sending slow-queue notification to endpoint 127.0.0.2", "/node0"))
        wait_until(lambda: check_for_log_msg(self, "Skipping notification to endpoint in skiplist 127.0.0.3", "/node0"))
        wait_until(lambda: check_for_log_msg(self, "Maximum number of notification endpoints reached", "/node0"))

        # tx3 has duplicate endpoint IPs
        vin = [
            CTxIn(COutPoint(int(utxo[1]["txid"], 16), utxo[1]["vout"]), CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(25, CScript([OP_FALSE, OP_RETURN, 0x746e7364, CallbackMessage(1, [LOCAL_HOST_IP,LOCAL_HOST_IP], [0]).serialize()]))
        ]
        tx3 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx3.hash in self.nodes[0].getrawmempool())

        # tx4 spends the same output as tx3 (double spend)
        vin = [
            CTxIn(COutPoint(int(utxo[1]["txid"], 16), utxo[1]["vout"]), CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(25, CScript([OP_TRUE]))
        ]
        tx4 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(self, "txn= {} rejected txn-mempool-conflict".format(tx4.hash), "/node0"))
        wait_until(lambda: check_for_log_msg(self, "Submitted proof ok to 127.0.0.1 for double-spend enabled txn {}".format(tx3.hash), "/node0"))
        wait_until(lambda: check_for_log_msg(self, "Skipping notification to duplicate endpoint 127.0.0.1", "/node0"))
    def get_tests(self):
        # shorthand for functions
        block = self.chain.next_block
        node = get_rpc_proxy(self.nodes[0].url,
                             1,
                             timeout=6000,
                             coveragedir=self.nodes[0].coverage_dir)

        # Create a new block & setup initial chain with spendable outputs
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))
        block(0)
        yield self.accepted()
        test, out, _ = prepare_init_chain(self.chain, self.num_blocks,
                                          self.num_blocks + 1)
        yield test

        # Create 1GB block
        block(1, spend=out[0], block_size=1 * ONE_GIGABYTE)
        yield self.accepted(
            420
        )  # larger timeout is needed to prevent timeouts on busy machine and debug builds

        # Create long chain of smaller blocks
        test = TestInstance(sync_every_block=False)
        for i in range(self.num_blocks):
            block(6000 + i, spend=out[i + 1], block_size=64 * ONE_KILOBYTE)
            test.blocks_and_transactions.append([self.chain.tip, True])
        yield test

        # Launch another node with config that should avoid a stall during IBD
        self.log.info("Launching extra nodes")
        self.add_node(
            2,
            extra_args=[
                '-whitelist=127.0.0.1',
                '-excessiveblocksize=%d' % (ONE_GIGABYTE * 6),
                '-blockmaxsize=%d' % (ONE_GIGABYTE * 6),
                '-maxtxsizepolicy=%d' % ONE_GIGABYTE, '-maxscriptsizepolicy=0',
                '-rpcservertimeout=1000',
                '-genesisactivationheight=%d' % self.genesisactivationheight,
                "-txindex", "-maxtipage=0", "-blockdownloadwindow=64",
                "-blockstallingtimeout=6"
            ],
            init_data_dir=True)
        self.start_node(2)

        # Connect the new nodes up so they do IBD
        self.log.info("Starting IBD")
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[1], 2)
        self.sync_all(
            timeout=240
        )  # larger timeout is needed to prevent timeouts on busy machine and debug builds

        # Check we didn't hit a stall for node2
        assert (not check_for_log_msg(self, "stalling block download",
                                      "/node2"))
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

        _, out, block_count = prepare_init_chain(self.chain,
                                                 101,
                                                 100,
                                                 start_block=0,
                                                 block_0=False,
                                                 node=node0)

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        block1 = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block_count += 1
        # send the block but pause it at the validation point
        self.nodes[0].waitaftervalidatingblock(block1.hash, "add")
        node0.send_message(msg_block(block1))
        self.log.info(f"block1 hash: {block1.hash}")

        # make sure block hash is in waiting list
        wait_for_waiting_blocks({block1.hash}, self.nodes[0], self.log)

        # send child block
        block2 = self.chain.next_block(block_count,
                                       spend=out[1],
                                       extra_txns=10)
        block_count += 1
        node0.send_message(msg_block(block2))
        self.log.info(f"block2 hash: {block2.hash}")

        wait_until(lambda: check_for_log_msg(
            self, block2.hash + " will not be considered by the current",
            "/node0"))

        self.nodes[0].waitaftervalidatingblock(block1.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # block that arrived last on competing chain should be active
        assert_equal(block2.hash, self.nodes[0].getbestblockhash())
Example 14
    def check_ds_mempool_txn(self, utxo):

        # tx1
        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff)
        ]
        vout = [CTxOut(10000, CScript([OP_TRUE]))]
        tx1 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        # tx2 is dsnt-enabled & spends tx1 while still in the mempool
        vin = [
            CTxIn(COutPoint(tx1.sha256, 0), CScript([OP_DROP, OP_TRUE]),
                  0xffffffff)
        ]
        vout = [
            CTxOut(
                100,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [0]).serialize()
                ]))
        ]
        tx2 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx2.hash in self.nodes[0].getrawmempool())

        # tx3 spends the same output as tx2 (double spend)
        vin = [
            CTxIn(COutPoint(tx1.sha256, 0), CScript([OP_DROP, OP_TRUE]),
                  0xffffffff)
        ]
        vout = [CTxOut(100, CScript([OP_TRUE]))]
        tx3 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx3.hash),
            "/node0"))
        wait_until(lambda: check_for_log_msg(
            self, "Verifying script for txn {}".format(tx3.hash), "/node0"))
        wait_until(lambda: self.check_tx_received(tx2.hash))
Example 15
    def run_test(self):
        block_count = 0

        # Create two P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        # send one to get out of IBD state
        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        node0.send_message(msg_block(block))

        self.nodes[0].waitforblockheight(1)

        block = self.chain.next_block(block_count)

        # set block validating status to wait after validation
        self.nodes[0].waitaftervalidatingblock(block.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block.hash}, self.nodes[0], self.log)

        node0.send_message(msg_block(block))
        node1.send_message(msg_block(block))

        # make sure we started validating blocks.
        # One is being validated, the other is ignored.
        wait_for_validating_blocks({block.hash}, self.nodes[0], self.log)

        # wait for the log of the ignored block.
        wait_until(lambda: check_for_log_msg(self, block.hash + " will not be considered by the current", "/node0"))

        # remove block validating status to finish validation
        self.nodes[0].waitaftervalidatingblock(block.hash, "remove")

        # wait till validation of block finishes
        node0.sync_with_ping()

        self.nodes[0].waitforblockheight(2)
        assert_equal(block.hash, self.nodes[0].getbestblockhash())
Example 16
    def run_test(self):
        # Create all the connections we will need to node0 at the start because they all need to be
        # set up before we call NetworkThread().start()

        # Create a P2P connection just so that the test framework is happy we're connected
        dummyCB = NodeConnCB()
        dummyConn = NodeConn('127.0.0.1',
                             p2p_port(0),
                             self.nodes[0],
                             dummyCB,
                             nullAssocID=True)
        dummyCB.add_connection(dummyConn)

        # By setting the assocID on this second NodeConn we prevent it sending a version message
        badConnCB = TestNode()
        badConn = NodeConn('127.0.0.1',
                           p2p_port(0),
                           self.nodes[0],
                           badConnCB,
                           assocID=0x01)
        badConnCB.add_connection(badConn)

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()

        # Check initial state
        dummyCB.wait_for_protoconf()
        with mininode_lock:
            assert_equal(len(badConnCB.message_count), 0)

        # Send a badly formatted version message
        badConn.send_message(msg_version_bad())
        # Connection will be closed with a reject
        wait_until(lambda: badConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        wait_until(lambda: badConn.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Check clear log message was generated
        assert check_for_log_msg(
            self, "Failed to process version: (Badly formatted association ID",
            "/node0")
Example 17
    def __test_getblocks(self, node0, block_count):
        block1 = self.chain.next_block(block_count)
        block_count += 1
        self.log.info(f"block1 hash: {block1.hash}")

        self.__send_blocking_validation_block(block1, node0)

        receivedBlock = False

        def on_block(conn, message):
            nonlocal receivedBlock
            message.block.calc_sha256()
            if message.block.sha256 == block1.sha256:
                receivedBlock = True

        node0.on_block = on_block
        node0.send_message(msg_getblocks())

        block2 = self.chain.next_block(block_count)
        block_count += 1
        self.log.info(f"block2 hash: {block2.hash}")

        self.__send_blocking_validation_waiting_block(block2, node0)

        wait_until(lambda: check_for_log_msg(
            self, "Blocks that were received before getblocks message",
            "/node0"))

        # remove block validating status to finish validation
        self.nodes[0].waitaftervalidatingblock(block1.hash, "remove")

        def wait_for_getblocks_reply():
            return receivedBlock

        wait_until(wait_for_getblocks_reply)

        # remove block validating status to finish validation
        self.nodes[0].waitaftervalidatingblock(block2.hash, "remove")

        # wait till validation of block finishes
        node0.sync_with_ping()

        return block_count
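
__send_blocking_validation_block (and its _waiting_ variant) are assumed to follow the same pattern used explicitly elsewhere in these examples: mark the block so validation pauses, send it, then wait for the node to report it as validating. A sketch under that assumption:

    def __send_blocking_validation_block(self, block, conn):
        # Sketch only: pause the block at the validation point, send it, and
        # wait until the node reports it as validating (helper names follow the
        # calls used elsewhere in these examples).
        self.nodes[0].waitaftervalidatingblock(block.hash, "add")
        conn.send_message(msg_block(block))
        wait_for_validating_blocks({block.hash}, self.nodes[0], self.log)
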
Example 18
    def check_ds_not_enabled(self, utxo, output):

        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff)
        ]
        vout = [CTxOut(25, output)]
        tx1 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        vin = [
            CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]),
                  CScript([OP_FALSE]), 0xffffffff)
        ]
        vout = [CTxOut(25, CScript([OP_TRUE]))]
        tx2 = self.create_and_send_transaction(vin, vout)

        # double spend has been detected
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx2.hash),
            "/node0"))

        self.check_tx_not_received(tx1.hash)
Example 19
    def get_tests(self):
        # shorthand for functions
        block = self.chain.next_block
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=6000, coveragedir=self.nodes[0].coverage_dir)

        # Create a new block
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))
        block(0)
        self.chain.save_spendable_output()
        yield self.accepted()

        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(self.num_blocks):
            block(5000 + i)
            test.blocks_and_transactions.append([self.chain.tip, True])
            self.chain.save_spendable_output()
        yield test

        # Collect spendable outputs now to avoid cluttering the code later on
        out = []
        for i in range(self.num_blocks + 1):
            out.append(self.chain.get_spendable_output())

        # Create 1GB block
        block(1, spend=out[0], block_size=1*ONE_GIGABYTE)
        yield self.accepted()

        # Create long chain of smaller blocks
        test = TestInstance(sync_every_block=False)
        for i in range(self.num_blocks):
            block(6000 + i, spend=out[i + 1], block_size=64*ONE_KILOBYTE)
            test.blocks_and_transactions.append([self.chain.tip, True])
        yield test

        # Launch another node with config that should avoid a stall during IBD
        self.log.info("Launching extra nodes")
        self.add_node(2, extra_args = [
                                    '-whitelist=127.0.0.1',
                                    '-excessiveblocksize=%d' % (ONE_GIGABYTE * 6),
                                    '-blockmaxsize=%d' % (ONE_GIGABYTE * 6),
                                    '-maxtxsizepolicy=%d' % (ONE_GIGABYTE * 2),
                                    '-maxscriptsizepolicy=0',
                                    '-rpcservertimeout=1000',
                                    '-genesisactivationheight=%d' % self.genesisactivationheight,
                                    "-txindex",
                                    "-maxtipage=0",
                                    "-blockdownloadwindow=64",
                                    "-blockstallingtimeout=6"
                                      ],
                      init_data_dir=True)
        self.start_node(2)

        # Connect the new nodes up so they do IBD
        self.log.info("Starting IBD")
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[1], 2)
        self.sync_all(timeout=120)

        # Check we didn't hit a stall for node2
        assert(not check_for_log_msg("stalling block download", self.options.tmpdir + "/node2"))
Example 20
    def get_tests(self):

        # shorthand for functions
        block = self.chain.next_block
        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        block(0)
        yield self.accepted()

        test, out, _ = prepare_init_chain(self.chain, 100, 100)

        yield test

        # Create transaction with OP_RETURN in the locking script.
        tx1 = create_transaction(out[1].tx, out[1].n, b"", 100000,
                                 CScript([OP_RETURN]))
        self.test.connections[0].send_message(msg_tx(tx1))
        # wait for transaction processing
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool(),
                   timeout=10)
        # Mine block (height 102) with new transaction.
        self.nodes[0].generate(1)
        # Obtain newly mined block. It should contain new transaction tx1.
        tx = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['tx']
        assert_equal(len(tx), 2)
        assert_equal(tx1.hash, tx[1])
        self.log.info("Created transaction %s on height %d", tx1.hash,
                      self.genesisactivationheight - 2)

        # Create a transaction with OP_TRUE in the unlocking script that tries to spend tx1.
        tx2 = create_transaction(tx1, 0, b'\x51', 1, CScript([OP_TRUE]))
        self.test.connections[0].send_message(msg_tx(tx2))
        # wait for tx to be orphaned
        wait_until(lambda: check_for_log_msg(self, "stored orphan txn= " + tx2.
                                             hash, "/node0"))
        # Mine block (height 103).
        self.nodes[0].generate(1)
        # Obtain newly mined block. It should NOT contain new transaction tx2.
        tx = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['tx']
        assert_equal(len(tx), 1)
        self.log.info(
            "Created transaction %s on height %d that tries to spend transaction on height %d",
            tx2.hash, self.genesisactivationheight - 1,
            self.genesisactivationheight - 2)

        # Create transaction with OP_RETURN in the locking script.
        tx3 = create_transaction(out[2].tx, out[2].n, b"", 100000,
                                 CScript([OP_RETURN]))
        self.test.connections[0].send_message(msg_tx(tx3))
        # Create a transaction with OP_TRUE in the unlocking script that tries to spend tx3.
        tx4 = create_transaction(tx3, 0, b'\x51', 1, CScript([OP_TRUE]))
        self.test.connections[0].send_message(msg_tx(tx4))
        # Make sure transactions are in mempool
        wait_until(lambda: len(self.nodes[0].getrawmempool()) >= 2, timeout=10)
        assert ({tx3.hash, tx4.hash} == set(self.nodes[0].getrawmempool()))

        # Mine block (height 104) with new transactions.
        self.nodes[0].generate(1)
        # Obtain newly mined block. It should contain new transactions tx3 and tx4.
        tx = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['tx']
        assert_equal(len(tx), 3)
        assert_equal(tx3.hash, tx[1])
        assert_equal(tx4.hash, tx[2])
        self.log.info(
            "Created transactions %s and %s on height %d that tries to spend transaction on height %d",
            tx3.hash, tx4.hash, self.genesisactivationheight,
            self.genesisactivationheight)
Example 21
    def check_ds_enabled_error_msg(self, utxo, log_msg):
        assert (not check_for_log_msg(self, log_msg, "/node0"))
        tx_hash = self.check_ds_enabled(utxo)
        wait_until(lambda: check_for_log_msg(self, log_msg, "/node0"),
                   timeout=70)
        return tx_hash
Example 22
    def run_test(self):

        self.nodes[0].generate(110)
        utxo = self.nodes[0].listunspent()

        self.stop_node(0)
        with self.run_node_with_connections("Server returning 400", 0,
                                            ['-dsendpointport=8080'],
                                            1) as p2p_connections:
            # Turn on CallbackService.
            handler = partial(CallbackService, RECEIVE.YES,
                              STATUS.CLIENT_ERROR, RESPONSE_TIME.FAST,
                              FLAG.YES)
            self.server = HTTPServer(('localhost', 8080), handler)
            self.start_server()
            self.conn = httplib.HTTPConnection(self.callback_service)

            self.node0 = p2p_connections[0]
            self.check_ds_enabled_error_msg(utxo[0],
                                            "Got 400 response from endpoint")

            self.kill_server()

        with self.run_node_with_connections("Server returning 500", 0,
                                            ['-dsendpointport=8080'],
                                            1) as p2p_connections:
            # Turn on CallbackService.
            handler = partial(CallbackService, RECEIVE.YES,
                              STATUS.SERVER_ERROR, RESPONSE_TIME.FAST,
                              FLAG.YES)
            self.server = HTTPServer(('localhost', 8080), handler)
            self.start_server()
            self.conn = httplib.HTTPConnection(self.callback_service)

            self.node0 = p2p_connections[0]
            self.check_ds_enabled_error_msg(utxo[1],
                                            "Got 500 response from endpoint")
            self.check_ds_enabled_error_msg(
                utxo[2], "Skipping notification to blacklisted endpoint")

            self.kill_server()

        with self.run_node_with_connections("Server is slow, but functional",
                                            0, ['-dsendpointport=8080'],
                                            1) as p2p_connections:
            # Turn on CallbackService.
            handler = partial(CallbackService, RECEIVE.YES, STATUS.SUCCESS,
                              RESPONSE_TIME.SLOW, FLAG.YES)
            self.server = HTTPServer(('localhost', 8080), handler)
            self.start_server()
            self.conn = httplib.HTTPConnection(self.callback_service)

            self.node0 = p2p_connections[0]
            tx_hash = self.check_ds_enabled_error_msg(
                utxo[3],
                "Timeout sending notification to endpoint 127.0.0.1, resubmitting to the slow queue"
            )

            wait_until(lambda: self.check_tx_received(tx_hash))

            self.kill_server()

        with self.run_node_with_connections(
                "Server is consistently slow, but functional", 0,
            ['-dsendpointport=8080', '-dsendpointslowrateperhour=2'],
                1) as p2p_connections:
            # Turn on CallbackService.
            handler = partial(CallbackService, RECEIVE.YES, STATUS.SUCCESS,
                              RESPONSE_TIME.SLOW, FLAG.YES)
            self.server = HTTPServer(('localhost', 8080), handler)
            self.start_server()
            self.conn = httplib.HTTPConnection(self.callback_service)
            self.node0 = p2p_connections[0]

            tx_hash = self.check_ds_enabled(utxo[0])
            wait_until(lambda: check_for_log_msg(
                self,
                "Started tracking stats for a new potentially slow endpoint 127.0.0.1",
                "/node0"))
            wait_until(lambda: self.check_tx_received(tx_hash))
            tx_hash = self.check_ds_enabled(utxo[1])
            wait_until(lambda: check_for_log_msg(
                self,
                "Updated stats for potentially slow endpoint 127.0.0.1, is slow: 0",
                "/node0"))
            wait_until(lambda: self.check_tx_received(tx_hash))
            tx_hash = self.check_ds_enabled(utxo[2])
            wait_until(lambda: check_for_log_msg(
                self,
                "Updated stats for potentially slow endpoint 127.0.0.1, is slow: 1",
                "/node0"))
            wait_until(lambda: self.check_tx_received(tx_hash))
            tx_hash = self.check_ds_enabled(utxo[3])
            wait_until(lambda: check_for_log_msg(
                self,
                "Endpoint 127.0.0.1 is currently slow, submitting via the slow queue",
                "/node0"))
            wait_until(lambda: self.check_tx_received(tx_hash))

            self.kill_server()

        with self.run_node_with_connections(
                "Server is too slow, bitcoind ignores it", 0,
            ['-dsendpointport=8080'], 1) as p2p_connections:
            # Turn on CallbackService.
            handler = partial(CallbackService, RECEIVE.YES, STATUS.SUCCESS,
                              RESPONSE_TIME.SLOWEST, FLAG.YES)
            self.server = HTTPServer(('localhost', 8080), handler)
            self.start_server()
            self.conn = httplib.HTTPConnection(self.callback_service)

            self.node0 = p2p_connections[0]
            tx_hash = self.check_ds_enabled_error_msg(
                utxo[4],
                "Timeout sending slow-queue notification to endpoint 127.0.0.1"
            )

            self.check_tx_not_received(tx_hash)

            self.kill_server()

        with self.run_node_with_connections(
                "Server has no x-bsv-dsnt in header", 0,
            ['-dsendpointport=8080'], 1) as p2p_connections:
            # Turn on CallbackService.
            handler = partial(CallbackService, RECEIVE.YES, STATUS.SUCCESS,
                              RESPONSE_TIME.FAST, FLAG.NO)
            self.server = HTTPServer(('localhost', 8080), handler)
            self.start_server()
            self.conn = httplib.HTTPConnection(self.callback_service)

            self.node0 = p2p_connections[0]
            tx_hash = self.check_ds_enabled_error_msg(
                utxo[5], "Missing x-bsv-dsnt header in response from endpoint")

            self.check_tx_not_received(tx_hash)

            self.kill_server()
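
start_server and kill_server presumably just run the HTTPServer on a background thread; a minimal sketch under that assumption:

import threading

    def start_server(self):
        # Serve callback requests on a daemon thread so the test can continue.
        self.server_thread = threading.Thread(target=self.server.serve_forever,
                                              daemon=True)
        self.server_thread.start()

    def kill_server(self):
        self.server.shutdown()
        self.server.server_close()
        self.server_thread.join()
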
Example 23
    def check_ds_enabled(self, utxo):

        # tx1 is dsnt-enabled
        vin = [
            CTxIn(COutPoint(int(utxo[0]["txid"], 16), utxo[0]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
            CTxIn(COutPoint(int(utxo[1]["txid"], 16), utxo[1]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
            CTxIn(COutPoint(int(utxo[3]["txid"], 16), utxo[3]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff)
        ]
        vout = [
            # inputs 0 and 2 are valid, input 9 is out of range
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [0, 2, 9]).serialize()
                ]))
        ]
        tx1 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx1.hash in self.nodes[0].getrawmempool())

        # tx2 spends the same output as tx1 (double spend)
        vin = [
            CTxIn(COutPoint(int(utxo[0]["txid"], 16), utxo[0]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
            CTxIn(COutPoint(int(utxo[2]["txid"], 16), utxo[2]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
            CTxIn(COutPoint(int(utxo[3]["txid"], 16), utxo[3]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff)
        ]
        vout = [CTxOut(25, CScript([OP_TRUE]))]
        tx2 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx2.hash),
            "/node0"))
        wait_until(lambda: check_for_log_msg(
            self, "Script verification for double-spend passed", "/node0"))
        wait_until(lambda: self.check_tx_received(tx1.hash))

        # again spend the same output as tx1 and tx2 (double spend) --> callback service is not notified twice
        vin = [
            CTxIn(COutPoint(int(utxo[0]["txid"], 16), utxo[0]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [CTxOut(25, CScript([OP_TRUE]))]
        self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "Already notified about txn {}".format(tx1.hash), "/node0"))

        # tx4 is not dsnt enabled, and gets accepted to the mempool
        vin = [
            CTxIn(COutPoint(int(utxo[2]["txid"], 16), utxo[2]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [CTxOut(25, CScript([OP_TRUE]))]
        tx4 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx4.hash in self.nodes[0].getrawmempool())

        # tx5 is dsnt-enabled and double spends tx4. In this case, because the txn in the mempool is not
        # dsnt-enabled, the callback service specified by tx5 will be notified about tx4.
        # Also test a notification-enabled output in a position other than 0.
        vin = [
            CTxIn(COutPoint(int(utxo[2]["txid"], 16), utxo[2]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(25, CScript([OP_TRUE])),
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [0]).serialize()
                ]))
        ]
        tx5 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx5.hash),
            "/node0"))
        wait_until(lambda: check_for_log_msg(
            self, "Txn {} is DS notification enabled on output 1".format(
                tx5.hash), "/node0"))
        wait_until(lambda: self.check_tx_received(tx5.hash))

        # tx6 is dsnt enabled and double spends tx1. Both the txn in the mempool and the double-spend are dsnt
        # enabled, so in this case just the callback service specified by tx1 will get notified about tx6
        # (first seen rule).
        reset_proofs()
        self.check_tx_not_received(tx1.hash)
        vin = [
            CTxIn(COutPoint(int(utxo[3]["txid"], 16), utxo[3]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [0]).serialize()
                ]))
        ]
        tx6 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx6.hash),
            "/node0"))
        wait_until(lambda: self.check_tx_received(tx1.hash))

        # tx7 is dsnt-enabled on multiple outputs
        vin = [
            CTxIn(COutPoint(int(utxo[4]["txid"], 16), utxo[4]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
            CTxIn(COutPoint(int(utxo[5]["txid"], 16), utxo[5]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [0]).serialize()
                ])),
            CTxOut(
                25,
                CScript([
                    OP_FALSE, OP_RETURN, 0x746e7364,
                    CallbackMessage(1, [LOCAL_HOST_IP], [1]).serialize()
                ]))
        ]
        tx7 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: tx7.hash in self.nodes[0].getrawmempool())

        # tx8 spends the same outputs as tx7 (double spend)
        vin = [
            CTxIn(COutPoint(int(utxo[4]["txid"], 16), utxo[4]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
            CTxIn(COutPoint(int(utxo[5]["txid"], 16), utxo[5]["vout"]),
                  CScript([OP_FALSE]), 0xffffffff),
        ]
        vout = [CTxOut(25, CScript([OP_TRUE]))]
        tx8 = self.create_and_send_transaction(vin, vout)
        wait_until(lambda: check_for_log_msg(
            self, "txn= {} rejected txn-mempool-conflict".format(tx8.hash),
            "/node0"))
        wait_until(lambda: self.check_tx_received(tx7.hash))
        wait_until(lambda: check_for_log_msg(
            self, "Txn {} is DS notification enabled on output 0".format(
                tx7.hash), "/node0"))
        assert (not check_for_log_msg(
            self, "Txn {} is DS notification enabled on output 1".format(
                tx7.hash), "/node0"))
Example 24
    def run_test(self):

        self.stop_node(0)

        with self.run_node_with_connections(
                "reject headers if previous block is missing", 0, [],
                self.num_peers) as p2p_connections:

            connection = p2p_connections[0]
            coinbase_height = 1

            # 1. Create first block.
            block_0 = prepareBlock(coinbase_height,
                                   self.nodes[0].getbestblockhash())

            # 2. Connection sends HEADERS msg to bitcoind and waits for GETDATA.
            headers_message = msg_headers()
            headers_message.headers = [CBlockHeader(block_0)]
            connection.cb.send_message(headers_message)
            connection.cb.wait_for_getdata()
            wait_until(lambda: connection.cb.last_message["getdata"].inv[0].
                       hash == block_0.sha256)

            # 3. Connection sends BLOCK to bitcoind.
            connection.cb.send_message(msg_block(block_0))

            # 4. Bitcoind adds block to active chain.
            wait_for_tip(self.nodes[0], block_0.hash)

            # 5. Create two chained blocks.
            block_1 = prepareBlock(coinbase_height + 1, block_0.hash)
            block_2 = prepareBlock(coinbase_height + 2, block_1.hash)

            # 6. Connection sends HEADERS of the second block to bitcoind. It should be rejected.
            headers_message = msg_headers()
            headers_message.headers = [CBlockHeader(block_2)]
            connection.cb.send_message(headers_message)
            wait_until(lambda: check_for_log_msg(
                self, "received header " + block_2.hash +
                ": missing prev block", "/node0"))

            # 7. Connection sends HEADERS of the first block to bitcoind. It should be accepted.
            headers_message = msg_headers()
            headers_message.headers = [CBlockHeader(block_1)]
            connection.cb.send_message(headers_message)
            wait_until(lambda: connection.cb.last_message["getdata"].inv[0].
                       hash == block_1.sha256)

            # 8. Connection sends HEADERS of the second block to bitcoind. It should be accepted now that previous block is known.
            headers_message = msg_headers()
            headers_message.headers = [CBlockHeader(block_2)]
            connection.cb.send_message(headers_message)
            wait_until(lambda: connection.cb.last_message["getdata"].inv[0].
                       hash == block_2.sha256)

            # 9. Try to send alternative Genesis block (no previous block). It should be rejected.
            genesis_block = create_block(hashprev=0,
                                         coinbase=create_coinbase(
                                             height=0, outputValue=25))
            genesis_block.solve()
            connection.cb.send_message(msg_block(genesis_block))
            wait_until(lambda: check_for_log_msg(
                self, "ERROR: FindPreviousBlockIndex: prev block not found",
                "/node0"))