def run_test(self):
        """Test that a long headers-only fork triggers safe mode (UNKNOWN) and
        that extending the active chain past SAFE_MODE_MAX_FORK_DISTANCE
        clears it again."""
        with self.run_node_with_connections("Preparation", 0, None,
                                            2) as (conn1, conn2):
            last_block_time = 0
            conn1.rpc.generate(1)

            # Branch 1: SAFE_MODE_MAX_FORK_DISTANCE + 1 blocks built via conn1.
            branch_1_root, last_block_time = make_block(
                conn1, last_block_time=last_block_time)
            branch_1_blocks = [branch_1_root]
            for _ in range(SAFE_MODE_MAX_FORK_DISTANCE):
                new_block, last_block_time = make_block(
                    conn1,
                    branch_1_blocks[-1],
                    last_block_time=last_block_time)
                branch_1_blocks.append(new_block)

            # Branch 2: a longer competing branch, enough blocks ahead to
            # satisfy the minimum-POW-difference safe mode condition.
            branch_2_root, last_block_time = make_block(
                conn2, last_block_time=last_block_time)
            branch_2_blocks = [branch_2_root]
            for _ in range(SAFE_MODE_MAX_FORK_DISTANCE +
                           SAFE_MODE_MIN_POW_DIFFERENCE + 2):
                new_block, last_block_time = make_block(
                    conn2,
                    branch_2_blocks[-1],
                    last_block_time=last_block_time)
                branch_2_blocks.append(new_block)

            # send main branch that should be active tip
            send_by_headers(conn1,
                            branch_1_blocks[:SAFE_MODE_MAX_FORK_DISTANCE],
                            do_send_blocks=True)

            # send alternative branch - headers only
            send_by_headers(conn2, branch_2_blocks, do_send_blocks=False)

            # active tip is one before last block from branch 1 and branch 2 has status headers-only
            wait_for_tip(conn1, branch_1_blocks[-2].hash)
            wait_for_tip_status(conn1, branch_2_blocks[-1].hash,
                                "headers-only")

            # we should have entered safe mode with UNKNOWN, as the alternative branch
            # is more than 6 blocks ahead and still within SAFE_MODE_MAX_FORK_DISTANCE blocks
            try:
                conn1.rpc.getbalance()
                assert False, "Should not come to here, should raise exception in line above."
            except JSONRPCException as e:
                assert e.error[
                    "message"] == "Safe mode: Warning: The network does not appear to fully agree! We received headers of a large fork. Still waiting for block data for more details."

            # add one more block to active chain
            send_by_headers(conn1,
                            branch_1_blocks[SAFE_MODE_MAX_FORK_DISTANCE:],
                            do_send_blocks=True)

            # active tip is last block from branch 1
            wait_for_tip(conn1, branch_1_blocks[-1].hash)

            # alternative chain is now more than 288 blocks away so we should exit safe mode
            conn1.rpc.getbalance()
Пример #2
0
    def run_test(self):
        """Verify the chain tip statuses reported by node 0 after startup:
        one active tip, one valid fork, and two invalid forks."""

        # The active chain tip must be fully validated.
        wait_for_tip(
            self.nodes[0],
            "7da1d835f7759f97958fb878d040f25847e4c43c1081781bdf23e4d4eafb641d")

        # Remaining known tips with their expected statuses, checked in order.
        expected_fork_tips = (
            ("058af9eeaf5cc2916afd0e4cc37efa7dedc5038d3826e728b73eef050569a517",
             "valid-fork"),
            ("129cae8395e28cdf8acda1e78853d45b037b4945edc7f13e799db2ab5354488f",
             "invalid"),
            ("4fdd65af7b30f80d97b89d7584ac66a6e5d5f81ce971b218b7380202a076146c",
             "invalid"),
        )
        for tip_hash, tip_status in expected_fork_tips:
            wait_for_tip_status(self.nodes[0], tip_hash, tip_status)
    def run_test(self):
        """Test that a valid fork with enough POW puts the node into safe mode
        and that extending the winning branch far enough clears it again."""
        with self.run_node_with_connections("Preparation", 0, None,
                                            2) as (conn1, conn2):
            last_block_time = 0
            conn1.rpc.generate(1)

            # Branch 1: just over the minimum fork length, built via conn1.
            branch_1_root, last_block_time = make_block(
                conn1, last_block_time=last_block_time)
            branch_1_blocks = [branch_1_root]
            for _ in range(SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 1):
                new_block, last_block_time = make_block(
                    conn1,
                    branch_1_blocks[-1],
                    last_block_time=last_block_time)
                branch_1_blocks.append(new_block)

            # Branch 2: a longer competing branch, built via conn2.
            branch_2_root, last_block_time = make_block(
                conn2, last_block_time=last_block_time)
            branch_2_blocks = [branch_2_root]
            for _ in range(SAFE_MODE_DEFAULT_MAX_FORK_DISTANCE):
                new_block, last_block_time = make_block(
                    conn2,
                    branch_2_blocks[-1],
                    last_block_time=last_block_time)
                branch_2_blocks.append(new_block)

            # send first branch that should be active tip
            send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)

            # wait for active tip
            wait_for_tip(conn1, branch_1_blocks[-1].hash)

            # send second branch with more POW
            send_by_headers(
                conn2,
                branch_2_blocks[:SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 3],
                do_send_blocks=True)

            # active tip is from branch 2 and branch 1 has status valid-fork
            wait_for_tip(
                conn1,
                branch_2_blocks[SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 2].hash)
            wait_for_tip_status(conn1, branch_1_blocks[-1].hash, "valid-fork")

            # we should have entered safe mode with VALID because there is a valid fork
            # with SAFE_MODE_DEFAULT_MIN_VALID_FORK_POW pow and the last common block is
            # less than SAFE_MODE_DEFAULT_MAX_VALID_FORK_DISTANCE from the active tip
            assert conn1.rpc.getsafemodeinfo()["safemodeenabled"]

            # send more blocks of the second branch
            send_by_headers(conn1,
                            branch_2_blocks[SAFE_MODE_DEFAULT_MIN_FORK_LENGTH +
                                            3:],
                            do_send_blocks=True)

            # active tip is last block from branch 2
            wait_for_tip(conn1, branch_2_blocks[-1].hash)

            # we should exit safe mode because fork base is too far from active tip
            assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]
Пример #4
0
    def run_test(self):
        """Test that a valid fork with enough POW triggers safe mode (reported
        through a getbalance RPC failure) and that extending the winning
        branch far enough clears it again."""
        with self.run_node_with_connections("Preparation", 0, None, 2) as (conn1, conn2):
            last_block_time = 0
            conn1.rpc.generate(1)

            # Branch 1: just over the minimum valid-fork length, built via conn1.
            branch_1_root, last_block_time = make_block(conn1, last_block_time = last_block_time)
            branch_1_blocks = [branch_1_root]
            for _ in range(SAFE_MODE_MIN_VALID_FORK_LENGTH + 1):
                new_block, last_block_time = make_block(conn1, branch_1_blocks[-1], last_block_time = last_block_time)
                branch_1_blocks.append(new_block)

            # Branch 2: a longer competing branch, built via conn2.
            branch_2_root, last_block_time = make_block(conn2, last_block_time = last_block_time)
            branch_2_blocks = [branch_2_root]
            for _ in range(SAFE_MODE_MAX_VALID_FORK_DISTANCE):
                new_block, last_block_time = make_block(conn2, branch_2_blocks[-1], last_block_time = last_block_time)
                branch_2_blocks.append(new_block)

            # send first branch that should be active tip
            send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)

            # wait for active tip
            wait_for_tip(conn1, branch_1_blocks[-1].hash)

            # send second branch with more POW
            send_by_headers(conn2, branch_2_blocks[:SAFE_MODE_MIN_VALID_FORK_LENGTH + 3], do_send_blocks=True)

            # active tip is from branch 2 and branch 1 has status valid-fork
            wait_for_tip(conn1, branch_2_blocks[SAFE_MODE_MIN_VALID_FORK_LENGTH + 2].hash)
            wait_for_tip_status(conn1, branch_1_blocks[-1].hash, "valid-fork")

            # we should have entered safe mode with VALID because there is a valid fork
            # with SAFE_MODE_MIN_VALID_FORK_POW pow and the last common block is less
            # than SAFE_MODE_MAX_VALID_FORK_DISTANCE from the active tip
            try:
                conn1.rpc.getbalance()
                assert False, "Should not come to here, should raise exception in line above."
            except JSONRPCException as e:
                assert e.error["message"] == "Safe mode: Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues. A large valid fork has been detected."

            # send more blocks of the second branch
            send_by_headers(conn1, branch_2_blocks[SAFE_MODE_MIN_VALID_FORK_LENGTH + 3:], do_send_blocks=True)

            # active tip is last block from branch 2
            wait_for_tip(conn1,branch_2_blocks[-1].hash)

            # we should exit safe mode because fork base is too far from active tip
            conn1.rpc.getbalance()
    def run_test(self):
        """Test that a headers-only fork too far behind the active tip does not
        trigger safe mode, and that sending its (invalid) block data marks the
        fork invalid while still leaving the node out of safe mode."""
        with self.run_node_with_connections("Preparation", 0, None,
                                            2) as (conn1, conn2):
            last_block_time = 0
            conn1.rpc.generate(1)

            # Branch 1: SAFE_MODE_MAX_FORK_DISTANCE + 1 blocks built via conn1.
            branch_1_root, last_block_time = make_block(
                conn1, last_block_time=last_block_time)
            branch_1_blocks = [branch_1_root]
            for _ in range(SAFE_MODE_MAX_FORK_DISTANCE):
                new_block, last_block_time = make_block(
                    conn1,
                    branch_1_blocks[-1],
                    last_block_time=last_block_time)
                branch_1_blocks.append(new_block)

            # Branch 2: a longer branch rooted at an invalid block (makeValid=False).
            branch_2_root, last_block_time = make_block(
                conn2, makeValid=False, last_block_time=last_block_time)
            branch_2_blocks = [branch_2_root]
            for _ in range(SAFE_MODE_MAX_FORK_DISTANCE +
                           SAFE_MODE_MIN_POW_DIFFERENCE + 1):
                new_block, last_block_time = make_block(
                    conn2,
                    branch_2_blocks[-1],
                    last_block_time=last_block_time)
                branch_2_blocks.append(new_block)

            # send first branch that should be active tip
            send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)

            # wait for active tip
            wait_for_tip(conn1, branch_1_blocks[-1].hash)

            # send second branch with more POW
            send_by_headers(conn2, branch_2_blocks, do_send_blocks=False)

            # active tip should be from first branch and second branch should have headers-only status
            wait_for_tip(conn1, branch_1_blocks[-1].hash)
            wait_for_tip_status(conn1, branch_2_blocks[-1].hash,
                                "headers-only")

            # we should not be in safe mode
            conn1.rpc.getbalance()

            # From time to time this test can run faster than expected and
            # the older blocks for batch 2 headers are not yet requested.
            # In that case they will be rejected due to being too far away
            # from the tip. In that case we need to send them again once they
            # are requested.
            def on_getdata(conn, msg):
                # Re-serve any branch-2 block the node asks for by hash.
                for i in msg.inv:
                    if i.type != 2:  # MSG_BLOCK
                        error_msg = f"Unexpected data requested {i}"
                        self.log.error(error_msg)
                        raise NotImplementedError(error_msg)
                    for block in branch_2_blocks:
                        if int(block.hash, 16) == i.hash:
                            conn.send_message(msg_block(block))
                            break

            conn2.cb.on_getdata = on_getdata

            # send second branch full blocks
            for block in branch_2_blocks:
                conn2.send_message(msg_block(block))

            # second branch should now be invalid
            wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "invalid")
            wait_for_tip(conn1, branch_1_blocks[-1].hash)

            # we should not be in safe mode
            conn1.rpc.getbalance()
Пример #6
0
    def run_test(self):
        """Test safe mode level transitions: enter with UNKNOWN while fork block
        data is missing, switch to INVALID once the fork root proves invalid,
        then exit after the active chain outgrows the fork."""
        with self.run_node_with_connections("Preparation", 0, None,
                                            2) as (conn1, conn2):
            last_block_time = 0
            conn1.rpc.generate(1)

            # Branch 1: 11 valid blocks built via conn1 (the active chain).
            branch_1_root, last_block_time = make_block(
                conn1, last_block_time=last_block_time)
            branch_1_blocks = [branch_1_root]
            for _ in range(10):
                new_block, last_block_time = make_block(
                    conn1,
                    branch_1_blocks[-1],
                    last_block_time=last_block_time)
                branch_1_blocks.append(new_block)

            # Branch 2: 31 blocks rooted at an invalid block (makeValid=False).
            branch_2_root, last_block_time = make_block(
                conn2, makeValid=False, last_block_time=last_block_time)
            branch_2_blocks = [branch_2_root]
            for _ in range(30):
                new_block, last_block_time = make_block(
                    conn2,
                    branch_2_blocks[-1],
                    last_block_time=last_block_time)
                branch_2_blocks.append(new_block)

            # send main branch that should be active tip
            send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)

            # send block header of the first block of branch 2 but not send block itself
            send_by_headers(conn2, branch_2_blocks[:1], do_send_blocks=False)

            # send first half of the blocks from the second branch
            send_by_headers(conn2, branch_2_blocks[1:20], do_send_blocks=True)

            # active tip is last block from branch 1 and branch 2 has status headers-only
            wait_for_tip(conn1, branch_1_blocks[-1].hash)
            wait_for_tip_status(conn1, branch_2_blocks[19].hash,
                                "headers-only")

            # we should have entered safe mode with UNKNOWN because we don't have data of the first block
            try:
                conn1.rpc.getbalance()
                assert False, "Should not come to here, should raise exception in line above."
            except JSONRPCException as e:
                assert e.error[
                    "message"] == "Safe mode: Warning: The network does not appear to fully agree! We received headers of a large fork. Still waiting for block data for more details."

            # send headers only for the rest of the second branch
            send_by_headers(conn2, branch_2_blocks[20:], do_send_blocks=False)

            # we should remain in the safe mode with UNKNOWN
            try:
                conn1.rpc.getbalance()
                assert False, "Should not come to here, should raise exception in line above."
            except JSONRPCException as e:
                assert e.error[
                    "message"] == "Safe mode: Warning: The network does not appear to fully agree! We received headers of a large fork. Still waiting for block data for more details."

            # send contents of first block of second branch
            # this block is invalid and should invalidate whole second branch
            conn2.send_message(msg_block(branch_2_blocks[0]))

            # make sure that block is processed before doing any asserts by waiting for reject
            # we cannot use sync_with_ping here because we sent invalid block and connection will be banned and closed
            conn2.cb.wait_for_reject()

            # active tip should still be from branch 1 and branch 2 should be invalid
            wait_for_tip(conn1, branch_1_blocks[-1].hash)
            wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "invalid")

            # safe mode message should have now changed - we have invalid chain that triggers safe mode
            try:
                conn1.rpc.getbalance()
                assert False, "Should not come to here, should raise exception in line above."
            except JSONRPCException as e:
                assert e.error[
                    "message"] == "Safe mode: Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade. A large invalid fork has been detected."

            # add more blocks to active chain so fork will no longer have more than SAFE_MODE_MIN_POW_DIFFERENCE blocks
            new_block, last_block_time = make_block(
                conn1, branch_1_blocks[-1], last_block_time=last_block_time)
            branch_1_aditional_blocks = [new_block]
            for _ in range(20 - SAFE_MODE_MIN_POW_DIFFERENCE):
                new_block, last_block_time = make_block(
                    conn1,
                    branch_1_aditional_blocks[-1],
                    last_block_time=last_block_time)
                branch_1_aditional_blocks.append(new_block)

            # send additional blocks with data to active chain
            send_by_headers(conn1,
                            branch_1_aditional_blocks,
                            do_send_blocks=True)

            # check that active tip is from branch 1
            wait_for_tip(conn1, branch_1_aditional_blocks[-1].hash)

            # we are not in the safe mode any more; the fork is no longer 6 blocks ahead of
            # the active chain
            conn1.rpc.getbalance()
Пример #7
0
    def run_test(self):
        """Test wallet backup and restore: verify balances survive restoring
        from wallet.dat backups and from dumped (importwallet) wallets, and
        that backing up onto the live wallet file is rejected."""
        self.log.info("Generating initial blockchain")
        # Give nodes 0-2 one mature-to-be coinbase each; node 3 mines maturity blocks.
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        self.nodes[2].generate(1)
        sync_blocks(self.nodes)
        self.nodes[3].generate(100)
        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)

        self.log.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for i in range(5):
            self.do_one_round()

        self.log.info("Backing up")
        tmpdir = self.options.tmpdir
        self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
        self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
        self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")

        self.log.info("More transactions")
        # These rounds happen after the backup, so restoring from backup plus
        # rescanning must still recover the resulting balances.
        for i in range(5):
            self.do_one_round()

        # Generate 101 more blocks, so any fees paid mature
        self.nodes[3].generate(101)
        self.sync_all()

        balance0 = self.nodes[0].getbalance()
        balance1 = self.nodes[1].getbalance()
        balance2 = self.nodes[2].getbalance()
        balance3 = self.nodes[3].getbalance()
        total = balance0 + balance1 + balance2 + balance3

        # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
        assert_equal(total, 5700)

        # Store best block hash
        best_block_hash = self.nodes[0].getbestblockhash()
        assert_equal(best_block_hash, self.nodes[1].getbestblockhash())
        assert_equal(best_block_hash, self.nodes[2].getbestblockhash())
        assert_equal(best_block_hash, self.nodes[3].getbestblockhash())

        ##
        # Test restoring spender wallets from backups
        ##
        self.log.info("Restoring using wallet.dat")
        self.stop_three()
        self.erase_three()

        # Start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        # Restore wallets from backup
        shutil.copyfile(tmpdir + "/node0/wallet.bak",
                        tmpdir + "/node0/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node1/wallet.bak",
                        tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node2/wallet.bak",
                        tmpdir + "/node2/regtest/wallet.dat")

        self.log.info("Re-starting nodes")
        self.start_three()
        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)

        self.log.info("Restoring using dumped wallet")
        self.stop_three()
        self.erase_three()

        # start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        self.start_three()

        # Make sure the nodes are synced
        wait_for_tip(self.nodes[0], best_block_hash)
        wait_for_tip(self.nodes[1], best_block_hash)
        wait_for_tip(self.nodes[2], best_block_hash)
        wait_for_tip(self.nodes[3], best_block_hash)

        # Fresh (empty) wallets before the dump is imported.
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)

        self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")

        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)

        # Backup to source wallet file must fail
        # (covers the exact path, a dot-path alias, and the parent directory)
        sourcePaths = [
            tmpdir + "/node0/regtest/wallet.dat",
            tmpdir + "/node0/./regtest/wallet.dat", tmpdir + "/node0/regtest/",
            tmpdir + "/node0/regtest"
        ]

        for sourcePath in sourcePaths:
            assert_raises_rpc_error(-4, "backup failed",
                                    self.nodes[0].backupwallet, sourcePath)
Пример #8
0
    def run_test_case(self,
                      description,
                      order=1,
                      wait=False,
                      numberOfSafeModeLevelChanges=1):
        """Run one safe-mode-level scenario from a fresh chain.

        Args:
            description: label passed to run_node_with_connections and the log.
            order: 1 sends the active branch before the headers-only branch,
                any other value reverses the order.
            wait: forwarded to send_branches (whether to pause between sends).
            numberOfSafeModeLevelChanges: how many "Safe mode level changed"
                log lines must appear before the test proceeds.
        """
        self.log.info("Running test case: %s", description)

        # Remove test folder to start building chain from the beginning for each case
        if os.path.exists(os.path.join(self.nodes[0].datadir, "regtest")):
            shutil.rmtree(os.path.join(self.nodes[0].datadir, "regtest"))

        with self.run_node_with_connections(description, 0, None,
                                            3) as (conn1, conn2, conn3):
            last_block_time = 0
            conn1.rpc.generate(1)

            # Branch 1: 11 blocks built via conn1 (becomes the active chain).
            branch_1_root, last_block_time = make_block(
                conn1, last_block_time=last_block_time)
            branch_1_blocks = [branch_1_root]
            for _ in range(10):
                new_block, last_block_time = make_block(
                    conn1,
                    branch_1_blocks[-1],
                    last_block_time=last_block_time)
                branch_1_blocks.append(new_block)

            # Branch 2: 21 blocks built via conn2 (sent headers-only).
            branch_2_root, last_block_time = make_block(
                conn2, last_block_time=last_block_time)
            branch_2_blocks = [branch_2_root]
            for _ in range(20):
                new_block, last_block_time = make_block(
                    conn2,
                    branch_2_blocks[-1],
                    last_block_time=last_block_time)
                branch_2_blocks.append(new_block)

            # Branch 3: a single block, sent later to poke the node again.
            branch_3_root, last_block_time = make_block(
                conn3, last_block_time=last_block_time)

            if order == 1:
                self.send_branches(
                    {
                        'conn': conn1,
                        'blocks': branch_1_blocks,
                        'do_send_blocks': True
                    }, {
                        'conn': conn2,
                        'blocks': branch_2_blocks,
                        'do_send_blocks': False
                    }, wait)
            else:
                self.send_branches(
                    {
                        'conn': conn2,
                        'blocks': branch_2_blocks,
                        'do_send_blocks': False
                    }, {
                        'conn': conn1,
                        'blocks': branch_1_blocks,
                        'do_send_blocks': True
                    }, wait)

            # active tip is last block from branch 1 and branch 2 has status headers-only
            wait_for_tip(conn1, branch_1_blocks[-1].hash)
            wait_for_tip_status(conn2, branch_2_blocks[-1].hash,
                                "headers-only")

            assert conn1.rpc.getsafemodeinfo(
            )["safemodeenabled"], "We should be in the safe mode"

            def wait_for_log():
                # True once the node log contains the expected number of
                # safe-mode level change warnings.
                safeModeChanges = 0
                line_text = "WARNING: Safe mode level changed"
                for line in open(
                        glob.glob(self.options.tmpdir + "/node0" +
                                  "/regtest/bitcoind.log")[0]):
                    if line_text in line:
                        self.log.info("Found line: %s", line)
                        safeModeChanges += 1
                        if safeModeChanges == numberOfSafeModeLevelChanges:
                            return True
                return False

            wait_until(wait_for_log)

            conn2.send_message(msg_block(branch_2_blocks[0]))
            conn2.cb.sync_with_ping()

            # send block from the third branch
            conn3.send_message(msg_block(branch_3_root))
            conn3.cb.sync_with_ping()

            # we should still be in safe mode
            assert conn1.rpc.getsafemodeinfo(
            )["safemodeenabled"], "We should be in the safe mode"
    def run_test(self):
        """Test safe mode fork reporting (getsafemodeinfo) and the safe-mode
        webhook notifications across a reorg, a chain extension, and a
        rollback via invalidateblock."""
        # Local HTTP server that collects webhook notifications from the node.
        self.PORT = 8765
        self.webhook_messages = []
        self.server = HTTPServer(('', self.PORT), self.make_handler)
        self.start_server()

        args = [
            f"-safemodewebhookurl=http://127.0.0.1:{self.PORT}/safemode",
        ]

        with self.run_node_with_connections("Test Reorg", 0, args,
                                            2) as (conn, conn2):
            conn.rpc.generate(1)

            root_block, root_block_time = make_block(conn, last_block_time=0)
            self.last_block_time = root_block_time
            send_by_headers(conn, [root_block], do_send_blocks=True)
            wait_for_tip(conn, root_block.hash)

            # the main chain, just long enough to be able to trigger the safe mode after reorg
            main_chain = self.make_chain(conn, root_block,
                                         SAFE_MODE_DEFAULT_MIN_FORK_LENGTH)
            expected_main_chain_fork_data = {
                "forkfirstblock": main_chain[0].hash,
                "tips": {main_chain[-1].hash},
                "lastcommonblock": root_block.hash
            }

            # the new chain, just long enough to be able to trigger the reorg
            new_chain = self.make_chain(conn, root_block, len(main_chain) + 1)
            expected_new_chain_fork_data = {
                "forkfirstblock": new_chain[0].hash,
                "tips": {new_chain[-1].hash},
                "lastcommonblock": root_block.hash
            }

            # sending the main chain
            send_by_headers(conn, main_chain, do_send_blocks=True)
            wait_for_tip(conn, main_chain[-1].hash)

            # send headers of the new chain and verify that we are in the safe mode
            send_by_headers(conn, new_chain, do_send_blocks=False)
            wait_for_tip_status(conn, new_chain[-1].hash, "headers-only")
            self.wait_for_safe_mode_data(conn.rpc,
                                         [expected_new_chain_fork_data])
            self.check_last_webhook_msg_reorged_from(None)
            self.webhook_messages = []

            # now send blocks of the new chain
            for bl in new_chain:
                conn.send_message(msg_block(bl))

            # a reorg happened, tip should be at last block of the new chain
            wait_for_tip(conn, new_chain[-1].hash)
            # still in the safe mode, but fork is the main chain
            self.wait_for_safe_mode_data(conn.rpc,
                                         [expected_main_chain_fork_data])
            # last block caused a reorg, check if we got the correct notification
            self.check_last_webhook_msg_reorged_from(main_chain[-1].hash,
                                                     len(main_chain))

            # extending the new chain, just enough to be able to trigger the safe mode after sending headers
            new_chain_extension = self.make_chain(
                conn, new_chain[-1], SAFE_MODE_DEFAULT_MIN_FORK_LENGTH)
            expected_new_chain_ext_fork_data = {
                "forkfirstblock": new_chain_extension[0].hash,
                "tips": {new_chain_extension[-1].hash},
                "lastcommonblock": new_chain[-1].hash
            }

            # sending the new chain extension
            send_by_headers(conn, new_chain_extension, do_send_blocks=False)
            wait_for_tip_status(conn, new_chain_extension[-1].hash,
                                "headers-only")
            # two forks: the main chain from before and the new chain extension
            self.wait_for_safe_mode_data(conn.rpc, [
                expected_main_chain_fork_data,
                expected_new_chain_ext_fork_data,
            ])
            # no reorg
            self.check_last_webhook_msg_reorged_from(None)

            # now send blocks of the new chain extension
            for bl in new_chain_extension:
                conn.send_message(msg_block(bl))
            # the tip has advanced
            wait_for_tip(conn, new_chain_extension[-1].hash)
            self.wait_for_safe_mode_data(conn.rpc, [
                expected_main_chain_fork_data,
            ])
            # still no reorg
            self.check_last_webhook_msg_reorged_from(None)

            # invalidating first block of the new chain extension
            conn.rpc.invalidateblock(new_chain_extension[0].hash)
            # rolled back
            wait_for_tip(conn, new_chain[-1].hash)
            self.wait_for_safe_mode_data(conn.rpc, [
                expected_main_chain_fork_data,
                expected_new_chain_ext_fork_data,
            ])
            # rolling back is qualified as a reorg
            self.check_last_webhook_msg_reorged_from(
                new_chain_extension[-1].hash, len(new_chain_extension))

        self.kill_server()
Пример #10
0
    def run_test(self):
        """Exercise validation of the double-spend detection (dsdetected) P2P message.

        Sends a series of malformed / invalid dsdetected messages and verifies
        that the webhook server receives NO notification for any of them (while
        the sender's banscore increases), then sends valid messages and verifies
        that notifications are delivered, duplicates are ignored, and stale
        (aged-out) notifications are dropped.
        """

        # Turn on a webhook server
        self.start_webhook_server()

        # Create a P2P connection
        node = self.nodes[0]
        peer = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), node, peer)
        peer.add_connection(connection)
        NetworkThread().start()
        peer.wait_for_verack()

        # Create an initial block with a coinbase we will split into multiple utxos
        initialBlock, _ = make_block(connection)
        coinbaseTx = initialBlock.vtx[0]

        send_by_headers(connection, [initialBlock], do_send_blocks=True)
        wait_for_tip(connection, initialBlock.hash)

        # Mature the coinbase (100 confirmations) so it becomes spendable.
        node.generate(101)
        block101hex = node.getblock(node.getbestblockhash(), False)
        block101dict = node.getblock(node.getbestblockhash(), 2)
        block101 = FromHex(CBlock(), block101hex)
        block101.height = block101dict['height']
        block101.rehash()

        # Create a block with a transaction spending coinbaseTx of a previous block and making multiple outputs for future transactions to spend
        utxoBlock, _ = make_block(connection, parent_block=block101)
        utxoTx = create_tx(coinbaseTx, 0, 1 * COIN)

        # Create additional 48 outputs (we let 1 COIN as fee)
        for _ in range(48):
            utxoTx.vout.append(CTxOut(1 * COIN, CScript([OP_TRUE])))
        # Add to block
        utxoTx.rehash()

        utxoBlock.vtx.append(utxoTx)
        utxoBlock.hashMerkleRoot = utxoBlock.calc_merkle_root()
        utxoBlock.solve()

        send_by_headers(connection, [utxoBlock], do_send_blocks=True)
        wait_for_tip(connection, utxoBlock.hash)

        # Make sure serialization/deserialization works as expected
        # Create dsdetected message. The content is not important here.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(utxoBlock),
                 CBlockHeader(initialBlock)],
                DSMerkleProof(1, utxoTx, utxoBlock.hashMerkleRoot,
                              [MerkleProofNode(utxoBlock.vtx[0].sha256)]))
        ])
        dsdBytes = dsdMessage.serialize()
        dsdMessageDeserialized = msg_dsdetected()
        dsdMessageDeserialized.deserialize(BytesIO(dsdBytes))
        assert_equal(str(dsdMessage), str(dsdMessageDeserialized))

        # Send a message containing random bytes. Webhook should not receive the notification.
        peer.send_and_ping(fake_msg_dsdetected())
        assert_equal(self.get_JSON_notification(), None)

        # Create two blocks with transactions spending the same utxo
        # (blockF is kept aside for the "aged notification" test further below).
        blockA, _ = make_block(connection, parent_block=utxoBlock)
        blockB, _ = make_block(connection, parent_block=utxoBlock)
        blockF, _ = make_block(connection, parent_block=utxoBlock)
        txA = create_tx(utxoBlock.vtx[1], 0, int(0.8 * COIN))
        txB = create_tx(utxoBlock.vtx[1], 0, int(0.9 * COIN))
        txF = create_tx(utxoBlock.vtx[1], 0, int(0.7 * COIN))
        txA.rehash()
        txB.rehash()
        txF.rehash()
        blockA.vtx.append(txA)
        blockB.vtx.append(txB)
        blockF.vtx.append(txF)
        blockA.hashMerkleRoot = blockA.calc_merkle_root()
        blockB.hashMerkleRoot = blockB.calc_merkle_root()
        blockF.hashMerkleRoot = blockF.calc_merkle_root()
        blockA.calc_sha256()
        blockB.calc_sha256()
        blockF.calc_sha256()
        blockA.solve()
        blockB.solve()
        blockF.solve()

        # Record the banscore so we can verify each rejected message added to it.
        start_banscore = node.getpeerinfo()[0]['banscore']

        # Webhook should not receive the notification if we send dsdetected message with only one block detail.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with two block details and one is containing no headers.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message where last headers in block details do not have a common previous block hash.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(utxoBlock)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message where block details does not have headers in proper order.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(utxoBlock),
                 CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the empty merkle proof.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails([CBlockHeader(blockB)], DSMerkleProof())
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the wrong index in the merkle proof (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(0, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the wrong transaction in the merkle proof (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txA, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the wrong merkle root (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the wrong merkle proof (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockA.hashMerkleRoot)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the merkle proof having an additional unexpected node (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails([CBlockHeader(blockB)],
                         DSMerkleProof(1, txB, blockB.hashMerkleRoot, [
                             MerkleProofNode(blockB.vtx[0].sha256),
                             MerkleProofNode(blockA.hashMerkleRoot)
                         ]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the valid proof, but transaction is a coinbase transaction
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(0, blockB.vtx[0], blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[1].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with transactions that are not double spending
        # Create a block similar as before, but with a transaction spending a different utxo
        blockC, _ = make_block(connection, parent_block=utxoBlock)
        txC = create_tx(utxoBlock.vtx[1], 1, int(0.7 * COIN))
        blockC.vtx.append(txC)
        blockC.hashMerkleRoot = blockC.calc_merkle_root()
        blockC.solve()
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockC)],
                DSMerkleProof(1, txC, blockC.hashMerkleRoot,
                              [MerkleProofNode(blockC.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if the two double spending transactions are actually the same transaction (having same txid)
        # Create a block similar as before, but containing the very same transaction txA
        blockD, _ = make_block(connection, parent_block=utxoBlock)
        blockD.vtx.append(txA)
        blockD.hashMerkleRoot = blockD.calc_merkle_root()
        blockD.solve()
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockD)],
                DSMerkleProof(1, txA, blockD.hashMerkleRoot,
                              [MerkleProofNode(blockD.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if header does not pass the proof-of-work check.
        # NOTE: PoW is so easy in regtest that many nonces solve a block, so we have
        # to search for a nonce that does NOT solve it and use that one.
        blockE, _ = make_block(connection, parent_block=utxoBlock)
        blockE.vtx.append(txB)
        blockE.hashMerkleRoot = blockE.calc_merkle_root()
        nonce = blockE.nNonce
        while True:
            blockE.solve()
            if blockE.nNonce > nonce:
                # solve() skipped past `nonce`, so `nonce` itself does not satisfy PoW
                blockE.nNonce = nonce
                break
            nonce += 1
            blockE.nNonce = nonce

        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockE)],
                DSMerkleProof(1, txB, blockE.hashMerkleRoot,
                              [MerkleProofNode(blockE.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Each rejected message should have raised the banscore by 10.
        end_banscore = node.getpeerinfo()[0]['banscore']
        assert ((end_banscore - start_banscore) / 10 == 13
                )  # because we have 13 negative tests so far

        # Finally, webhook should receive the notification if we send a proper dsdetected message
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        json_notification = self.get_JSON_notification()
        # remove divergentBlockHash so we can compare with the ds-message
        assert (json_notification != None)
        for e in json_notification['blocks']:
            del e['divergentBlockHash']
        assert_equal(str(dsdMessage),
                     str(msg_dsdetected(json_notification=json_notification)))

        # Repeat previous test but change the order of the BlockDetails, the node should identify this as a duplicate
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # repeat previous test but generate many blocks in the node to age the notification message.
        # very old notification messages shall be ignored. We use the same thresholds as safe mode.
        # We will hardcode this threshold for now until branch we depend on is merged
        node.generate(289)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockF)],
                DSMerkleProof(1, txF, blockF.hashMerkleRoot,
                              [MerkleProofNode(blockF.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Create number of random valid block trees and send dsdetected P2P message for each
        maxNumberOfBranches = 10
        maxNumberOfBlocksPerBranch = 30
        for _ in range(10):
            blockTree = self.createRandomBlockTree(maxNumberOfBranches,
                                                   maxNumberOfBlocksPerBranch,
                                                   utxoBlock,
                                                   [utxoBlock.vtx[1]])
            dsdMessage = self.createDsDetectedMessageFromBlockTree(blockTree)
            peer.send_and_ping(dsdMessage)
            # Notification should be received as generated dsdetected message is valid
            json_notification = self.get_JSON_notification()
            # remove divergentBlockHash so we can compare with the ds-message
            assert (json_notification != None)
            for e in json_notification['blocks']:
                del e['divergentBlockHash']
            assert_equal(
                str(dsdMessage),
                str(msg_dsdetected(json_notification=json_notification)))

        self.stop_webhook_server()
Пример #11
0
    def run_test(self):
        """Verify that headers with an unknown previous block are rejected.

        A header whose parent the node has never seen must be refused, and a
        block with no previous block at all (an alternative genesis) must be
        rejected outright; once the missing parent arrives, the previously
        rejected header becomes acceptable.
        """

        self.stop_node(0)

        with self.run_node_with_connections(
                "reject headers if previous block is missing", 0, [],
                self.num_peers) as p2p_connections:

            connection = p2p_connections[0]
            coinbase_height = 1

            def announce_header(blk):
                # Advertise a single block header via a HEADERS message.
                hdr_msg = msg_headers()
                hdr_msg.headers = [CBlockHeader(blk)]
                connection.cb.send_message(hdr_msg)

            def expect_getdata_for(blk):
                # An accepted header is followed by a GETDATA for that block.
                wait_until(lambda: connection.cb.last_message["getdata"].inv[0].
                           hash == blk.sha256)

            # 1. Create first block.
            block_0 = prepareBlock(coinbase_height,
                                   self.nodes[0].getbestblockhash())

            # 2. Connection sends HEADERS msg to bitcoind and waits for GETDATA.
            announce_header(block_0)
            connection.cb.wait_for_getdata()
            expect_getdata_for(block_0)

            # 3. Connection sends BLOCK to bitcoind.
            connection.cb.send_message(msg_block(block_0))

            # 4. Bitcoind adds block to active chain.
            wait_for_tip(self.nodes[0], block_0.hash)

            # 5. Create two chained blocks.
            block_1 = prepareBlock(coinbase_height + 1, block_0.hash)
            block_2 = prepareBlock(coinbase_height + 2, block_1.hash)

            # 6. Announce the second block first: its parent is unknown, so it is rejected.
            announce_header(block_2)
            wait_until(lambda: check_for_log_msg(
                self, "received header " + block_2.hash +
                ": missing prev block", "/node0"))

            # 7. Announce the first block; it should be accepted.
            announce_header(block_1)
            expect_getdata_for(block_1)

            # 8. Re-announce the second block; accepted now that its parent is known.
            announce_header(block_2)
            expect_getdata_for(block_2)

            # 9. Try to send alternative Genesis block (no previous block). It should be rejected.
            genesis_block = create_block(hashprev=0,
                                         coinbase=create_coinbase(
                                             height=0, outputValue=25))
            genesis_block.solve()
            connection.cb.send_message(msg_block(genesis_block))
            wait_until(lambda: check_for_log_msg(
                self, "ERROR: FindPreviousBlockIndex: prev block not found",
                "/node0"))
Пример #12
0
    def run_rest_case(self, min_fork_len, max_height_difference,
                      max_fork_distance):
        """Exercise safe-mode activation for three fork shapes, then restarts.

        Builds a main chain plus three forks (distant, short, low-height-
        difference), each sized exactly at the given activation limit, and
        checks the node's safe-mode data after each fork arrives.  The node is
        then restarted with limits off by one (no fork qualifies, safe mode
        off) and again with the original limits (safe-mode state restored).
        Finally exercises ignoresafemodeforblock / reconsidersafemodeforblock
        and main-chain extension / invalidation.

        NOTE(review): the name looks like a typo of ``run_test_case`` —
        renaming would break callers, so it is kept as-is.

        min_fork_len          -- minimal fork length that triggers safe mode
        max_height_difference -- max fork-tip height offset from the active tip
        max_fork_distance     -- max distance of the fork point from the tip
        """

        args = [
            f"-safemodemaxforkdistance={max_fork_distance}",
            f"-safemodeminforklength={min_fork_len}",
            f"-safemodeminblockdifference={max_height_difference}",
            f"-safemodewebhookurl=http://127.0.0.1:{self.PORT}/safemode",
        ]

        with self.run_node_with_connections("Preparation", 0, args,
                                            2) as (conn1, conn2):
            conn1.rpc.generate(1)

            root_block, root_block_time = make_block(conn1, last_block_time=0)
            self.last_block_time = root_block_time
            send_by_headers(conn1, [root_block], do_send_blocks=True)
            wait_for_tip(conn1, root_block.hash)

            # We will create
            # ========================================================
            #  mc -> main chain mc[N] is active tip
            #  sf -> short fork
            #  df -> distant fork
            #  ld -> low height difference fork
            #
            #  |--------------max_fork_distance------------------------|
            # root - mc[0] - mc[1] - mc[2] - mc[3] - ... - mc[N-1] -  mc[N]
            #  |         \                                          \
            #  |          \                                           sf[0] - sf[1] - ... -sf[N]
            #  |           \                                          |-----min_fork_len------|
            #   \           \
            #    \           ld[0] - ... - ld[N]
            #     \                          |---max_height_difference---|  -> (if negative ld[N] is behind active tip, in front otherwise)
            #      \
            #       \
            #        \
            #         df[0] - df[1] - ... df[N]
            #

            # the main chain, make it long enough to be able to create distant fork
            main_chain = self.make_chain(conn1, root_block, max_fork_distance)

            # the distant fork, last common block is at limit of acceptance
            distant_fork_len = max(
                max_fork_distance + max_height_difference,
                min_fork_len) + 10  # make it longer than necessary
            distant_fork = self.make_chain(conn1, root_block, distant_fork_len)
            expected_distant_fork_data = {
                "forkfirstblock": distant_fork[0].hash,
                "tips": {distant_fork[-1].hash},
                "lastcommonblock": root_block.hash
            }

            # the short fork, fork with minimal acceptable length
            short_fork = self.make_chain(conn1, main_chain[-2], min_fork_len)
            expected_short_fork_data = {
                "forkfirstblock": short_fork[0].hash,
                "tips": {short_fork[-1].hash},
                "lastcommonblock": main_chain[-2].hash
            }

            # the low height difference fork; a fork whose tip is at minimal acceptable height relative to the chain tip
            low_height_difference_fork_len = len(
                main_chain
            ) + max_height_difference - 1  # minus 1 is because we are starting at first block of the main chain
            low_height_difference_fork = self.make_chain(
                conn1, main_chain[0], low_height_difference_fork_len)
            expected_low_height_difference_fork_data = {
                "forkfirstblock": low_height_difference_fork[0].hash,
                "tips": {low_height_difference_fork[-1].hash},
                "lastcommonblock": main_chain[0].hash
            }

            # send main branch that should be active chain
            send_by_headers(conn1, main_chain, do_send_blocks=True)
            wait_for_tip(conn1, main_chain[-1].hash)
            # no forks yet, not in the safe mode
            self.wait_for_safe_mode_data(conn1.rpc, [])  # not in safe mode

            send_by_headers(conn1, distant_fork, do_send_blocks=False)
            wait_for_tip_status(conn1, distant_fork[-1].hash, "headers-only")
            # distant fork triggers the safe mode
            self.wait_for_safe_mode_data(conn1.rpc,
                                         [expected_distant_fork_data])

            send_by_headers(conn1, short_fork, do_send_blocks=False)
            wait_for_tip_status(conn1, short_fork[-1].hash, "headers-only")
            # two forks triggering the safe mode: distant fork and short fork
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_short_fork_data,
            ])

            send_by_headers(conn1,
                            low_height_difference_fork,
                            do_send_blocks=False)
            wait_for_tip_status(conn1, low_height_difference_fork[-1].hash,
                                "headers-only")
            # all three forks triggering the safe mode
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_low_height_difference_fork_data,
                expected_short_fork_data,
            ])

        # stopping the node
        self.webhook_messages = []
        args_off_by_one = [
            f"-safemodemaxforkdistance={max_fork_distance-1}",
            f"-safemodeminforklength={min_fork_len+1}",
            f"-safemodeminblockdifference={max_height_difference+1}",
            f"-safemodewebhookurl=http://127.0.0.1:{self.PORT}/safemode",
        ]

        # Restarting the node with limits off by 1 so no fork satisfies safe mode activation criteria
        with self.run_node_with_connections("Preparation", 0, args_off_by_one,
                                            2) as (conn1, conn2):

            # The node is not in the safe mode, no forks
            self.wait_for_safe_mode_data(conn1.rpc, [],
                                         check_webhook_messages=False)
            assert len(
                self.webhook_messages
            ) == 0  # we are starting without safe mode, the message is not sent

        # Restarting the node with original params, the node should be in the safe mode again
        with self.run_node_with_connections("Preparation", 0, args,
                                            2) as (conn1, conn2):

            # the safe mode is at the same state as before first restart
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_low_height_difference_fork_data,
                expected_short_fork_data,
            ])

            # We will add three more extensions to the chain
            #=====================================================
            #  ... - mc[N-1] -  mc[N] - mc_extension            sf_extension_2
            #                 \                               /
            #                   sf[0] - sf[1] - ... - sf[N-1] - sf[N]
            #                                                 \
            #                                                   sf_extension

            short_fork_extension = self.make_chain(conn1, short_fork[-2], 1)
            send_by_headers(conn1, short_fork_extension, do_send_blocks=False)

            # when adding a new tip to the short branch we will just add a new tip to an existing fork
            expected_short_fork_data["tips"].add(short_fork_extension[-1].hash)
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_low_height_difference_fork_data,
                expected_short_fork_data,
            ])

            # ignore tips of the short branch making it not trigger safe mode any more
            conn1.rpc.ignoresafemodeforblock(short_fork_extension[-1].hash)
            conn1.rpc.ignoresafemodeforblock(short_fork[-1].hash)
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_low_height_difference_fork_data,
            ])

            # reconsidering previously ignored blocks
            conn1.rpc.reconsidersafemodeforblock(short_fork_extension[-1].hash)
            conn1.rpc.reconsidersafemodeforblock(short_fork[-1].hash)
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_low_height_difference_fork_data,
                expected_short_fork_data,
            ])

            # ignoring root of the short fork, short fork will not trigger the safe mode.
            conn1.rpc.ignoresafemodeforblock(short_fork[0].hash)
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_low_height_difference_fork_data,
            ])

            # extend ignored short fork with one more tip, we should ignore this block also because its ancestor is ignored
            short_fork_extension_2 = self.make_chain(conn1, short_fork[-2], 1)
            send_by_headers(conn1, short_fork_extension_2, do_send_blocks=True)
            wait_for_tip_status(conn1, short_fork_extension_2[-1].hash,
                                "headers-only")
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_low_height_difference_fork_data,
            ])

            # but when it will be reconsidered the new tip should be visible
            expected_short_fork_data["tips"].add(
                short_fork_extension_2[-1].hash)

            # reconsidering one of the tips of the short fork will revert ignoring of the root block
            conn1.rpc.reconsidersafemodeforblock(short_fork[-1].hash)
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_low_height_difference_fork_data,
                expected_short_fork_data,
            ])

            main_chain_extension = self.make_chain(conn1, main_chain[-1], 1)
            send_by_headers(conn1, main_chain_extension, do_send_blocks=True)
            # we have extended the main chain so distant fork became too distant and low height fork became too low
            # not in the safe mode anymore
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_short_fork_data,
            ])

            # we are now invalidating main chain extension so distant and low fork are triggering the safe mode again
            conn1.rpc.invalidateblock(main_chain_extension[0].hash)
            self.wait_for_safe_mode_data(conn1.rpc, [
                expected_distant_fork_data,
                expected_low_height_difference_fork_data,
                expected_short_fork_data,
            ])
    def run_test(self):
        """Check that safe-mode state survives a node restart.

        Builds a main chain and a valid competing fork long enough to trigger
        safe mode, restarts the node (safe mode must persist), then extends
        the main chain far enough that the fork base falls out of range and
        safe mode is deactivated.
        """
        with self.run_node_with_connections("Preparation", 0, None,
                                            2) as (conn1, conn2):
            tip_time = 0
            conn1.rpc.generate(1)

            def build_branch(conn, extra_blocks):
                # Start a new fork root on `conn` and chain `extra_blocks`
                # more blocks on top of it; returns the whole branch.
                nonlocal tip_time
                root, tip_time = make_block(conn, last_block_time=tip_time)
                branch = [root]
                for _ in range(extra_blocks):
                    nxt, tip_time = make_block(
                        conn, branch[-1], last_block_time=tip_time)
                    branch.append(nxt)
                return branch

            branch_1_blocks = build_branch(
                conn1, SAFE_MODE_DEFAULT_MAX_FORK_DISTANCE)
            branch_2_blocks = build_branch(
                conn2, SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 1)

            # send main branch that should be active tip
            send_by_headers(
                conn1,
                branch_1_blocks[:SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 2],
                do_send_blocks=True)

            # send alternative branch
            send_by_headers(conn2, branch_2_blocks, do_send_blocks=True)

            # active tip is from branch 1 and branch 2 has status valid-headers
            wait_for_tip(
                conn1,
                branch_1_blocks[SAFE_MODE_DEFAULT_MIN_FORK_LENGTH + 1].hash)
            wait_for_tip_status(conn1, branch_2_blocks[-1].hash,
                                "valid-headers")

            # we should have entered safe mode (VALID): a valid fork with enough
            # PoW exists and its last common block is close enough to the tip
            assert conn1.rpc.getsafemodeinfo()["safemodeenabled"]

        with self.run_node_with_connections("Restart node in safe mode", 0,
                                            None, 1) as conn:
            conn1 = conn[0]

            # check that we are in safe mode after restart
            assert conn1.rpc.getsafemodeinfo()["safemodeenabled"]

            # extend the main branch to its full length
            send_by_headers(conn1,
                            branch_1_blocks[SAFE_MODE_DEFAULT_MIN_FORK_LENGTH +
                                            2:],
                            do_send_blocks=True)

            # active tip is last block from branch 1
            wait_for_tip(conn1, branch_1_blocks[-1].hash)

            # we should exit safe mode because fork base is too far from active tip
            assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]
# Example #14
    def run_test_case(self,
                      description,
                      order=1,
                      wait=False,
                      numberOfSafeModeLevelChanges=1):
        """Run one safe-mode activation scenario.

        Builds an 11-block main branch (sent with full blocks) and a longer
        21-block competing branch (headers only), sends them in the order
        selected by *order* (1 = main branch first, otherwise the competing
        branch first), then verifies that the node enters safe mode and stays
        there after receiving one fork block plus a block from a third branch.

        :param description: human-readable label for the node run
        :param order: which branch `send_branches` delivers first
        :param wait: passed through to `send_branches`
        :param numberOfSafeModeLevelChanges: how many safe-mode level change
            log lines must appear before the test proceeds
        """

        self.log.info("Running test case: %s", description)

        # Remove test folder so each case builds its chain from scratch.
        regtest_dir = os.path.join(self.nodes[0].datadir, "regtest")
        if os.path.exists(regtest_dir):
            shutil.rmtree(regtest_dir)

        with self.run_node_with_connections(description, 0, None,
                                            3) as (conn1, conn2, conn3):
            last_block_time = 0
            conn1.rpc.generate(1)

            # Main branch: root + 10 descendants.
            branch_1_root, last_block_time = make_block(
                conn1, last_block_time=last_block_time)
            branch_1_blocks = [branch_1_root]
            for _ in range(10):
                new_block, last_block_time = make_block(
                    conn1,
                    branch_1_blocks[-1],
                    last_block_time=last_block_time)
                branch_1_blocks.append(new_block)

            # Longer competing branch: root + 20 descendants.
            branch_2_root, last_block_time = make_block(
                conn2, last_block_time=last_block_time)
            branch_2_blocks = [branch_2_root]
            for _ in range(20):
                new_block, last_block_time = make_block(
                    conn2,
                    branch_2_blocks[-1],
                    last_block_time=last_block_time)
                branch_2_blocks.append(new_block)

            # A third, single-block branch sent later in the test.
            branch_3_root, last_block_time = make_block(
                conn3, last_block_time=last_block_time)

            # The two branch descriptors were previously duplicated across
            # both ordering arms; build them once and only vary the order.
            branch_1 = {
                'conn': conn1,
                'blocks': branch_1_blocks,
                'do_send_blocks': True
            }
            branch_2 = {
                'conn': conn2,
                'blocks': branch_2_blocks,
                'do_send_blocks': False
            }
            if order == 1:
                self.send_branches(branch_1, branch_2, wait)
            else:
                self.send_branches(branch_2, branch_1, wait)

            # Active tip is last block from branch 1; branch 2 is headers-only.
            wait_for_tip(conn1, branch_1_blocks[-1].hash)
            wait_for_tip_status(conn2, branch_2_blocks[-1].hash,
                                "headers-only")

            def assert_in_safe_mode():
                # In safe mode, getbalance must be rejected with the
                # safe-mode warning message.
                try:
                    conn1.rpc.getbalance()
                    assert False, "Should not come to here, should raise exception in line above."
                except JSONRPCException as e:
                    assert e.error[
                        "message"] == "Safe mode: Warning: The network does not appear to fully agree! We received headers of a large fork. Still waiting for block data for more details."

            # We should have entered the safe mode.
            assert_in_safe_mode()

            def wait_for_log():
                # Count safe-mode level changes in the node log. Use a context
                # manager so the log file handle is closed on every poll
                # (the previous version leaked it).
                safeModeChanges = 0
                line_text = "NotifySafeModeLevelChange: Warning: Found chain at least ~6 blocks longer than our best chain."
                log_path = glob.glob(self.options.tmpdir + "/node0" +
                                     "/regtest/bitcoind.log")[0]
                with open(log_path) as log_file:
                    for line in log_file:
                        if line_text in line:
                            self.log.info("Found line: %s", line)
                            safeModeChanges += 1
                            if safeModeChanges == numberOfSafeModeLevelChanges:
                                return True
                return False

            wait_until(wait_for_log)

            # Deliver the fork's root block.
            conn2.send_message(msg_block(branch_2_blocks[0]))
            conn2.cb.sync_with_ping()

            # Send block from the third branch.
            conn3.send_message(msg_block(branch_3_root))
            conn3.cb.sync_with_ping()

            # We should still be in safe mode.
            assert_in_safe_mode()
    def run_test(self):
        """Verify safe-mode (de)activation as the fork distance changes.

        A headers-only fork with more POW but rooted in an invalid block must
        not trigger safe mode while its root is farther than
        -safemodemaxforkdistance from the active tip; invalidating the tip
        shortens that distance and must trigger it. Once the fork's blocks
        arrive and are found invalid, safe mode must stay off.
        """

        MAX_FORK_DISTANCE = 10
        MIN_FORK_LENGTH = 3
        MIN_FORK_DIFFERENCE = 1

        args = [f"-safemodemaxforkdistance={MAX_FORK_DISTANCE}",
                f"-safemodeminforklength={MIN_FORK_LENGTH}",
                f"-safemodeminblockdifference={MIN_FORK_DIFFERENCE}"]

        with self.run_node_with_connections("Preparation", 0, args,
                                            2) as (conn1, conn2):
            last_block_time = 0
            conn1.rpc.generate(1)

            # Main branch: root + MAX_FORK_DISTANCE descendants.
            branch_1_root, last_block_time = make_block(
                conn1, last_block_time=last_block_time)
            branch_1_blocks = [branch_1_root]
            for _ in range(MAX_FORK_DISTANCE):
                new_block, last_block_time = make_block(
                    conn1, branch_1_blocks[-1],
                    last_block_time=last_block_time)
                branch_1_blocks.append(new_block)

            # Competing branch rooted in an invalid block, but long enough
            # to carry more POW than the main branch.
            branch_2_root, last_block_time = make_block(
                conn2, makeValid=False, last_block_time=last_block_time)
            branch_2_blocks = [branch_2_root]
            for _ in range(MAX_FORK_DISTANCE + MIN_FORK_DIFFERENCE + 1):
                new_block, last_block_time = make_block(
                    conn2, branch_2_blocks[-1],
                    last_block_time=last_block_time)
                branch_2_blocks.append(new_block)

            # Send first branch that should be active tip.
            send_by_headers(conn1, branch_1_blocks, do_send_blocks=True)
            wait_for_tip(conn1, branch_1_blocks[-1].hash)

            # Send second branch with more POW (headers only).
            send_by_headers(conn2, branch_2_blocks, do_send_blocks=False)
            wait_for_tip(conn1, branch_1_blocks[-1].hash)
            wait_for_tip_status(conn1, branch_2_blocks[-1].hash,
                                "headers-only")

            # We should not be in safe mode (distance to the fork is too large).
            assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]

            conn1.rpc.invalidateblock(branch_1_blocks[-1].hash)
            wait_for_tip(conn1, branch_1_blocks[-2].hash)
            # Invalidating the tip shortens the distance from the active tip
            # to the fork root, so safe mode should now be active.
            assert conn1.rpc.getsafemodeinfo()["safemodeenabled"]

            conn1.rpc.reconsiderblock(branch_1_blocks[-1].hash)
            wait_for_tip(conn1, branch_1_blocks[-1].hash)
            # Returning to the old state (distance to the fork is too large).
            assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]

            # From time to time this test can run faster than expected and
            # the older blocks for batch 2 headers are not yet requested.
            # In that case they will be rejected due to being too far away
            # from the tip, and we need to send them again once they are
            # requested.
            def on_getdata(conn, msg):
                for i in msg.inv:
                    if i.type != 2:  # MSG_BLOCK
                        error_msg = f"Unexpected data requested {i}"
                        self.log.error(error_msg)
                        raise NotImplementedError(error_msg)
                    for block in branch_2_blocks:
                        if int(block.hash, 16) == i.hash:
                            conn.send_message(msg_block(block))
                            break

            conn2.cb.on_getdata = on_getdata

            # Send second branch full blocks.
            for block in branch_2_blocks:
                conn2.send_message(msg_block(block))

            # (Removed an unused `tips = conn2.rpc.getchaintips()` call whose
            # result was discarded.)

            # Second branch should now be invalid.
            wait_for_tip_status(conn1, branch_2_blocks[-1].hash, "invalid")
            wait_for_tip(conn1, branch_1_blocks[-1].hash)

            # We should not be in safe mode.
            assert not conn1.rpc.getsafemodeinfo()["safemodeenabled"]