Code example #1
File: nodehandling.py  Project: 8bitcoder/myriadcoin
    def run_test(self):
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(len(self.nodes[1].getpeerinfo()), 2)  # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        assert wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()), 0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        # This will throw an exception because 127.0.0.1 is within range 127.0.0.0/24
        assert_raises_jsonrpc(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
        # This will throw an exception because 127.0.0.1/42 is not a real subnet
        assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)  # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        # This will throw an exception because 127.0.0.1 was not added above
        assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        # test persisted banlist
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        assert wait_until(lambda: len(self.nodes[1].listbanned()) == 3, timeout=10)

        stop_node(self.nodes[1], 1)

        self.nodes[1] = start_node(1, self.options.tmpdir)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        ###########################
        # RPC disconnectnode test #
        ###########################
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]

        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
        assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
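
Every example on this page polls a predicate through wait_until. The exact signature differs between the projects sampled here (later examples pass extra lock, sleep, or err_msg arguments), but a minimal sketch of the underlying polling loop, assuming only a wall-clock timeout, could look like this:

import time

def wait_until(predicate, *, timeout=60, sleep=0.5):
    # Poll until the predicate returns a truthy value or the timeout
    # expires; return the final result so callers can write
    # "assert wait_until(...)" as in the example above.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(sleep)
    return False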
Code example #2
    def run_test(self):
        # Create all the connections we will need to node0 at the start because they all need to be
        # setup before we call NetworkThread().start()

        # Create a P2P connection just so that the test framework is happy we're connected
        dummyCB = NodeConnCB()
        dummyConn = NodeConn('127.0.0.1',
                             p2p_port(0),
                             self.nodes[0],
                             dummyCB,
                             nullAssocID=True)
        dummyCB.add_connection(dummyConn)

        # By setting the assocID on this second NodeConn we prevent it sending a version message
        badConnCB = TestNode()
        badConn = NodeConn('127.0.0.1',
                           p2p_port(0),
                           self.nodes[0],
                           badConnCB,
                           assocID=0x01)
        badConnCB.add_connection(badConn)

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()

        # Check initial state
        dummyCB.wait_for_protoconf()
        with mininode_lock:
            assert_equal(len(badConnCB.message_count), 0)

        # Send a badly formatted version message
        badConn.send_message(msg_version_bad())
        # Connection will be closed with a reject
        wait_until(lambda: badConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        wait_until(lambda: badConn.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Check clear log message was generated
        assert check_for_log_msg(
            self, "Failed to process version: (Badly formatted association ID",
            "/node0")
Code example #3
 def wait_for_mnauth(self, node, count, timeout=10):
     def test():
         pi = node.getpeerinfo()
         c = 0
         for p in pi:
             if "verified_proregtx_hash" in p and p["verified_proregtx_hash"] != "":
                 c += 1
         return c >= count
     assert wait_until(test, timeout=timeout)
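
A hypothetical call site for this helper (the node index, count, and timeout below are illustrative, not taken from the original test):

# Wait until node0 reports at least two peers whose MNAUTH handshake
# completed, i.e. peers with a non-empty verified_proregtx_hash.
self.wait_for_mnauth(self.nodes[0], 2, timeout=30)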
Code example #4
 def wait_for_tx(self, txid, node, expected=True, timeout=15):
     def check_tx():
         try:
             return node.getrawtransaction(txid)
         except Exception:
             return False
     w = wait_until(check_tx, timeout=timeout, sleep=0.5)
     if not w and expected:
         raise AssertionError("wait_for_instantlock failed")
     elif w and not expected:
         raise AssertionError("waiting unexpectedly succeeded")
Code example #5
 def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15):
     def check_chainlocked_block():
         try:
             block = node.getblock(block_hash)
             return block["confirmations"] > 0 and block["chainlock"]
         except Exception:
             return False
     w = wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1)
     if not w and expected:
         raise AssertionError("wait_for_chainlocked_block failed")
     elif w and not expected:
         raise AssertionError("waiting unexpectedly succeeded")
Code example #6
    def run_test(self):
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)

        self.log.debug("Mine a single block to get out of IBD")
        self.nodes[0].generate(1)
        self.sync_all()

        self.log.debug("Send 5 transactions from node2 (to its own address)")
        for i in range(5):
            self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
        self.sync_all()

        self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug("Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.")
        stop_nodes(self.nodes)
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir))
        self.nodes.append(start_node(1, self.options.tmpdir))
        # Give bitcoind a second to reload the mempool
        time.sleep(1)
        assert wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
        stop_nodes(self.nodes)
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-persistmempool=0"]))
        # Give bitcoind a second to reload the mempool
        time.sleep(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
        stop_nodes(self.nodes)
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir))
        assert wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
Code example #7
File: mempool_persist.py  Project: boxhock/ion
    def run_test(self):
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)

        self.log.debug("Mine a single block to get out of IBD")
        self.nodes[0].generate(1)
        self.sync_all()

        self.log.debug("Send 5 transactions from node2 (to its own address)")
        for i in range(5):
            self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
        self.sync_all()

        self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug("Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.")
        self.stop_nodes()
        self.nodes = []
        self.nodes.append(self.start_node(0, self.options.tmpdir))
        self.nodes.append(self.start_node(1, self.options.tmpdir))
        # Give iond a second to reload the mempool
        time.sleep(1)
        assert wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
        self.stop_nodes()
        self.nodes = []
        self.nodes.append(self.start_node(0, self.options.tmpdir, ["-persistmempool=0"]))
        # Give iond a second to reload the mempool
        time.sleep(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
        self.stop_nodes()
        self.nodes = []
        self.nodes.append(self.start_node(0, self.options.tmpdir))
        assert wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
Code example #8
    def get_tests(self):

        # shorthand for functions
        block = self.chain.next_block
        node = self.nodes[0]
        self.chain.set_genesis_hash(int(node.getbestblockhash(), 16))

        block(0)
        yield self.accepted()

        test, out, _ = prepare_init_chain(self.chain, 100, 100)

        yield test

        # Create transaction with OP_RETURN in the locking script.
        tx1 = create_transaction(out[0].tx, out[0].n, b'', 100000,
                                 CScript([OP_RETURN]))
        self.test.connections[0].send_message(msg_tx(tx1))
        # wait for transaction processing
        wait_until(lambda: tx1.hash in node.getrawmempool(), timeout=5)

        # generate an empty block, height is 102
        block(1, spend=out[1])
        yield self.accepted()

        tx2 = create_transaction(tx1, 0, b'\x51', 1, CScript([OP_TRUE]))
        self.test.connections[0].send_message(msg_tx(tx2))
        # wait for transaction processing
        wait_until(lambda: tx2.hash in node.getrawmempool(), timeout=5)

        # Mine block (height 103) with new transactions.
        self.nodes[0].generate(1)
        tx = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['tx']
        assert_equal(len(tx), 3)
        assert_equal(tx1.hash, tx[1])
        assert_equal(tx2.hash, tx[2])
Code example #9
 def wait_for_instantlock(self, txid, node, expected=True, timeout=15, do_assert=False):
     def check_instantlock():
         try:
             return node.getrawtransaction(txid, True)["instantlock"]
         except Exception:
             return False
     w = wait_until(check_instantlock, timeout=timeout, sleep=0.1)
     if not w and expected:
         if do_assert:
             raise AssertionError("wait_for_instantlock failed")
         else:
             return False
     elif w and not expected:
         if do_assert:
             raise AssertionError("waiting unexpectedly succeeded")
         else:
             return False
     return True
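
Unlike the raising helpers above, this variant defaults to returning a boolean, so a hypothetical caller can branch on the outcome instead of failing the test:

# With do_assert=False (the default) a timeout yields False rather
# than an AssertionError.
if not self.wait_for_instantlock(txid, self.nodes[0]):
    self.log.info("transaction was not InstantSend-locked in time")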
Code example #10
    def run_test(self):

        self.stop_node(0)

        with self.run_node_with_connections("send GETDATA messages and check responses", 0, [], 1) as p2p_connections:

            receivedBlocks = set()
            def on_block(conn, message):
                nonlocal receivedBlocks
                receivedBlocks.add(message.block.hash)

            receivedTxs = set()
            def on_tx(conn, message):
                nonlocal receivedTxs
                receivedTxs.add(message.tx.hash)

            receivedTxsNotFound = set()
            def on_notfound(conn, message):
                nonlocal receivedTxsNotFound
                for inv in message.inv:
                    receivedTxsNotFound.add(inv.hash)

            self.nodes[0].generate(5)

            connection = p2p_connections[0]
            connection.cb.on_block = on_block
            connection.cb.on_tx = on_tx
            connection.cb.on_notfound = on_notfound

            # 1. Check that sending GETDATA of unknown block does no action.
            unknown_hash = 0xdecaf
            connection.cb.send_message(msg_getdata([CInv(CInv.BLOCK, unknown_hash)]))

            # 2. Check that sending GETDATA of known block returns BLOCK message.
            known_hash = self.nodes[0].getbestblockhash()
            connection.cb.send_message(msg_getdata([CInv(CInv.BLOCK, int(known_hash, 16))]))
            wait_until(lambda: known_hash in receivedBlocks)
            # previously requested unknown block is not in the received list
            assert_equal(unknown_hash not in receivedBlocks, True)
            assert_equal(len(receivedBlocks), 1)

            # 3. Check that sending GETDATA of unknown transaction returns NOTFOUND message.
            connection.cb.send_message(msg_getdata([CInv(CInv.TX, unknown_hash)]))
            wait_until(lambda: unknown_hash in receivedTxsNotFound)

            # 4. Check that sending GETDATA of known transaction returns TX message.
            known_hash = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
            connection.cb.send_message(msg_getdata([CInv(CInv.TX, int(known_hash, 16))]))
            wait_until(lambda: known_hash in receivedTxs)
            assert_equal(len(receivedTxs), 1)
Code example #11
    def reorg_test(self):
        height = int(self.options.height)
        peers = self.num_nodes
        tip_age = int(self.options.tip_age)
        should_reorg = int(self.options.should_reorg)

        self.log.info(f"Doing a reorg test with height: {height}, peers: {peers}, tip_age: {tip_age}.  " + \
                      f"Should reorg? *{should_reorg}*")

        asset_name = "MOON_STONES"
        adversary = self.nodes[0]
        subject = self.nodes[-1]

        # enough to activate assets
        start = 432

        self.log.info(f"Setting all node times to {tip_age} seconds ago...")
        now = int(round(time.time()))
        set_node_times(self.nodes, now - tip_age)

        self.log.info(
            f"Mining {start} starter blocks on all nodes and syncing...")
        subject.generate(round(start / 2))
        self.sync_all()
        adversary.generate(round(start / 2))
        self.sync_all()

        self.log.info("Stopping adversary node...")
        self.stop_node(0)

        self.log.info(f"Subject is issuing asset: {asset_name}...")
        subject.issue(asset_name)

        self.log.info(f"Miners are mining {height} blocks...")
        subject.generate(height)
        wait_until(lambda: [n.getblockcount()
                            for n in self.nodes[1:]] == [height + start] *
                   (peers - 1),
                   err_msg="Wait for BlockCount")
        self.log.info("BlockCount: " +
                      str([start] +
                          [n.getblockcount() for n in self.nodes[1:]]))

        self.log.info("Restarting adversary node...")
        self.start_node(0)

        self.log.info(f"Adversary is issuing asset: {asset_name}...")
        adversary.issue(asset_name)

        self.log.info(
            f"Adversary is mining {height*2} (2 x {height}) blocks over the next ~{tip_age} seconds..."
        )
        interval = round(tip_age / (height * 2)) + 1
        for i in range(0, height * 2):
            set_node_times(self.nodes, (now - tip_age) + ((i + 1) * interval))
            adversary.generate(1)
        assert (adversary.getblockcount() -
                start == (subject.getblockcount() - start) * 2)
        besttimes = [
            n.getblock(n.getbestblockhash())['time'] for n in self.nodes
        ]
        self.log.info("BestTimes: " + str(besttimes))
        self.log.info(
            f"Adversary: {besttimes[0]}; subject: {besttimes[-1]}; difference: {besttimes[0] - besttimes[-1]}; expected gte: {tip_age}"
        )
        assert (besttimes[0] - besttimes[-1] >= tip_age)

        self.log.info("BlockCount: " +
                      str([n.getblockcount() for n in self.nodes]))

        self.log.info("Reconnecting the network and syncing the chain...")
        for i in range(1, peers):
            connect_nodes_bi(self.nodes, 0, i, should_reorg)

        expected_height = start + height
        subject_owns_asset = True
        if should_reorg > 0:
            self.log.info(
                f"Expected a reorg -- blockcount should be {expected_height} and subject should own {asset_name} (waiting 5 seconds)..."
            )
            expected_height += height
            subject_owns_asset = False
        else:
            self.log.info(
                f"Didn't expect a reorg -- blockcount should remain {expected_height} and both subject and adversary should own {asset_name} (waiting 5 seconds)..."
            )

        # noinspection PyBroadException
        try:
            wait_until(
                lambda: [n.getblockcount()
                         for n in self.nodes] == [expected_height] * peers,
                timeout=5,
                err_msg="getblockcount")
        except:
            pass
        self.log.info("BlockCount: " +
                      str([n.getblockcount() for n in self.nodes]))
        assert_equal(subject.getblockcount(), expected_height)
        assert_contains_pair(asset_name + '!', 1, adversary.listmyassets())
        if subject_owns_asset:
            assert_contains_pair(asset_name + '!', 1, subject.listmyassets())
        else:
            assert_does_not_contain_key(asset_name + '!',
                                        subject.listmyassets())
Code example #12
File: disconnect_ban.py  Project: boxhock/ion
    def run_test(self):
        self.log.info("Test setban and listbanned RPCs")

        self.log.info("setban: successfully ban single IP address")
        assert_equal(len(self.nodes[1].getpeerinfo()), 2)  # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        assert wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()), 0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("clearbanned: successfully clear ban list")
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")

        self.log.info("setban: fail to ban an already banned subnet")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        assert_raises_jsonrpc(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")

        self.log.info("setban: fail to ban an invalid subnet")
        assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)  # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24

        self.log.info("setban remove: fail to unban a non-banned subnet")
        assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("setban remove: successfully unban subnet")
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        self.log.info("setban: test persistence across node restart")
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        # Set the mocktime so we can control when bans expire
        old_time = int(time.time())
        self.nodes[1].setmocktime(old_time)
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        # Move time forward by 3 seconds so the third ban has expired
        self.nodes[1].setmocktime(old_time + 3)
        assert_equal(len(self.nodes[1].listbanned()), 3)

        self.stop_node(1)

        self.nodes[1] = self.start_node(1, self.options.tmpdir)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        self.log.info("Test disconnectnode RPCs")

        self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        node1 = self.nodes[0].getpeerinfo()[0]['addr']
        assert_raises_jsonrpc(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)

        self.log.info("disconnectnode: fail to disconnect when calling with junk address")
        assert_raises_jsonrpc(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")

        self.log.info("disconnectnode: successfully disconnect node by address")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]

        self.log.info("disconnectnode: successfully reconnect node")
        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]

        self.log.info("disconnectnode: successfully disconnect node by node id")
        id1 = self.nodes[0].getpeerinfo()[0]['id']
        self.nodes[0].disconnectnode(nodeid=id1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
Code example #13
    def run_test(self):
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(
            len(self.nodes[1].getpeerinfo()),
            2)  # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        assert wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0,
                          timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()),
                     0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        # This will throw an exception because 127.0.0.1 is within range 127.0.0.0/24
        assert_raises_jsonrpc(-23, "IP/Subnet already banned",
                              self.nodes[1].setban, "127.0.0.1", "add")
        # This will throw an exception because 127.0.0.1/42 is not a real subnet
        assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet",
                              self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(
            len(self.nodes[1].listbanned()), 1
        )  # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        # This will throw an exception because 127.0.0.1 was not added above
        assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban,
                              "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        # test persisted banlist
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19",
                             "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        assert wait_until(lambda: len(self.nodes[1].listbanned()) == 3,
                          timeout=10)

        stop_node(self.nodes[1], 1)

        self.nodes[1] = start_node(1, self.options.tmpdir)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        ###########################
        # RPC disconnectnode test #
        ###########################
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1,
                          timeout=10)
        assert not [
            node
            for node in self.nodes[0].getpeerinfo() if node['addr'] == address1
        ]

        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
        assert [
            node for node in self.nodes[0].getpeerinfo()
            if node['addr'] == address1
        ]
Code example #14
File: bip148-segwit-uasf.py  Project: stockay/bitcoin
    def sync_chaintips(self, nodes=None, timeout=60, extra_check=None):
        if nodes is None:
            nodes = list(range(len(self.nodes)))
        statuses_where_node_has_the_block = ('valid-fork', 'active')

        def chaintip_check():
            if extra_check is not None:
                if not extra_check():
                    return False
            all_known_tips = {}
            chaintips_replies = [(r, self.nodes[r].getchaintips())
                                 for r in nodes]
            for (r, tips) in chaintips_replies:
                for tip in tips:
                    if tip['hash'] in all_known_tips and tip[
                            'status'] not in statuses_where_node_has_the_block:
                        continue
                    all_known_tips[tip['hash']] = (r, tip)

            # Make sure we know a node we can fetch the block from
            for tip in all_known_tips.keys():
                if all_known_tips[tip][1][
                        'status'] not in statuses_where_node_has_the_block:
                    for r in nodes:
                        try:
                            all_known_tips[tip] = (r,
                                                   self.nodes[r].getblock(tip))
                            break
                        except:
                            pass

            self.log.debug(
                'There are %d tips: %s' %
                (len(all_known_tips), tuple(sorted(all_known_tips.keys()))))
            for (r, tips) in chaintips_replies:
                invalid_blocks = []
                my_known_tips = set()
                active = None
                # Ideally, best should use chainwork, but that's not in getchaintips...
                best = {'height': 0}
                for tip in tips:
                    my_known_tips.add(tip['hash'])
                    if tip['status'] == 'invalid':
                        invalid_blocks.append(tip)
                    else:
                        if tip['height'] > best['height']:
                            best = tip
                    if tip['status'] == 'active':
                        active = tip
                        if tip['height'] == best['height']:
                            best = tip
                if best != active:
                    self.log.debug(
                        "Best potentially-valid block is not active on node %s"
                        % (r, ))
                    return False
                missing_tips = all_known_tips.keys() - my_known_tips
                for tip in set(missing_tips):
                    for inv_tip in invalid_blocks:
                        if self.is_block_ancestor(
                                self.nodes[all_known_tips[tip][0]], tip,
                                inv_tip['hash'], inv_tip['height']):
                            # One of our invalid tips is a parent of the missing tip
                            missing_tips.remove(tip)
                            break
                    for known_tip in my_known_tips:
                        # NOTE: Can't assume this node has the block, in case it's invalid
                        if self.is_block_ancestor(
                                self.nodes[all_known_tips[known_tip][0]],
                                known_tip, tip,
                                all_known_tips[tip][1]['height']):
                            # We have a valid tip that descends from the missing tip
                            missing_tips.remove(tip)
                            break
                    if tip in missing_tips:
                        self.log.debug('Node %s missing tip %s' % (r, tip))
                        return False
            self.log.debug('All nodes have all syncable tips')
            return True

        assert wait_until(chaintip_check, timeout=timeout)
Code example #15
    def run_test(self):
        node0 = NodeConnCB()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])
        NetworkThread().start()  # Start up network handling in another thread

        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2)
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that a transaction with non-DER signature can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                     self.nodeaddress, 1.0)
        un_der_ify(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                             block_time)
        block.nVersion = 2
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 3")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
        block.nVersion = 2
        block.rehash()
        block.solve()
        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(node0.last_message["reject"].reason,
                         b'bad-version(0x00000002)')
            assert_equal(node0.last_message["reject"].data, block.sha256)
            del node0.last_message["reject"]

        self.log.info(
            "Test that transactions with non-DER signatures cannot appear in a block"
        )
        block.nVersion = 3

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                     self.nodeaddress, 1.0)
        un_der_ify(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for DERSIG by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
        node0.send_and_ping(MsgTx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()

        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            # We can receive different reject messages depending on whether
            # Krond is running with multiple script check threads. If script
            # check threads are not in use, then transaction script validation
            # happens sequentially, and Krond produces more specific reject
            # reasons.
            assert node0.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(node0.last_message["reject"].data, block.sha256)
            if node0.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(node0.last_message["reject"].reason,
                             b'block-validation-failed')
            else:
                assert b'Non-canonical DER signature' in node0.last_message[
                    "reject"].reason

        self.log.info(
            "Test that a version 3 block with a DERSIG-compliant transaction is accepted"
        )
        block.vtx[1] = create_transaction(self.nodes[0],
                                          self.coinbase_blocks[1],
                                          self.nodeaddress, 1.0)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
Code example #16
File: example_test.py  Project: oipwg/witseg
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

        height = 1

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our NodeConn connection
            node0.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info("Wait for node2 reach current tip. Test that it has propogated all the blocks to us")

        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        node2.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # NodeConnCB objects.
        assert wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
Code example #17
    def run_test(self):
        self.log.info("Test setban and listbanned RPCs")

        self.log.info("setban: successfully ban single IP address")
        assert_equal(
            len(self.nodes[1].getpeerinfo()),
            2)  # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        assert wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0,
                          timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()),
                     0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("clearbanned: successfully clear ban list")
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")

        self.log.info("setban: fail to ban an already banned subnet")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        assert_raises_jsonrpc(-23, "IP/Subnet already banned",
                              self.nodes[1].setban, "127.0.0.1", "add")

        self.log.info("setban: fail to ban an invalid subnet")
        assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet",
                              self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(
            len(self.nodes[1].listbanned()), 1
        )  # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24

        self.log.info("setban remove: fail to unban a non-banned subnet")
        assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban,
                              "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("setban remove: successfully unban subnet")
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        self.log.info("setban: test persistence across node restart")
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        # Set the mocktime so we can control when bans expire
        old_time = int(time.time())
        self.nodes[1].setmocktime(old_time)
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19",
                             "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        # Move time forward by 3 seconds so the third ban has expired
        self.nodes[1].setmocktime(old_time + 3)
        assert_equal(len(self.nodes[1].listbanned()), 3)

        self.stop_node(1)

        self.nodes[1] = self.start_node(1, self.options.tmpdir)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        self.log.info("Test disconnectnode RPCs")

        self.log.info(
            "disconnectnode: fail to disconnect when calling with address and nodeid"
        )
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        node1 = self.nodes[0].getpeerinfo()[0]['addr']
        assert_raises_jsonrpc(
            -32602,
            "Only one of address and nodeid should be provided.",
            self.nodes[0].disconnectnode,
            address=address1,
            nodeid=node1)

        self.log.info(
            "disconnectnode: fail to disconnect when calling with junk address"
        )
        assert_raises_jsonrpc(-29,
                              "Node not found in connected nodes",
                              self.nodes[0].disconnectnode,
                              address="221B Baker Street")

        self.log.info(
            "disconnectnode: successfully disconnect node by address")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1,
                          timeout=10)
        assert not [
            node
            for node in self.nodes[0].getpeerinfo() if node['addr'] == address1
        ]

        self.log.info("disconnectnode: successfully reconnect node")
        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        assert [
            node for node in self.nodes[0].getpeerinfo()
            if node['addr'] == address1
        ]

        self.log.info(
            "disconnectnode: successfully disconnect node by node id")
        id1 = self.nodes[0].getpeerinfo()[0]['id']
        self.nodes[0].disconnectnode(nodeid=id1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1,
                          timeout=10)
        assert not [
            node for node in self.nodes[0].getpeerinfo() if node['id'] == id1
        ]
Code example #18
 def wait_for_addr(self, timeout=5):
     test_function = lambda: self.last_message.get("addr")
     wait_until(test_function, timeout=timeout, lock=mininode_lock)
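
This one-liner passes lock=mininode_lock so the predicate is evaluated while holding the network thread's lock. A sketch of such a lock-aware polling loop, assuming a threading-style lock (the name wait_until_locked is illustrative, not from any of these projects):

import time

def wait_until_locked(predicate, *, timeout=5, sleep=0.05, lock=None):
    # Hold the given lock while running the predicate so it can safely
    # read state (e.g. last_message) that the network thread mutates.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if lock is not None:
            with lock:
                if predicate():
                    return True
        elif predicate():
            return True
        time.sleep(sleep)
    return False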
Code example #19
    def run_test(self):
        # TODO remove this when mininode is up-to-date with Bitcoin
        class MyNodeConnCB(SingleNodeConnCB):
            def __init__(self):
                SingleNodeConnCB.__init__(self)
                self.cond = threading.Condition()
                self.last_message = {}

            def deliver(self, conn, message):
                SingleNodeConnCB.deliver(self, conn, message)
                command = message.command.decode('ascii')
                self.last_message[command] = message
                with self.cond:
                    self.cond.notify_all()

            def wait_for_getdata(self):
                with self.cond:
                    assert (self.cond.wait_for(
                        lambda: "getdata" in self.last_message, timeout=15))

        node0 = MyNodeConnCB()

        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        NetworkThread().start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
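
The send_block_request and send_header_request helpers used above are defined elsewhere in the test; a plausible sketch, assuming the standard mininode message types (msg_getdata, msg_getheaders, CInv), is:

def send_block_request(self, block_hash, node):
    # Request a single block by hash via a GETDATA message.
    msg = msg_getdata()
    msg.inv.append(CInv(2, block_hash))  # inv type 2 = MSG_BLOCK
    node.send_message(msg)

def send_header_request(self, block_hash, node):
    # Request headers up to and including the given hash via GETHEADERS.
    msg = msg_getheaders()
    msg.hashstop = block_hash
    node.send_message(msg)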
Code example #20
 def wait_for_addr(self, timeout=5):
     def test_function(): return self.last_message.get("addr")
     wait_until(test_function, timeout=timeout, lock=mininode_lock)
Code example #21
    def run_test(self):
        node0 = NodeConnCB()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread

        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that an invalid-according-to-CLTV transaction can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                             block_time)
        block.nVersion = 3
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 4")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
        block.nVersion = 3
        block.solve()
        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(node0.last_message["reject"].reason,
                         b'bad-version(0x00000003)')
            assert_equal(node0.last_message["reject"].data, block.sha256)
            del node0.last_message["reject"]

        self.log.info(
            "Test that invalid-according-to-cltv transactions cannot appear in a block"
        )
        block.nVersion = 4

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for CLTV by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
        node0.send_and_ping(MsgTx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()

        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert node0.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(node0.last_message["reject"].data, block.sha256)
            if node0.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(node0.last_message["reject"].reason,
                             b'block-validation-failed')
            else:
                assert b'Negative locktime' in node0.last_message[
                    "reject"].reason

        self.log.info(
            "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted"
        )
        spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
        spendtx.rehash()

        block.vtx.pop(1)
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
Code example #22
    def run_test(self):
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)

        self.log.debug("Mine a single block to get out of IBD")
        self.nodes[0].generate(1)
        self.sync_all()

        self.log.debug("Send 5 transactions from node2 (to its own address)")
        for i in range(5):
            self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(),
                                        Decimal("10"))
        self.sync_all()

        self.log.debug(
            "Verify that node0 and node1 have 5 transactions in their mempools"
        )
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug(
            "Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not."
        )
        self.stop_nodes()
        self.nodes = []
        self.nodes.append(self.start_node(0, self.options.tmpdir))
        self.nodes.append(self.start_node(1, self.options.tmpdir))
        # Give bitcoind a second to reload the mempool
        time.sleep(1)
        assert wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        self.log.debug(
            "Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file."
        )
        self.stop_nodes()
        self.nodes = []
        self.nodes.append(
            self.start_node(0, self.options.tmpdir, ["-persistmempool=0"]))
        # Give bitcoind a second to reload the mempool
        time.sleep(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.log.debug(
            "Stop-start node0. Verify that it has the transactions in its mempool."
        )
        self.stop_nodes()
        self.nodes = []
        self.nodes.append(self.start_node(0, self.options.tmpdir))
        assert wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)

        mempooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest',
                                   'mempool.dat')
        mempooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest',
                                   'mempool.dat')
        self.log.debug(
            "Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it"
        )
        os.remove(mempooldat0)
        self.nodes[0].savemempool()
        assert os.path.isfile(mempooldat0)

        self.log.debug(
            "Make node1 use mempool.dat from node0. Verify it has 5 transactions"
        )
        os.rename(mempooldat0, mempooldat1)
        self.nodes.append(self.start_node(1, self.options.tmpdir))
        wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)

        self.log.debug(
            "Prevent bitcoind from writing mempool.dat to disk. Verify that `savemempool` fails"
        )
        # To test the exception, we set bad permissions on a temp file called
        # mempool.dat.new, an implementation detail that could change and break this test
        mempooldotnew1 = mempooldat1 + '.new'
        with os.fdopen(os.open(mempooldotnew1, os.O_CREAT, 0o000), 'w'):
            pass
        assert_raises_jsonrpc(-1, "Unable to dump mempool to disk",
                              self.nodes[1].savemempool)
        os.remove(mempooldotnew1)
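The failure case above works by creating mempool.dat.new with mode 000, presumably so that bitcoind cannot write its temporary dump file. The same pattern in isolation, as a minimal sketch (the path here is illustrative; the real test uses the node's data directory):

import os
import stat

path = 'mempool.dat.new'  # illustrative path only
# Create an empty file with no permission bits so the daemon cannot
# write to or replace it when dumping the mempool.
fd = os.open(path, os.O_CREAT, 0o000)
os.close(fd)
assert stat.S_IMODE(os.stat(path).st_mode) == 0
os.remove(path)  # removal needs only directory permissions, so this succeeds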
Code Example #23
    def wait_for_addr(self, timeout=5):
        test_function = lambda: self.last_message.get("addr")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
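This one-liner delegates to the framework's wait_until utility: evaluate a predicate repeatedly, optionally holding mininode_lock around each evaluation, until it is truthy or a timeout expires. A minimal sketch of such a helper, assuming only the behaviour visible in these examples (real framework versions differ in details such as polling interval and failure reporting):

import time

def wait_until(predicate, *, timeout=60, lock=None, sleep=0.05):
    # Poll predicate until it returns a truthy value or the timeout expires.
    # If lock is given, hold it around each evaluation so the predicate can
    # safely read state mutated by the network thread.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if lock is not None:
            with lock:
                if predicate():
                    return True
        elif predicate():
            return True
        time.sleep(sleep)
    return False  # some framework versions raise AssertionError here instead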
Code Example #24
    def wait_for_utxo(self):
        got_utxos = wait_until(lambda: self.test_node.utxos is not None)
        assert got_utxos
        assert_equal(self.test_node.utxos.hash,
                     int(self.xt_node.getbestblockhash(), 16))
        assert_equal(self.test_node.utxos.height, self.xt_node.getblockcount())
Code Example #25
    def run_test(self):
        # Create all the connections we will need to node0 at the start because they all
        # need to be set up before we call NetworkThread().start()

        # Create a P2P connection with no association ID (old style)
        oldStyleConnCB = TestNode()
        oldStyleConn = NodeConn('127.0.0.1',
                                p2p_port(0),
                                self.nodes[0],
                                oldStyleConnCB,
                                nullAssocID=True)
        oldStyleConnCB.add_connection(oldStyleConn)

        # Create a P2P connection with a new association ID
        newStyleConnCB = TestNode()
        newStyleConn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                                newStyleConnCB)
        newStyleConnCB.add_connection(newStyleConn)

        # Create a P2P connection with a new association ID and another connection that uses the same ID
        newStyleFirstConnCB = TestNode()
        newStyleFirstConn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                                     newStyleFirstConnCB)
        newStyleFirstConnCB.add_connection(newStyleFirstConn)
        # By setting the assocID on this second NodeConn we prevent it from sending a version message
        newStyleSecondConnCB = TestNode()
        newStyleSecondConn = NodeConn('127.0.0.1',
                                      p2p_port(0),
                                      self.nodes[0],
                                      newStyleSecondConnCB,
                                      assocID=newStyleFirstConn.assocID)
        newStyleSecondConnCB.add_connection(newStyleSecondConn)

        # Some connections we will use to test setup of DATA2, DATA3, DATA4 streams
        newStyleSecondConnCB_Data2 = TestNode()
        newStyleSecondConn_Data2 = NodeConn('127.0.0.1',
                                            p2p_port(0),
                                            self.nodes[0],
                                            newStyleSecondConnCB_Data2,
                                            assocID=newStyleFirstConn.assocID)
        newStyleSecondConnCB_Data2.add_connection(newStyleSecondConn_Data2)
        newStyleSecondConnCB_Data3 = TestNode()
        newStyleSecondConn_Data3 = NodeConn('127.0.0.1',
                                            p2p_port(0),
                                            self.nodes[0],
                                            newStyleSecondConnCB_Data3,
                                            assocID=newStyleFirstConn.assocID)
        newStyleSecondConnCB_Data3.add_connection(newStyleSecondConn_Data3)
        newStyleSecondConnCB_Data4 = TestNode()
        newStyleSecondConn_Data4 = NodeConn('127.0.0.1',
                                            p2p_port(0),
                                            self.nodes[0],
                                            newStyleSecondConnCB_Data4,
                                            assocID=newStyleFirstConn.assocID)
        newStyleSecondConnCB_Data4.add_connection(newStyleSecondConn_Data4)

        # Some connections we will use to test error scenarios
        newStyleThirdConnCB = TestNode()
        badStreamConn1 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleThirdConnCB,
                                  assocID=create_association_id())
        newStyleThirdConnCB.add_connection(badStreamConn1)
        newStyleFourthConnCB = TestNode()
        badStreamConn2 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleFourthConnCB,
                                  assocID=newStyleFirstConn.assocID)
        newStyleFourthConnCB.add_connection(badStreamConn2)
        newStyleFifthConnCB = TestNode()
        badStreamConn3 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleFifthConnCB,
                                  assocID=newStyleFirstConn.assocID)
        newStyleFifthConnCB.add_connection(badStreamConn3)
        newStyleSixthConnCB = TestNode()
        badStreamConn4 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleSixthConnCB,
                                  assocID=newStyleFirstConn.assocID)
        newStyleSixthConnCB.add_connection(badStreamConn4)
        newStyleSeventhConnCB = TestNode()
        badStreamConn5 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleSeventhConnCB,
                                  assocID=newStyleFirstConn.assocID)
        newStyleSeventhConnCB.add_connection(badStreamConn5)

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()

        # Wait for all connections to come up to the required initial state
        oldStyleConnCB.wait_for_protoconf()
        newStyleConnCB.wait_for_protoconf()
        newStyleFirstConnCB.wait_for_protoconf()

        # Check initial state
        with mininode_lock:
            assert_equal(oldStyleConnCB.recvAssocID, None)
            assert_equal(oldStyleConnCB.recvStreamPolicies,
                         b'BlockPriority,Default')
            assert_equal(newStyleConnCB.recvAssocID, newStyleConn.assocID)
            assert_equal(newStyleConnCB.recvStreamPolicies,
                         b'BlockPriority,Default')
            assert_equal(newStyleFirstConnCB.recvAssocID,
                         newStyleFirstConn.assocID)
            assert_equal(newStyleFirstConnCB.recvStreamPolicies,
                         b'BlockPriority,Default')
            assert_equal(len(newStyleSecondConnCB.message_count), 0)
            assert_equal(len(newStyleSecondConnCB_Data2.message_count), 0)
            assert_equal(len(newStyleSecondConnCB_Data3.message_count), 0)
            assert_equal(len(newStyleSecondConnCB_Data4.message_count), 0)
        expected = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 2,  # newStyleFirstConn
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 3,  # newStyleSecondConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 4,  # newStyleSecondConn_Data2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 5,  # newStyleSecondConn_Data3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 6,  # newStyleSecondConn_Data4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 7,  # badStreamConn1
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 8,  # badStreamConn2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 9,  # badStreamConn3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 10,  # badStreamConn4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 11,  # badStreamConn5
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected),
                   timeout=5)

        # Check a new block is received by all connections
        self.nodes[0].generate(1)
        tip = self.nodes[0].getbestblockhash()
        wait_until(lambda: oldStyleConnCB.seen_block(tip),
                   lock=mininode_lock,
                   timeout=5)
        wait_until(lambda: newStyleConnCB.seen_block(tip),
                   lock=mininode_lock,
                   timeout=5)
        wait_until(lambda: newStyleFirstConnCB.seen_block(tip),
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert not newStyleSecondConnCB.seen_block(tip)
            assert not newStyleSecondConnCB_Data2.seen_block(tip)
            assert not newStyleSecondConnCB_Data3.seen_block(tip)
            assert not newStyleSecondConnCB_Data4.seen_block(tip)

        # Send create new stream message
        newStyleSecondConn.send_message(
            msg_createstream(stream_type=StreamType.DATA1.value,
                             stream_policy=b"BlockPriority",
                             assocID=newStyleFirstConn.assocID))
        expected = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 2,  # newStyleFirstConn & newStyleSecondConn
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
            {
                'id': 4,  # newStyleSecondConn_Data2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 5,  # newStyleSecondConn_Data3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 6,  # newStyleSecondConn_Data4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 7,  # badStreamConn1
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 8,  # badStreamConn2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 9,  # badStreamConn3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 10,  # badStreamConn4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 11,  # badStreamConn5
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected),
                   timeout=5)
        with mininode_lock:
            assert newStyleSecondConnCB.last_streamack is not None

        # Send create stream with wrong association ID
        badStreamConn1.send_message(
            msg_createstream(stream_type=StreamType.DATA2.value,
                             assocID=badStreamConn1.assocID))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleThirdConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert newStyleThirdConnCB.last_streamack is None
        assert ("No node found with association ID"
                in str(newStyleThirdConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn1.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Send create stream with missing association ID
        badStreamConn5.send_message(
            msg_createstream(stream_type=StreamType.DATA2.value, assocID=""))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleSeventhConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert newStyleSeventhConnCB.last_streamack is None
        assert ("Badly formatted message"
                in str(newStyleSeventhConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn5.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Send create stream for unknown stream type
        badStreamConn2.send_message(
            msg_createstream(stream_type=9, assocID=badStreamConn2.assocID))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleFourthConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert newStyleFourthConnCB.last_streamack is None
        assert ("StreamType out of range"
                in str(newStyleFourthConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn2.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Send create stream for existing stream type
        badStreamConn3.send_message(
            msg_createstream(stream_type=StreamType.GENERAL.value,
                             assocID=badStreamConn3.assocID))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleFifthConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert newStyleFifthConnCB.last_streamack is None
        assert ("Attempt to overwrite existing stream"
                in str(newStyleFifthConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn3.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Send create stream with unknown stream policy specified
        badStreamConn4.send_message(
            msg_createstream(stream_type=StreamType.GENERAL.value,
                             stream_policy=b"UnknownPolicy",
                             assocID=badStreamConn4.assocID))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleSixthConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert newStyleSixthConnCB.last_streamack is None
        assert ("Unknown stream policy name"
                in str(newStyleSixthConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn4.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Check streams are in the expected state after all those errors
        expected = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 2,  # newStyleFirstConn & newStyleSecondConn
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
            {
                'id': 4,  # newStyleSecondConn_Data2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 5,  # newStyleSecondConn_Data3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 6,  # newStyleSecondConn_Data4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected),
                   timeout=5)

        # See if we can establish all the possible stream types
        newStyleSecondConn_Data2.send_message(
            msg_createstream(stream_type=StreamType.DATA2.value,
                             assocID=newStyleFirstConn.assocID))
        newStyleSecondConn_Data3.send_message(
            msg_createstream(stream_type=StreamType.DATA3.value,
                             assocID=newStyleFirstConn.assocID))
        newStyleSecondConn_Data4.send_message(
            msg_createstream(stream_type=StreamType.DATA4.value,
                             assocID=newStyleFirstConn.assocID))
        expected = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                # newStyleFirstConn, newStyleSecondConn, newStyleSecondConn_Data2,
                # newStyleSecondConn_Data3, newStyleSecondConn_Data4
                'id': 2,
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1', 'DATA2', 'DATA3', 'DATA4']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected),
                   timeout=5)

        # Connect 2 nodes and check they establish the expected streams
        connect_nodes(self.nodes[0], 1)
        expected0 = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                # newStyleFirstConn, newStyleSecondConn, newStyleSecondConn_Data2,
                # newStyleSecondConn_Data3, newStyleSecondConn_Data4
                'id': 2,
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1', 'DATA2', 'DATA3', 'DATA4']
            },
            {
                'id': 12,  # A new association established to node1
                'associd': '<UNKNOWN>',
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected0),
                   timeout=5)
        expected1 = [
            {
                'id': 0,  # An association to node0
                'associd': '<UNKNOWN>',
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[1], expected1),
                   timeout=5)

        # Connect 2 nodes, one of which has streams disabled, and check they establish the expected streams
        connect_nodes(self.nodes[0], 2)
        expected0 = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                # newStyleFirstConn, newStyleSecondConn, newStyleSecondConn_Data2,
                # newStyleSecondConn_Data3, newStyleSecondConn_Data4
                'id': 2,
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1', 'DATA2', 'DATA3', 'DATA4']
            },
            {
                'id': 12,  # Association to node 1
                'associd': '<UNKNOWN>',
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
            {
                'id': 14,  # Old style association to node 2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected0),
                   timeout=5)
        expected2 = [
            {
                'id': 0,  # An association to node0
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[2], expected2),
                   timeout=5)

        # Make sure everyone sees all blocks over whatever stream
        self.nodes[0].generate(1)
        tip = self.nodes[0].getbestblockhash()
        wait_until(lambda: self.nodes[1].getbestblockhash() == tip, timeout=5)
        wait_until(lambda: self.nodes[2].getbestblockhash() == tip, timeout=5)

        self.nodes[1].generate(1)
        tip = self.nodes[1].getbestblockhash()
        wait_until(lambda: self.nodes[0].getbestblockhash() == tip, timeout=5)
        wait_until(lambda: self.nodes[2].getbestblockhash() == tip, timeout=5)

        self.nodes[2].generate(1)
        tip = self.nodes[2].getbestblockhash()
        wait_until(lambda: self.nodes[0].getbestblockhash() == tip, timeout=5)
        wait_until(lambda: self.nodes[1].getbestblockhash() == tip, timeout=5)

        # Add another node, configured to only support the Default stream policy
        self.add_node(3,
                      extra_args=[
                          '-whitelist=127.0.0.1',
                          '-multistreampolicies=Default'
                      ],
                      init_data_dir=True)
        self.start_node(3)

        # Check streampolicies field from getnetworkinfo
        assert_equal(self.nodes[0].getnetworkinfo()["streampolicies"],
                     "BlockPriority,Default")
        assert_equal(self.nodes[1].getnetworkinfo()["streampolicies"],
                     "BlockPriority,Default")
        assert_equal(self.nodes[2].getnetworkinfo()["streampolicies"],
                     "BlockPriority,Default")
        assert_equal(self.nodes[3].getnetworkinfo()["streampolicies"],
                     "Default")

        # Connect the new node to one of the existing nodes and check that they establish a Default association
        connect_nodes(self.nodes[1], 3)
        expected1 = [
            {
                'id': 0,  # An association to node0
                'associd': '<UNKNOWN>',
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
            {
                'id': 2,  # An association to node3
                'associd': '<UNKNOWN>',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[1], expected1),
                   timeout=5)
        expected3 = [
            {
                'id': 0,  # An association to node1
                'associd': '<UNKNOWN>',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[3], expected3),
                   timeout=5)
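The repeated comparisons against getpeerinfo go through self.check_peer_info, whose definition is not part of this excerpt. A plausible sketch, assuming it matches each expected entry (id, associd, streampolicy, streams) against the RPC output and treats '<UNKNOWN>' as a wildcard so the whole thing can be polled via wait_until:

    def check_peer_info(self, node, expected):
        # Return False (rather than asserting) so this can be used as a
        # wait_until predicate while the node converges on the expected state.
        peers = {peer['id']: peer for peer in node.getpeerinfo()}
        if len(peers) != len(expected):
            return False
        for exp in expected:
            peer = peers.get(exp['id'])
            if peer is None:
                return False
            if exp['associd'] != '<UNKNOWN>' and peer['associd'] != exp['associd']:
                return False
            if peer['streampolicy'] != exp['streampolicy']:
                return False
            if sorted(peer['streams']) != sorted(exp['streams']):
                return False
        return True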
Code Example #26
    def run_test(self):
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())

        network_thread_start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
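The send_block_request / send_header_request and last_block_equals / last_header_equals helpers are defined outside this excerpt. A sketch of how the upstream p2p_fingerprint test implements them (reconstructed, so treat the exact imports and field names as assumptions):

    # assumed imports from the test framework's mininode module:
    # from test_framework.mininode import CInv, msg_getdata, msg_getheaders

    def send_block_request(self, block_hash, node):
        # Request the block itself via a getdata inventory message.
        msg = msg_getdata()
        msg.inv.append(CInv(2, block_hash))  # inv type 2 == MSG_BLOCK
        node.send_message(msg)

    def send_header_request(self, block_hash, node):
        # Request a header chain ending at block_hash.
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)

    def last_block_equals(self, expected_hash, node):
        block_msg = node.last_message.get("block")
        return block_msg and block_msg.block.rehash() == expected_hash

    def last_header_equals(self, expected_hash, node):
        headers_msg = node.last_message.get("headers")
        return (headers_msg and headers_msg.headers and
                headers_msg.headers[0].rehash() == expected_hash)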
Code Example #27
File: p2p_fingerprint.py Project: AM5800/unit-e
    def run_test(self):

        self.setup_stake_coins(self.nodes[0])

        node0 = self.nodes[0].add_p2p_connection(P2PInterface())

        network_thread_start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=8)
        snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
        unspent_outputs = get_unspent_coins(self.nodes[0], 5, lock=True)
        block_hashes += self.nodes[0].generate(nblocks=2)
        unspent_outputs = get_unspent_coins(self.nodes[0], 5)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time,
                                      unspent_outputs, snapshot_meta)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
Code Example #28
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

        height = 1

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our NodeConn connection
            node0.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info("Wait for node2 reach current tip. Test that it has propogated all the blocks to us")

        for block in blocks:
            getdata_request = msg_getdata()
            getdata_request.inv.append(CInv(2, block))
            node2.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # NodeConnCB objects.
        assert wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
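The __getattr__() dispatch described in the comments above is easy to demonstrate standalone. A minimal sketch with a hypothetical proxy class and transport (not the framework's actual code):

class RpcProxy:
    # Forward any unknown attribute lookup to a JSON-RPC transport.
    def __init__(self, send_request):
        # send_request is any callable taking (method_name, *args, **kwargs)
        self._send_request = send_request

    def __getattr__(self, method_name):
        # Invoked only when normal attribute lookup fails, so regular
        # attributes and methods are unaffected.
        def rpc_call(*args, **kwargs):
            # Keyword arguments become named RPC arguments.
            return self._send_request(method_name, *args, **kwargs)
        return rpc_call

# proxy.waitforblockheight(11) turns into send_request('waitforblockheight', 11)
proxy = RpcProxy(lambda method, *args, **kwargs: (method, args, kwargs))
assert proxy.waitforblockheight(11) == ('waitforblockheight', (11,), {})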
Code Example #29
    def run_test(self):
        self.mock_time = int(time.time())
        self.mock_forward(0)

        self.log.info(
            'Check that ping is sent after connection is established')
        no_pong_node = self.nodes[0].add_p2p_connection(NodeNoPong())
        self.mock_forward(3)
        assert no_pong_node.last_message.pop('ping').nonce != 0
        self.check_peer_info(pingtime=None, minping=None, pingwait=3)

        self.log.info('Reply without nonce cancels ping')
        with self.nodes[0].assert_debug_log(['pong peer=0: Short payload']):
            no_pong_node.send_and_ping(msg_pong_corrupt())
        self.check_peer_info(pingtime=None, minping=None, pingwait=None)

        self.log.info('Reply without ping')
        with self.nodes[0].assert_debug_log([
                'pong peer=0: Unsolicited pong without ping, 0 expected, 0 received, 8 bytes',
        ]):
            no_pong_node.send_and_ping(msg_pong())
        self.check_peer_info(pingtime=None, minping=None, pingwait=None)

        self.log.info('Reply with wrong nonce does not cancel ping')
        assert 'ping' not in no_pong_node.last_message
        with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']):
            # mock time PING_INTERVAL ahead to trigger node into sending a ping
            self.mock_forward(PING_INTERVAL + 1)
            wait_until(lambda: 'ping' in no_pong_node.last_message)
            self.mock_forward(9)
            # Send the wrong pong
            no_pong_node.send_and_ping(
                msg_pong(no_pong_node.last_message.pop('ping').nonce - 1))
        self.check_peer_info(pingtime=None, minping=None, pingwait=9)

        self.log.info('Reply with zero nonce does cancel ping')
        with self.nodes[0].assert_debug_log(['pong peer=0: Nonce zero']):
            no_pong_node.send_and_ping(msg_pong(0))
        self.check_peer_info(pingtime=None, minping=None, pingwait=None)

        self.log.info('Check that ping is properly reported on RPC')
        assert 'ping' not in no_pong_node.last_message
        # mock time PING_INTERVAL ahead to trigger node into sending a ping
        self.mock_forward(PING_INTERVAL + 1)
        wait_until(lambda: 'ping' in no_pong_node.last_message)
        ping_delay = 29
        self.mock_forward(ping_delay)
        wait_until(lambda: 'ping' in no_pong_node.last_message)
        no_pong_node.send_and_ping(
            msg_pong(no_pong_node.last_message.pop('ping').nonce))
        self.check_peer_info(pingtime=ping_delay,
                             minping=ping_delay,
                             pingwait=None)

        self.log.info('Check that minping is decreased after a fast roundtrip')
        # mock time PING_INTERVAL ahead to trigger node into sending a ping
        self.mock_forward(PING_INTERVAL + 1)
        wait_until(lambda: 'ping' in no_pong_node.last_message)
        ping_delay = 9
        self.mock_forward(ping_delay)
        wait_until(lambda: 'ping' in no_pong_node.last_message)
        no_pong_node.send_and_ping(
            msg_pong(no_pong_node.last_message.pop('ping').nonce))
        self.check_peer_info(pingtime=ping_delay,
                             minping=ping_delay,
                             pingwait=None)

        self.log.info('Check that peer is disconnected after ping timeout')
        assert 'ping' not in no_pong_node.last_message
        self.nodes[0].ping()
        wait_until(lambda: 'ping' in no_pong_node.last_message)
        with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']):
            self.mock_forward(20 * 60 + 1)
            time.sleep(4)  # peertimeout + 1
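The mock_forward helper used throughout this test is not shown in the excerpt; given the self.mock_time bookkeeping at the top of run_test, it presumably advances the mocked clock and pushes it to the node. A minimal sketch under that assumption:

    def mock_forward(self, delta):
        # Advance the mocked wall clock by delta seconds and inform the node,
        # so ping intervals and timeouts elapse without real waiting.
        self.mock_time += delta
        self.nodes[0].setmocktime(self.mock_time)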
Code Example #30
File: p2p_fingerprint.py Project: STPDevteam/stp.1.0
    def run_test(self):
        node0 = NodeConnCB()

        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        NetworkThread().start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
Code Example #31
    def run_test(self):

        #        node = self.nodes[0]
        #        mining_info = node.getmininginfo()
        #        self.log.info('getmininginfo')

        # Why is the block count 200 here?
        #        assert_equal(mining_info['blocks'], 200)
        #        assert_equal(mining_info['chain'], 'regtest')
        #        assert_equal(mining_info['currentblocktx'], 0)
        #        assert_equal(mining_info['currentblockweight'], 0)
        #        assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
        #        assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
        #        assert_equal(mining_info['pooledtx'], 0)

        #logfilePath = self.options.tmpdir + '/test_framework.log'

        #self.log.info(logfilePath)

        #subprocess.call(['open', '-W', '-a', 'Terminal.app', 'tail', '-f', logfilePath])
        #subprocess.call(['tail', '-f', logfilePath])

        #nodetest = P2PInterface();
        # node0 is the test-side peer object; self.nodes[0] is the real bitcoind node
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())

        # The peer info here is for peers connected to the real bitcoind node
        #networkinfo = self.nodes[0].getnetworkinfo()
        #self.log.info(networkinfo)

        #url = urllib.parse.urlparse(self.nodes[0].url)
        #self.log.info(url)

        network_thread_start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        mocktime = int(time.time()) - 60 * 24 * 60 * 60
        self.nodes[0].setmocktime(mocktime)

        nblocks = 10
        #nblocks = 5

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks)

        #for hash in block_hashes:
        #    self.log.info(' Node: [%d]:%s' % (i, hash))

        #for i in range(block_hashes):
        #    self.log.info(' Node: [%d]:%s' % (i, block_hashes[i]))

        for i, hash in enumerate(block_hashes):
            self.log.info('[notice] [%d]:%s' % (i, hash))
            #self.log.info('%d:%s'% (i,int(hash, 16)))

        self.log.info('[notice] generated %d blocks' % len(block_hashes))

        # On regtest the genesis block hash is 0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206
        #getblockhash0 = self.nodes[0].getblockhash(0)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]

        self.log.info('[notice] starting %d:%s' % (height, block_hash))

        # mediantime is the median timestamp of the last several blocks
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1

        new_blocks = self.build_chain(5, block_hash, height, block_time)

        for i, hash in enumerate(new_blocks):
            self.log.info('[notice] n [%d]:%s' % (i, hash.hash))
            #self.log.info('%d'% (int(hash.hash, 16)))

        # Force reorg to a longer chain
        # Send a headers message to the real node self.nodes[0] announcing the new chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        #blockcount = self.nodes[0].getblockcount()

        # Check that reorg succeeded
        # Check the block count on self.nodes[0]
        assert_equal(self.nodes[0].getblockcount(), 13)

        # Take the last hash in block_hashes and parse it as a base-16 integer
        stale_hash = int(block_hashes[-1], 16)

        self.log.info('[notice] stale_hash:%s' % stale_hash)

        # Check that getdata request for stale block succeeds
        # Send a getdata request with the stale block's hash to self.nodes[0]
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)