Example #1
 def send_data_message(self, testnode, tx):
     # Send p2p message "getdata" to verify tx gets sent in "tx" message
     getdatamsg = msg_getdata()
     getdatamsg.inv = [CInv(1, tx.sha256)]
     with mininode_lock:
         testnode.last_notfound = None
         testnode.last_tx = None
         testnode.send_message(getdatamsg)
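
The 1 passed to CInv above is the raw inventory type from the Bitcoin P2P protocol. These examples use the bare integers throughout; for reference, a minimal sketch of the values (newer test frameworks export them as named constants from test_framework.messages):

    # Inventory type values from the Bitcoin P2P protocol
    MSG_TX = 1              # CInv(1, ...) names a transaction
    MSG_BLOCK = 2           # CInv(2, ...) names a full block
    MSG_FILTERED_BLOCK = 3  # merkleblock (BIP 37)
    MSG_CMPCT_BLOCK = 4     # compact block (BIP 152)

So CInv(1, tx.sha256) above is equivalent to CInv(MSG_TX, tx.sha256).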
Example #2
 def send_get_data(self, block_hashes):
     """Request data for a list of block hashes."""
     msg = msg_getdata()
     for x in block_hashes:
         msg.inv.append(CInv(2, x))
     self.send_message(msg)
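
A hedged usage sketch for this helper, assuming peer is a P2PInterface-style test connection that exposes send_get_data() as above, node is the node under test, and wait_for_block is the standard framework helper:

    hashes = [int(h, 16) for h in node.generate(3)]  # block hashes as ints
    peer.send_get_data(hashes)
    peer.wait_for_block(hashes[-1], timeout=10)  # blocks arrive in "block" messages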
Example #4
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        height = 1

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height),
                                 self.block_time)
            block.solve()
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our NodeConn connection
            node0.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info(
            "Wait for node2 to reach the current tip. Test that it has propagated all the blocks to us"
        )

        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        node2.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # NodeConnCB objects.
        wait_until(lambda: sorted(blocks) == sorted(
            list(node2.block_receive_map.keys())),
                   timeout=5,
                   lock=mininode_lock)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
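
The test reads node2.block_receive_map, but the BaseNode class is not shown in this excerpt. A minimal sketch consistent with how the map is used (the exact base class, NodeConnCB here, varies between framework versions):

    from collections import defaultdict
    from test_framework.mininode import NodeConnCB  # P2PInterface in newer frameworks

    class BaseNode(NodeConnCB):
        def __init__(self):
            super().__init__()
            # block hash -> number of times received in a "block" message
            self.block_receive_map = defaultdict(int)

        def on_block(self, conn, message):
            message.block.calc_sha256()
            self.block_receive_map[message.block.sha256] += 1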
Example #5
 def send_block_request(self, block_hash, node):
     msg = msg_getdata()
     msg.inv.append(CInv(2, block_hash))  # 2 == "Block"
     node.send_message(msg)
Example #6
    def run_test(self):
        testnode0 = TestNode()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], testnode0,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode0.add_connection(connections[0])

        # Start up network handling in another thread
        NetworkThread().start()
        testnode0.wait_for_verack()

        # Verify mininodes are connected to litecoinzd nodes
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(0, peerinfo[0]["banscore"])

        # Mine some blocks so we can spend
        self.coinbase_blocks = self.nodes[0].generate(200)
        self.nodeaddress = self.nodes[0].getnewaddress()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 0)

        # Mininodes send expiring soon transaction in "tx" message to litecoinzd node
        tx1 = self.create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                      self.nodeaddress, 10.0, 203)
        testnode0.send_message(msg_tx(tx1))

        # Mininodes send transaction in "tx" message to litecoinzd node
        tx2 = self.create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                      self.nodeaddress, 10.0, 204)
        testnode0.send_message(msg_tx(tx2))

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify contents of mempool
        assert tx1.hash not in self.nodes[0].getrawmempool()  # tx1 rejected as expiring soon
        assert tx1.hash not in self.nodes[1].getrawmempool()
        assert tx2.hash in self.nodes[0].getrawmempool()  # tx2 accepted
        assert tx2.hash in self.nodes[1].getrawmempool()
        assert_equal(len(self.nodes[2].getrawmempool()), 0)  # node 2 is isolated and empty

        # Send p2p message "mempool" to receive contents from litecoinzd node in "inv" message
        with mininode_lock:
            testnode0.last_inv = None
            testnode0.send_message(msg_mempool())

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify txid for tx2
        with mininode_lock:
            msg = testnode0.last_inv
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Send p2p message "getdata" to verify tx2 gets sent in "tx" message
        getdatamsg = msg_getdata()
        getdatamsg.inv = [CInv(1, tx2.sha256)]
        with mininode_lock:
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify data received in "tx" message is for tx2
        with mininode_lock:
            incoming_tx = testnode0.last_tx.tx
            incoming_tx.rehash()
            assert_equal(tx2.sha256, incoming_tx.sha256)

        # Sync and mine an empty block with node 2, leaving tx in the mempool of node0 and node1
        for blkhash in self.coinbase_blocks:
            blk = self.nodes[0].getblock(blkhash, 0)
            self.nodes[2].submitblock(blk)
        self.nodes[2].generate(1)

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Reconnect node 2 to the network
        connect_nodes_bi(self.nodes, 1, 2)

        # Set up test node for node 2
        testnode2 = TestNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], testnode2,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode2.add_connection(connections[-1])

        # Verify block count
        sync_blocks(self.nodes[:3])
        assert_equal(self.nodes[0].getblockcount(), 201)
        assert_equal(self.nodes[1].getblockcount(), 201)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Verify contents of mempool
        assert (tx2.hash in self.nodes[0].getrawmempool())
        assert (tx2.hash in self.nodes[1].getrawmempool())
        assert (tx2.hash not in self.nodes[2].getrawmempool())

        # Confirm tx2 cannot be submitted to a mempool because it is expiring soon.
        try:
            rawtx2 = hexlify(tx2.serialize())
            self.nodes[2].sendrawtransaction(rawtx2)
            assert (False)
        except JSONRPCException as e:
            errorString = e.error['message']
            assert ("tx-expiring-soon" in errorString)

        # Ask node 0 for tx2...
        with mininode_lock:
            testnode0.last_notfound = None
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        [x.sync_with_ping() for x in [testnode0, testnode2]]

        # Verify node 0 does not reply to "getdata" by sending "tx" message, as tx2 is expiring soon
        with mininode_lock:
            assert_equal(testnode0.last_tx, None)

        # Verify mininode received a "notfound" message containing the txid of tx2
        with mininode_lock:
            msg = testnode0.last_notfound
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Create a transaction to verify that processing of "getdata" messages is functioning
        tx3 = self.create_transaction(self.nodes[0], self.coinbase_blocks[2],
                                      self.nodeaddress, 10.0, 999)

        # Mininodes send tx3 to litecoinzd node
        testnode0.send_message(msg_tx(tx3))
        getdatamsg = msg_getdata()
        getdatamsg.inv = [CInv(1, tx3.sha256)]
        with mininode_lock:
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        [x.sync_with_ping() for x in [testnode0, testnode2]]

        # Verify we received a "tx" message for tx3
        with mininode_lock:
            incoming_tx = testnode0.last_tx.tx
            incoming_tx.rehash()
            assert_equal(tx3.sha256, incoming_tx.sha256)

        # Send p2p message "mempool" to receive contents from litecoinzd node in "inv" message
        with mininode_lock:
            testnode0.last_inv = None
            testnode0.send_message(msg_mempool())

        # Sync up with node after p2p messages delivered
        [x.sync_with_ping() for x in [testnode0, testnode2]]

        # Verify txid for tx3 is returned in "inv", but tx2 which is expiring soon is not returned
        with mininode_lock:
            msg = testnode0.last_inv
            assert_equal(len(msg.inv), 1)
            assert_equal(tx3.sha256, msg.inv[0].hash)

        # Verify contents of mempool
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[0].getrawmempool()))
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[1].getrawmempool()))
        assert_equal({tx3.hash}, set(self.nodes[2].getrawmempool()))

        # Verify banscore for nodes are still zero
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[0].getpeerinfo()))
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[2].getpeerinfo()))

        [c.disconnect_node() for c in connections]
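
The create_transaction helper is not part of this excerpt. A plausible implementation based on how it is called (spend the coinbase of the given block to an address, then set the transaction's expiry height); FromHex, ToHex and CTransaction are the usual mininode serialization helpers in Zcash-derived frameworks:

    def create_transaction(self, node, coinbase, to_address, amount, expiry_height):
        from_txid = node.getblock(coinbase)['tx'][0]
        inputs = [{'txid': from_txid, 'vout': 0}]
        outputs = {to_address: amount}
        rawtx = node.createrawtransaction(inputs, outputs)
        tx = FromHex(CTransaction(), rawtx)
        tx.nExpiryHeight = expiry_height  # the field the expiry logic checks
        signed = node.signrawtransaction(ToHex(tx))['hex']
        tx = FromHex(CTransaction(), signed)
        tx.rehash()
        return tx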
Example #7
    def run_test(self):
        """Main test logic"""

        # Create P2P connections. add_p2p_connection waits for a verack to make
        # sure the connection is fully up. This performs the version handshake:
        # nodes exchange version messages to negotiate whether or not they
        # want to talk to each other. If yes, they send verack.
        # Before the verack, they won't send anything else to each other.
        # add_p2p_connection has a wait_for_verack option - you can also toggle it to false
        self.nodes[0].add_p2p_connection(BaseNode())

        # Generating a block on one of the nodes will get us out of IBD
        # IBD is initial block download. While nodes are in IBD, they don't
        # think they have the most up-to-date blockchain and are only
        # interested in downloading blocks from their peers.
        # generate() is an RPC and an easy way to make blocks;
        # there are other ways too (e.g. the blocktools helpers used below)!
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        # Sync all means we sync blocks (everyone has same chain tip) and mempools.
        # If you try to sync while nodes aren't connected, it will time out.
        self.sync_all(self.nodes[0:2])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        # getblockcount, getbestblockhash, and getblock are RPCs in src/rpc/blockchain.cpp
        # we need to convert to an int with base 16 (hex) to use later
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        height = self.nodes[0].getblockcount()

        for _ in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            # you can create_coinbase() without a funded wallet because they don't require any inputs
            block = create_block(self.tip, create_coinbase(height + 1),
                                 self.block_time)
            # we can do this instantaneously (no actual PoW) because we are in regtest mode
            block.solve()
            # a msg_block is a p2p message that contains an entire block, serialized
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our P2PInterface
            # p2p in this case is the BaseNode, not the TestNode peers that node0 is connected to
            self.nodes[0].p2p.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Wait for node2 to receive all the blocks from node1")
        self.sync_all()

        self.log.info("Add P2P connection to node2")
        self.nodes[0].disconnect_p2ps()

        self.nodes[2].add_p2p_connection(BaseNode())
        # After node2 connects to BaseNode, it will send invs for the blocks it knows about.
        # They don't just send everything directly because it would congest the network,
        # likely with a lot of redundant information.
        # inv message has a type (transaction, block, blockheader, etc.) and the hash
        # a peer would use the inv to see if it already has the data or needs to request
        # it from its peers.

        self.log.info("Test that node2 propagates all the blocks to us")

        # a peer must send a getdata request for each of the invs it receives
        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(MSG_BLOCK, block))
        self.nodes[2].p2p.send_message(getdata_request)
        # node2 will respond by sending all of these blocks

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # P2PInterface objects.
        # mininode_lock is needed because we're accessing the BaseNode's map from the test logic thread
        # A race condition occurs if we try to read it while BaseNode is writing to it without a lock
        wait_until(lambda: sorted(blocks) == sorted(
            list(self.nodes[2].p2p.block_receive_map.keys())),
                   timeout=5,
                   lock=mininode_lock)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in self.nodes[2].p2p.block_receive_map.values():
                # BaseNode should have received exactly 1 of each block
                assert_equal(block, 1)
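
For completeness, the imports Example #7 most likely relies on; module paths follow the Bitcoin Core test framework of that era and may differ slightly between versions:

    from test_framework.blocktools import create_block, create_coinbase
    from test_framework.messages import CInv, MSG_BLOCK, msg_block, msg_getdata
    from test_framework.mininode import P2PInterface, mininode_lock
    from test_framework.util import assert_equal, connect_nodes, wait_until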
Example #8
    def test_compactblocks_not_at_tip(self, node, test_node):
        # Test that requesting old compactblocks doesn't work.
        MAX_CMPCTBLOCK_DEPTH = 5
        new_blocks = []
        for _ in range(MAX_CMPCTBLOCK_DEPTH + 1):
            test_node.clear_block_announcement()
            new_blocks.append(node.generate(1)[0])
            wait_until(test_node.received_block_announcement,
                       timeout=30,
                       lock=mininode_lock)

        test_node.clear_block_announcement()
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "cmpctblock" in test_node.last_message,
                   timeout=30,
                   lock=mininode_lock)

        test_node.clear_block_announcement()
        node.generate(1)
        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)
        test_node.clear_block_announcement()
        with mininode_lock:
            test_node.last_message.pop("block", None)
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "block" in test_node.last_message,
                   timeout=30,
                   lock=mininode_lock)
        with mininode_lock:
            test_node.last_message["block"].block.calc_sha256()
            assert_equal(test_node.last_message["block"].block.sha256,
                         int(new_blocks[0], 16))

        # Generate an old compactblock, and verify that it's not accepted.
        cur_height = node.getblockcount()
        hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
        block = self.build_block_on_tip(node)
        block.hashPrevBlock = hashPrevBlock
        block.solve()

        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block)
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

        tips = node.getchaintips()
        found = False
        for x in tips:
            if x["hash"] == block.hash:
                assert_equal(x["status"], "headers-only")
                found = True
                break
        assert (found)

        # Requesting this block via getblocktxn should silently fail
        # (to avoid fingerprinting attacks).
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
        with mininode_lock:
            test_node.last_message.pop("blocktxn", None)
        test_node.send_and_ping(msg)
        with mininode_lock:
            assert "blocktxn" not in test_node.last_message
Example #9
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 60 * 60 * 24 * 9)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(260)

        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1',
                         p2p_port(0),
                         self.nodes[0],
                         test_nodes[i],
                         protocol_version=BLOSSOM_PROTO_VERSION))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # Test logic begins here

        # Now mine a big block
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24)

        # Generate interim blocks. Due to the "max MTP" soft-forked rule, block timestamps
        # can be no more than 1.5 hours ahead of the chain tip's MTP. Thus we need to mine
        # enough blocks to advance the MTP forward to the desired mocked time.
        self.nodes[0].generate(1000)

        # Mine one more block, so that the prior block looks old
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        max_bytes_per_day = 2200 * 1024 * 1024
        daily_buffer = 1152 * 2000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available / old_block_size

        # 2304MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~14 tries.
        for i in range(int(success_count)):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i + 1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        print("Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 200 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(200):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        print("Peer 1 able to repeatedly download new block")

        # But if test_nodes[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        print("Peer 1 disconnected after trying to download old block")

        print("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and test_nodes[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)

        print("Peer 2 able to download old block")

        [c.disconnect_node() for c in connections]

        #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        print("Restarting nodes with -whitelist=127.0.0.1")
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(
            0,
            self.options.tmpdir,
            [
                "-debug",
                '-nuparams=2bb40e60:1',  # Blossom
                "-whitelist=127.0.0.1",
                "-maxuploadtarget=1",
            ])

        #recreate/reconnect 3 test nodes
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1',
                         p2p_port(0),
                         self.nodes[0],
                         test_nodes[i],
                         protocol_version=BLOSSOM_PROTO_VERSION))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        #retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        # node is still connected because of the whitelist
        assert_equal(len(self.nodes[0].getpeerinfo()), 3)

        print("Peer 1 still connected after trying to download old block (whitelisted)")

        [c.disconnect_node() for c in connections]
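
The "~14 tries" figure follows from the constants above; the implied old_block_size of roughly 200 kB is inferred from that comment, not shown in the excerpt:

    max_bytes_per_day = 2200 * 1024 * 1024  # 2,306,867,200 bytes
    daily_buffer = 1152 * 2000000           # 2,304,000,000 bytes kept for new blocks
    max_bytes_available = max_bytes_per_day - daily_buffer  # 2,867,200 bytes
    # 2,867,200 // ~200,000 bytes per old block ≈ 14 successful requests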
Example #10
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2 * 60 * 60 * 24 * 7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                         test_nodes[i]))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # Test logic begins here

        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        block_rate_minutes = 1
        blocks_per_day = 24 * 60 / block_rate_minutes
        max_block_serialized_size = 8000000  # This is MAX_BLOCK_SERIALIZED_SIZE_RIP2
        max_bytes_per_day = self.maxuploadtarget * 1024 * 1024
        daily_buffer = blocks_per_day * max_block_serialized_size
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 224051200B will be reserved for relaying new blocks, so expect this to
        # succeed for ~236 tries.
        for i in range(int(success_count)):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i + 1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info(
            "Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try lots of times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(500):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if test_nodes[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and test_nodes[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        [c.disconnect_node() for c in connections]

        # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        self.log.info("Restarting nodes with -whitelist=127.0.0.1")
        self.stop_node(0)
        self.start_node(0, [
            "-whitelist=127.0.0.1", "-maxuploadtarget=1",
            "-blockmaxsize=999000"
        ])

        # recreate/reconnect a test node
        test_nodes = [TestNode()]
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[0])
        ]
        test_nodes[0].add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread
        test_nodes[0].wait_for_verack()

        # retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_new_block], i + 1)

        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[0].send_and_ping(getdata_request)
        # node is still connected because of the whitelist
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer still connected after trying to download old block (whitelisted)")
Example #11
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        self.nodes[0].add_p2p_connection(BaseNode())

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        network_thread_start()
        # wait_for_verack ensures that the P2P connection is fully up.
        self.nodes[0].p2p.wait_for_verack()

        self.setup_stake_coins(self.nodes[0])

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        height = self.nodes[0].getblockcount()

        snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
        stakes = self.nodes[0].listunspent()
        for stake in stakes:
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            coinbase = sign_coinbase(
                self.nodes[0],
                create_coinbase(height, stake, snapshot_meta.hash))
            block = create_block(self.tip, coinbase, self.block_time)
            # Wait until the active chain picks up the previous block
            wait_until(lambda: self.nodes[0].getblockcount() == height,
                       timeout=5)
            snapshot_meta = update_snapshot_with_tx(self.nodes[0],
                                                    snapshot_meta, height + 1,
                                                    coinbase)
            block.solve()
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our P2PInterface
            self.nodes[0].p2p.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info(
            "Wait for node1 to reach current tip (height %d) using RPC" %
            height)
        self.nodes[1].waitforblockheight(height)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        # We can't add additional P2P connections once the network thread has started. Disconnect the connection
        # to node0, wait for the network thread to terminate, then connect to node2. This is specific to
        # the current implementation of the network thread and may be improved in future.
        self.nodes[0].disconnect_p2ps()
        network_thread_join()

        self.nodes[2].add_p2p_connection(BaseNode())
        network_thread_start()
        self.nodes[2].p2p.wait_for_verack()

        self.log.info(
            "Wait for node2 to reach the current tip. Test that it has propagated all the blocks to us"
        )

        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        self.nodes[2].p2p.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # P2PInterface objects.
        wait_until(lambda: sorted(blocks) == sorted(
            list(self.nodes[2].p2p.block_receive_map.keys())),
                   timeout=5,
                   lock=mininode_lock)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in self.nodes[2].p2p.block_receive_map.values():
                assert_equal(block, 1)
Example #12
 def send_block_request(self, block_hash, node):
     msg = msg_getdata()
     msg.inv.append(CInv(MSG_BLOCK, block_hash))
     node.send_message(msg)
Example #13
    def run_test(self):

        self.stop_node(0)

        with self.run_node_with_connections(
                "send GETDATA messages and check responses", 0, [],
                1) as p2p_connections:

            receivedBlocks = set()

            def on_block(conn, message):
                nonlocal receivedBlocks
                receivedBlocks.add(message.block.hash)

            receivedTxs = set()

            def on_tx(conn, message):
                nonlocal receivedTxs
                receivedTxs.add(message.tx.hash)

            receivedTxsNotFound = set()

            def on_notfound(conn, message):
                nonlocal receivedTxsNotFound
                for inv in message.inv:
                    receivedTxsNotFound.add(inv.hash)

            self.nodes[0].generate(5)

            connection = p2p_connections[0]
            connection.cb.on_block = on_block
            connection.cb.on_tx = on_tx
            connection.cb.on_notfound = on_notfound

            # 1. Check that sending GETDATA of unknown block does no action.
            unknown_hash = 0xdecaf
            connection.cb.send_message(
                msg_getdata([CInv(CInv.BLOCK, unknown_hash)]))

            # 2. Check that sending GETDATA of known block returns BLOCK message.
            known_hash = self.nodes[0].getbestblockhash()
            connection.cb.send_message(
                msg_getdata([CInv(CInv.BLOCK, int(known_hash, 16))]))
            wait_until(lambda: known_hash in receivedBlocks)
            # previously requested unknown block is not in the received list
            assert_equal(unknown_hash not in receivedBlocks, True)
            # python automatically sends GETDATA for INV that it receives
            # this means we can receive more blocks than just the one previously requested
            assert_equal(len(receivedBlocks) >= 1, True)

            # 3. Check that sending GETDATA of unknown transaction returns NOTFOUND message.
            connection.cb.send_message(
                msg_getdata([CInv(CInv.TX, unknown_hash)]))
            wait_until(lambda: unknown_hash in receivedTxsNotFound)

            # 4. Check that sending GETDATA of known transaction returns TX message.
            known_hash = self.nodes[0].sendtoaddress(
                self.nodes[0].getnewaddress(), 1.0)
            connection.cb.send_message(
                msg_getdata([CInv(CInv.TX, int(known_hash, 16))]))
            wait_until(lambda: known_hash in receivedTxs)
            assert_equal(len(receivedTxs), 1)
Example #14
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

        height = 1

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our NodeConn connection
            node0.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info("Wait for node2 reach current tip. Test that it has propogated all the blocks to us")

        for block in blocks:
            getdata_request = msg_getdata()
            getdata_request.inv.append(CInv(2, block))
            node2.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # NodeConnCB objects.
        assert wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
Example #15
    def run_test(self):
        inv_items = []
        block_priority_block_msg_pos = []
        default_block_msg_pos = []
        last_msg_pos = self.num_txns + 1

        # Initial node setup
        extra_args = [
            '-maxnonstdtxvalidationduration=100000',
            '-maxtxnvalidatorasynctasksrunduration=100001'
        ]
        with self.run_node_with_connections("Setup node", 0, extra_args,
                                            1) as connections:
            conn = connections[0]

            # Create and send some transactions to the node
            node = self.nodes[0]
            node.generate(100)
            funding_tx = make_funding_transaction(node)
            tx_generator = transaction_generator(funding_tx)
            for tx in islice(tx_generator, self.num_txns):
                inv_items.append(CInv(1, tx.sha256))
                conn.send_message(msg_tx(tx))
            wait_until(lambda: node.getmempoolinfo()['size'] == self.num_txns,
                       timeout=240)

        # Restart node with associations
        associations_stream_policies = [
            BlockPriorityStreamPolicy(),
            DefaultStreamPolicy(),
            BlockPriorityStreamPolicy(),
            DefaultStreamPolicy()
        ]
        extra_args = [
            '-whitelist=127.0.0.1', '-maxnonstdtxvalidationduration=100000',
            '-maxtxnvalidatorasynctasksrunduration=100001'
        ]
        with self.run_node_with_associations(
                "Test block priority",
                0,
                extra_args,
                associations_stream_policies,
                cb_class=MyAssociationCB) as associations:
            # Wait for node to fully reinitialise itself
            node = self.nodes[0]
            wait_until(lambda: node.getmempoolinfo()['size'] == self.num_txns,
                       timeout=180)

            # Send MEMPOOL request so node will accept our GETDATA for transactions in the mempool
            for association in associations:
                association.send_message(msg_mempool())
                # This request will result in us requesting all the txns. Wait until that finishes and
                # then reset our message counts in preparation for the real test to come.
                wait_until(
                    lambda: association.callbacks.msg_count == self.num_txns)
                association.callbacks.reset_msg_counts()

            # Send GETDATA to request txns and a block, with the block as the last item in the list
            blockhash = int(node.getbestblockhash(), 16)
            inv_items.append(CInv(2, blockhash))
            for association in associations:
                association.send_message(msg_getdata(inv_items))

            # Wait for all GETDATA requests to have a response
            for association in associations:
                wait_until(lambda: association.callbacks.block_count == 1)

                # Remember at what position we got the block msg for the different policies
                if type(association.stream_policy
                        ) is BlockPriorityStreamPolicy:
                    block_priority_block_msg_pos.append(
                        association.callbacks.block_msg_position)
                    logger.info(
                        "BlockPriority policy block received at position {}".
                        format(association.callbacks.block_msg_position))
                elif type(association.stream_policy) is DefaultStreamPolicy:
                    default_block_msg_pos.append(
                        association.callbacks.block_msg_position)
                    logger.info(
                        "Default policy block received at position {}".format(
                            association.callbacks.block_msg_position))

            # For the DEFAULT policy, the block will have been received last (because it was requested last)
            for pos in default_block_msg_pos:
                assert_equal(pos, last_msg_pos)
            # For the BLOCKPRIORITY policy, the block should have been received sooner (this is possibly
            # slightly racy, but it's been very safe on all systems I've tried it on)
            avg_pos = sum(block_priority_block_msg_pos) / len(
                block_priority_block_msg_pos)
            assert_greater_than(last_msg_pos, avg_pos)

            # Generate a new block to trigger a block INV and wait for the INV
            node.generate(1)
            for association in associations:
                wait_until(lambda: association.callbacks.block_inv_stream_type
                           != StreamType.UNKNOWN)

                # Verify that BlockPriority associations got block INV over the high priority stream
                if type(association.stream_policy
                        ) is BlockPriorityStreamPolicy:
                    assert_equal(association.callbacks.block_inv_stream_type,
                                 StreamType.DATA1)
Example #16
 def send_block_request(self, block_hash, node):
     msg = msg_getdata()
     msg.inv.append(CInv(2, block_hash))  # 2 == "Block"
     node.send_message(msg)
Example #17
    def test_compactblock_construction(self, node, test_node, version,
                                       use_witness_address):
        # Generate a bunch of transactions.
        node.generate(101)
        num_transactions = 25
        address = node.getnewaddress()
        if use_witness_address:
            # Want at least one segwit spend, so move all funds to
            # a witness address.
            address = node.addwitnessaddress(address)
            value_to_send = node.getbalance()
            node.sendtoaddress(address,
                               satoshi_round(value_to_send - Decimal(0.1)))
            node.generate(1)

        segwit_tx_generated = False
        for _ in range(num_transactions):
            txid = node.sendtoaddress(address, 0.1)
            hex_tx = node.gettransaction(txid)["hex"]
            tx = from_hex(CTransaction(), hex_tx)
            if not tx.wit.is_null():
                segwit_tx_generated = True

        if use_witness_address:
            assert (segwit_tx_generated)  # check that our test is not broken

        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        test_node.wait_for_block_announcement(tip)

        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node, node, version)

        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)

        # Store the raw block in our internal format.
        block = from_hex(CBlock(), node.getblock("%064x" % block_hash, False))
        for tx in block.vtx:
            tx.calc_sha256()
        block.rehash()

        # Wait until the block was announced (via compact blocks)
        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert ("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)

        # Now fetch the compact block using a normal non-announce getdata
        with mininode_lock:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
            test_node.send_message(msg_getdata([inv]))

        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert ("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)
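
request_cb_announcements is not included in this excerpt. A sketch of the usual shape of such a helper: catch the peer up on headers, then send sendcmpct with announce=True so the node fast-announces new blocks as compact blocks. get_headers here is assumed to be a TestNode helper defined elsewhere in the same test:

    def request_cb_announcements(self, test_node, node, version):
        tip = node.getbestblockhash()
        test_node.get_headers(locator=[int(tip, 16)], hashstop=0)
        msg = msg_sendcmpct()
        msg.version = version
        msg.announce = True
        test_node.send_and_ping(msg)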