Example #1
 def send_block_inv(self, blockhash):
     msg = msg_inv()
     msg.inv = [CInv(2, blockhash)]
     self.send_message(msg)
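For context, a hypothetical usage sketch (assuming the mininode conventions seen throughout this listing: `peer` is an instance of the class defining this helper and `block` is a CBlock):

    # Announce a freshly built block to the node under test.
    block.rehash()                      # computes block.sha256 from the header
    peer.send_block_inv(block.sha256)   # inv type 2 == MSG_BLOCK
    peer.sync_with_ping()               # wait until the node processed the inv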
Example #2
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection0 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection0)

        node1 = NodeConnCB()
        connection1 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection1)

        # *** Prepare a node connection for early announcement testing
        node2 = NodeConnCB()
        node2.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2))

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        # *** Activate early announcement functionality for this connection
        #     After this point early announcements are not received yet -
        #     we still need to set the latest announced block
        #     (CNode::pindexBestKnownBlock), which is set e.g. by sending a
        #     getheaders message with a non-null locator
        node2.wait_for_verack()
        node2.send_message(msg_sendcmpct(announce=True))

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = self.chain.get_spendable_output()

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count, spend=out, extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count, spend=out, extra_txns=2)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block4_hard = self.chain.next_block(block_count, spend=out, extra_txns=10)
        block_count += 1

        # of the three competing blocks two are "hard"; with
        # waitaftervalidatingblock we artificially extend their validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        # *** Complete the early announcement setup by sending a getheaders
        #     message with a non-null locator (pointing to the last block that
        #     we know of on the python side - we claim to know of all the
        #     blocks that the bitcoind node knows of)
        #
        #     We also set the on_cmpctblock handler, as early announced blocks
        #     are announced via compact block messages instead of inv messages
        node2.send_and_ping(msg_getheaders(locator_have=[int(self.nodes[0].getbestblockhash(), 16)]))
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block2_hard.sha256
        def on_cmpctblock(conn, message):
            nonlocal receivedAnnouncement
            message.header_and_shortids.header.calc_sha256()
            if message.header_and_shortids.header.sha256 == waiting_for_announcement_block_hash:
                receivedAnnouncement = True
        node2.on_cmpctblock = on_cmpctblock

        # send one block via p2p and one via rpc
        node0.send_message(msg_block(block2_hard))

        # *** make sure that we receive the announcement of the block before
        #     it has been validated
        wait_until(lambda: receivedAnnouncement)

        # call the submitblock RPC in a separate thread because
        # waitaftervalidatingblock blocks the return of submitblock
        submitblock_thread = threading.Thread(target=self.nodes[0].submitblock, args=(ToHex(block4_hard),))
        submitblock_thread.start()

        # because self.nodes[0] rpc is blocked we use another rpc client
        rpc_client = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0), 0,
                             coveragedir=self.options.coveragedir)

        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, rpc_client, self.log)

        # *** prepare to intercept the block3_easier announcement - it will not
        #     be announced before validation is complete, as early announcement
        #     is limited to one block per height (siblings are ignored), but
        #     after validation completes we should still get the announcing
        #     compact block message
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block3_easier.sha256

        self.log.info(f"easy block3 hash: {block3_easier.hash}")
        node1.send_message(msg_block(block3_easier))

        # *** Make sure that we receive a compact block announcement of the
        #     block after the validation is complete, even though it was not
        #     the first block received by the bitcoind node.
        #
        #     Also make sure that the nodes that are not using the early
        #     announcement functionality receive an inv announcement of the
        #     block after the validation is complete.
        wait_until(lambda: receivedAnnouncement)
        node0.wait_for_inv([CInv(2, block3_easier.sha256)]) # 2 == GetDataMsg::MSG_BLOCK
        # node1 was the sender but receives the inv for the block nonetheless
        # (with early announcement that is not the case - the sender does not receive the announcement)
        node1.wait_for_inv([CInv(2, block3_easier.sha256)]) # 2 == GetDataMsg::MSG_BLOCK

        rpc_client.waitforblockheight(102)
        assert_equal(block3_easier.hash, rpc_client.getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        rpc_client.waitaftervalidatingblock(block2_hard.hash, "remove")
        rpc_client.waitaftervalidatingblock(block4_hard.hash, "remove")
        submitblock_thread.join()

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
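The interception pattern above (install a temporary handler that sets a nonlocal flag, then poll the flag with wait_until) generalizes to any p2p message type. A condensed sketch of the same idea for inv messages, reusing the names from this example and assumed to sit inside run_test:

        received = False
        expected_hash = block3_easier.sha256

        def on_inv(conn, message):
            nonlocal received                 # flag lives in run_test's scope
            if any(inv.hash == expected_hash for inv in message.inv):
                received = True

        node0.on_inv = on_inv                 # temporarily override the handler
        wait_until(lambda: received)          # poll until the inv arrives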
Example #3
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 60 * 60 * 24 * 9)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(260)

        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1',
                         p2p_port(0),
                         self.nodes[0],
                         test_nodes[i],
                         protocol_version=BLOSSOM_PROTO_VERSION))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # Test logic begins here

        # Now mine a big block
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24)

        # Generate interim blocks. Due to the "max MTP" soft-forked rule, block timestamps
        # can be no more than 1.5 hours ahead of the chain tip's MTP. Thus we need to mine
        # enough blocks to advance the MTP forward to the desired mocked time.
        self.nodes[0].generate(1000)

        # Mine one more block, so that the prior block looks old
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        max_bytes_per_day = 2200 * 1024 * 1024
        daily_buffer = 1152 * 2000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available / old_block_size
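        # Worked through, the constants above give (pure arithmetic):
        #   max_bytes_per_day   = 2200 * 1024 * 1024 = 2,306,867,200 B
        #   daily_buffer        = 1152 * 2,000,000   = 2,304,000,000 B
        #   max_bytes_available =                          2,867,200 B
        # so success_count is roughly 2.8 MB divided by old_block_size.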

        # 2304MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~14 tries.
        for i in range(int(success_count)):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i + 1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        print("Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 200 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(200):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        print("Peer 1 able to repeatedly download new block")

        # But if test_nodes[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        print("Peer 1 disconnected after trying to download old block")

        print("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and test_nodes[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)

        print("Peer 2 able to download old block")

        [c.disconnect_node() for c in connections]

        # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        print("Restarting nodes with -whitelist=127.0.0.1")
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(
            0,
            self.options.tmpdir,
            [
                "-debug",
                '-nuparams=2bb40e60:1',  # Blossom
                "-whitelist=127.0.0.1",
                "-maxuploadtarget=1",
            ])

        # recreate/reconnect 3 test nodes
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1',
                         p2p_port(0),
                         self.nodes[0],
                         test_nodes[i],
                         protocol_version=BLOSSOM_PROTO_VERSION))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()),
                     3)  #node is still connected because of the whitelist

        print(
            "Peer 1 still connected after trying to download old block (whitelisted)"
        )

        [c.disconnect_node() for c in connections]
Example #4
 def send_get_data(self, block_hashes):
     """Request data for a list of block hashes."""
     msg = msg_getdata()
     for x in block_hashes:
         msg.inv.append(CInv(2, x))
     self.send_message(msg)
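A hypothetical usage sketch (assuming `peer` is an instance of the class defining this helper and `node` is the test's RPC handle, as in the surrounding examples):

    # Request the first three blocks of the chain and wait for delivery.
    hashes = [int(node.getblockhash(h), 16) for h in range(1, 4)]
    peer.send_get_data(hashes)
    peer.sync_with_ping()   # the getdata has been processed once ping returns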
Example #5
    def test_compactblocks_not_at_tip(self, node, test_node):
        # Test that requesting old compactblocks doesn't work.
        MAX_CMPCTBLOCK_DEPTH = 5
        new_blocks = []
        for _ in range(MAX_CMPCTBLOCK_DEPTH + 1):
            test_node.clear_block_announcement()
            new_blocks.append(node.generate(1)[0])
            wait_until(
                test_node.received_block_announcement,
                timeout=30,
                lock=mininode_lock,
                err_msg=
                "test_compactblocks_not_at_tip test_node.received_block_announcement"
            )

        test_node.clear_block_announcement()
        test_node.send_message(MsgGetdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(
            lambda: "cmpctblock" in test_node.last_message,
            timeout=30,
            lock=mininode_lock,
            err_msg="test_compactblocks_not_at_tip testnode.last_message")

        test_node.clear_block_announcement()
        node.generate(1)
        wait_until(
            test_node.received_block_announcement,
            timeout=30,
            lock=mininode_lock,
            err_msg=
            "test_compactblocks_not_at_tip test_node.received_block_announcement"
        )
        test_node.clear_block_announcement()
        with mininode_lock:
            test_node.last_message.pop("block", None)
        test_node.send_message(MsgGetdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(
            lambda: "block" in test_node.last_message,
            timeout=30,
            lock=mininode_lock,
            err_msg=
            "test_node.received_block_announcement test_node.last_message")
        with mininode_lock:
            test_node.last_message["block"].block.calc_x16r()
            assert_equal(test_node.last_message["block"].block.sha256,
                         int(new_blocks[0], 16))

        # Generate an old compactblock, and verify that it's not accepted.
        cur_height = node.getblockcount()
        hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
        block = self.build_block_on_tip(node)
        block.hashPrevBlock = hashPrevBlock
        block.solve()

        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block)
        test_node.send_and_ping(MsgCmpctBlock(comp_block.to_p2p()))

        tips = node.getchaintips()
        found = False
        for x in tips:
            if x["hash"] == block.hash:
                assert_equal(x["status"], "headers-only")
                found = True
                break
        assert found

        # Requesting this block via getblocktxn should silently fail
        # (to avoid fingerprinting attacks).
        msg = MsgGetBlockTxn()
        msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
        with mininode_lock:
            test_node.last_message.pop("blocktxn", None)
        test_node.send_and_ping(msg)
        with mininode_lock:
            assert "blocktxn" not in test_node.last_message
Example #6
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        self.nodes[0].generate(10)

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info(
            "Wait for node2 to reach the current tip. Test that it has propagated all the blocks to us"
        )

        getdata_request = MsgGetdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        node2.send_message(getdata_request)
        self.sync_all([self.nodes[1:2]])

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
Example #7
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()   # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start() # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = time.time() + 1
        for i in xrange(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        print "First height 2 block accepted by both nodes"

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in xrange(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        print "Second height 2 block accepted only from whitelisted peer"

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in xrange(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        try:
            self.nodes[0].getblock(blocks_h3[0].hash)
            print "Unrequested more-work block accepted from non-whitelisted peer"
        except:
            raise AssertionError("Unrequested more work block was not processed")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        print "Successfully reorged to length 3 chain from whitelisted peer"

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []   # node0's blocks
        for j in xrange(2):
            for i in xrange(288):
                next_block = create_block(tips[j].sha256, create_coinbase(), tips[j].nTime+1)
                next_block.solve()
                if j==0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        for x in all_blocks:
            try:
                self.nodes[0].getblock(x.hash)
                if x == all_blocks[287]:
                    raise AssertionError("Unrequested block too far-ahead should have been ignored")
            except:
                if x == all_blocks[287]:
                    print "Unrequested block too far-ahead not processed"
                else:
                    raise AssertionError("Unrequested block with more work should have been accepted")

        headers_message.headers.pop() # Ensure the last block is unrequested
        white_node.send_message(headers_message) # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        try:
            white_node.sync_with_ping()
            self.nodes[1].getblock(tips[1].hash)
            print "Unrequested block far ahead of tip accepted from whitelisted peer"
        except:
            raise AssertionError("Unrequested block from whitelisted peer not accepted")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        print "Unrequested block that would complete more-work chain was ignored"

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        print "Inv at tip triggered getdata for unprocessed block"

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        print "Successfully reorged to longer chain from non-whitelisted peer"

        [ c.disconnect_node() for c in connections ]
Example #8
 def send_block_inv(self, blockhash):
     msg = MsgInv()
     msg.inv = [CInv(2, blockhash)]
     self.connection.send_message(msg)
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = NodeConnCB()   # connects to node0 (not whitelisted)
        white_node = NodeConnCB()  # connects to node1 (whitelisted)
        min_work_node = NodeConnCB()  # connects to node2 (not whitelisted)

        connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node),
                       NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node),
                       NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node)]
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])
        min_work_node.add_connection(connections[2])

        NetworkThread().start() # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()
        min_work_node.wait_for_verack()

        # 1. Have nodes mine a block (nodes1/2 leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]

        # 2. Send one block that builds on each tip.
        # This should be accepted by nodes 1/2
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(3):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(MsgBlock(blocks_h2[0]))
        white_node.send_message(MsgBlock(blocks_h2[1]))
        min_work_node.send_message(MsgBlock(blocks_h2[2]))

        for x in [test_node, white_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        assert_equal(self.nodes[2].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0/node1; correctly rejected by node2")

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(MsgBlock(blocks_h2f[0]))
        white_node.send_message(MsgBlock(blocks_h2f[1]))

        for x in [test_node, white_node]:
            x.sync_with_ping()
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        self.log.info("Second height 2 block accepted only from whitelisted peer")

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(MsgBlock(blocks_h3[0]))
        white_node.send_message(MsgBlock(blocks_h3[1]))

        for x in [test_node, white_node]:
            x.sync_with_ping()
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        self.nodes[0].getblock(blocks_h3[0].hash)
        self.log.info("Unrequested more-work block accepted from non-whitelisted peer")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        self.log.info("Successfully reorged to length 3 chain from whitelisted peer")

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = MsgHeaders()
        all_blocks = []   # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
                next_block.solve()
                if j==0:
                    test_node.send_message(MsgBlock(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

        headers_message.headers.pop() # Ensure the last block is unrequested
        white_node.send_message(headers_message) # Send headers leading to tip
        white_node.send_message(MsgBlock(tips[1]))  # Now deliver the tip
        white_node.sync_with_ping()
        self.nodes[1].getblock(tips[1].hash)
        self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(MsgBlock(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(MsgInv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(MsgBlock(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

        # 8. Connect node2 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], 2)
        sync_blocks([self.nodes[0], self.nodes[2]])
        self.log.info("Successfully synced nodes 2 and 0")

        [ c.disconnect_node() for c in connections ]
Example #10
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2 * 60 * 60 * 24 * 7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                         test_nodes[i]))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # Test logic begins here

        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        block_rate_minutes = 1
        blocks_per_day = 24 * 60 / block_rate_minutes
        max_block_serialized_size = 8000000  # This is MAX_BLOCK_SERIALIZED_SIZE_RIP2
        max_bytes_per_day = self.maxuploadtarget * 1024 * 1024
        daily_buffer = blocks_per_day * max_block_serialized_size
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size
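        # Worked numbers, assuming self.maxuploadtarget is 11200 (MiB):
        #   blocks_per_day      = 24 * 60 / 1        = 1,440
        #   daily_buffer        = 1,440 * 8,000,000  = 11,520,000,000 B
        #   max_bytes_available = 11200 MiB - buffer =     224,051,200 B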

        # 224051200B remain available for serving old blocks (the rest of the
        # upload target is the buffer reserved for relaying new blocks), so
        # expect this to succeed for ~236 tries.
        for i in range(int(success_count)):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i + 1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info(
            "Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try lots of times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(500):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if test_nodes[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and test_nodes[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        [c.disconnect_node() for c in connections]

        # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        self.log.info("Restarting nodes with -whitelist=127.0.0.1")
        self.stop_node(0)
        self.start_node(0, [
            "-whitelist=127.0.0.1", "-maxuploadtarget=1",
            "-blockmaxsize=999000"
        ])

        # recreate/reconnect a test node
        test_nodes = [TestNode()]
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[0])
        ]
        test_nodes[0].add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread
        test_nodes[0].wait_for_verack()

        # retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_new_block], i + 1)

        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[0].send_and_ping(getdata_request)
        assert_equal(len(self.nodes[0].getpeerinfo()),
                     1)  # node is still connected because of the whitelist

        self.log.info(
            "Peer still connected after trying to download old block (whitelisted)"
        )
Example #11
    def run_test(self):
        testnode0 = TestNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                                    testnode0, "regtest", OVERWINTER_PROTO_VERSION))
        testnode0.add_connection(connections[0])

        # Start up network handling in another thread
        NetworkThread().start()
        testnode0.wait_for_verack()

        # Verify mininodes are connected to zcashd nodes
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(0, peerinfo[0]["banscore"])

        # Mine some blocks so we can spend
        self.coinbase_blocks = self.nodes[0].generate(200)
        self.nodeaddress = self.nodes[0].getnewaddress()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 0)

        # Mininode sends an expiring-soon transaction in a "tx" message to the zcashd node
        tx1 = self.create_transaction(self.nodes[0],
                                      self.coinbase_blocks[0],
                                      self.nodeaddress, 10.0,
                                      203)
        testnode0.send_message(msg_tx(tx1))

        # Mininode sends a transaction in a "tx" message to the zcashd node
        tx2 = self.create_transaction(self.nodes[0],
                                      self.coinbase_blocks[1],
                                      self.nodeaddress, 10.0,
                                      204)
        testnode0.send_message(msg_tx(tx2))

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify contents of mempool
        assert(tx1.hash not in self.nodes[0].getrawmempool()) # tx1 rejected as expiring soon
        assert(tx1.hash not in self.nodes[1].getrawmempool())
        assert(tx2.hash in self.nodes[0].getrawmempool()) # tx2 accepted
        assert(tx2.hash in self.nodes[1].getrawmempool())
        assert_equal(len(self.nodes[2].getrawmempool()), 0) # node 2 is isolated and empty

        # Send p2p message "mempool" to receive contents from zcashd node in "inv" message
        with mininode_lock:
            testnode0.last_inv = None
            testnode0.send_message(msg_mempool())

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify txid for tx2
        with mininode_lock:
            msg = testnode0.last_inv
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Send p2p message "getdata" to verify tx2 gets sent in "tx" message
        getdatamsg = msg_getdata()
        getdatamsg.inv = [ CInv(1, tx2.sha256) ]
        with mininode_lock:
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify data received in "tx" message is for tx2
        with mininode_lock:
            incoming_tx = testnode0.last_tx.tx
            incoming_tx.rehash()
            assert_equal(tx2.sha256, incoming_tx.sha256)

        # Sync node 2's chain and mine an empty block on it, leaving tx2 in the mempools of node0 and node1
        for blkhash in self.coinbase_blocks:
            blk = self.nodes[0].getblock(blkhash, 0)
            self.nodes[2].submitblock(blk)
        self.nodes[2].generate(1)

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Reconnect node 2 to the network
        connect_nodes_bi(self.nodes,1,2)

        # Set up test node for node 2
        testnode2 = TestNode()
        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2],
                                    testnode2, "regtest", OVERWINTER_PROTO_VERSION))
        testnode2.add_connection(connections[-1])

        # Verify block count
        sync_blocks(self.nodes[:3])
        assert_equal(self.nodes[0].getblockcount(), 201)
        assert_equal(self.nodes[1].getblockcount(), 201)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Verify contents of mempool
        assert(tx2.hash in self.nodes[0].getrawmempool())
        assert(tx2.hash in self.nodes[1].getrawmempool())
        assert(tx2.hash not in self.nodes[2].getrawmempool())

        # Confirm tx2 cannot be submitted to a mempool because it is expiring soon.
        try:
            rawtx2 = hexlify(tx2.serialize())
            self.nodes[2].sendrawtransaction(rawtx2)
            assert(False)
        except JSONRPCException as e:
            errorString = e.error['message']
            assert("tx-expiring-soon" in errorString)

        # Ask node 0 for tx2...
        with mininode_lock:
            testnode0.last_notfound = None
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        [ x.sync_with_ping() for x in [testnode0, testnode2] ]

        # Verify node 0 does not reply to "getdata" by sending "tx" message, as tx2 is expiring soon
        with mininode_lock:
            assert_equal(testnode0.last_tx, None)

        # Verify mininode received a "notfound" message containing the txid of tx2
        with mininode_lock:
            msg = testnode0.last_notfound
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Create a transaction to verify that processing of "getdata" messages is functioning
        tx3 = self.create_transaction(self.nodes[0],
                                      self.coinbase_blocks[2],
                                      self.nodeaddress, 10.0,
                                      999)

        # Mininodes send tx3 to zcashd node
        testnode0.send_message(msg_tx(tx3))
        getdatamsg = msg_getdata()
        getdatamsg.inv = [ CInv(1, tx3.sha256) ]
        with mininode_lock:
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        [ x.sync_with_ping() for x in [testnode0, testnode2] ]

        # Verify we received a "tx" message for tx3
        with mininode_lock:
            incoming_tx = testnode0.last_tx.tx
            incoming_tx.rehash()
            assert_equal(tx3.sha256, incoming_tx.sha256)

        # Send p2p message "mempool" to receive contents from zcashd node in "inv" message
        with mininode_lock:
            testnode0.last_inv = None
            testnode0.send_message(msg_mempool())

        # Sync up with node after p2p messages delivered
        [ x.sync_with_ping() for x in [testnode0, testnode2] ]

        # Verify txid for tx3 is returned in "inv", but tx2 which is expiring soon is not returned
        with mininode_lock:
            msg = testnode0.last_inv
            assert_equal(len(msg.inv), 1)
            assert_equal(tx3.sha256, msg.inv[0].hash)

        # Verify contents of mempool
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[0].getrawmempool()))
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[1].getrawmempool()))
        assert_equal({tx3.hash}, set(self.nodes[2].getrawmempool()))

        # Verify banscore for nodes are still zero
        assert_equal(0, sum(peer["banscore"] for peer in self.nodes[0].getpeerinfo()))
        assert_equal(0, sum(peer["banscore"] for peer in self.nodes[2].getpeerinfo()))

        [ c.disconnect_node() for c in connections ]
Example #12
    def run_test(self):

        self.stop_node(0)

        with self.run_node_with_connections(
                "send GETDATA messages and check responses", 0, [],
                1) as p2p_connections:

            receivedBlocks = set()

            def on_block(conn, message):
                nonlocal receivedBlocks
                receivedBlocks.add(message.block.hash)

            receivedTxs = set()

            def on_tx(conn, message):
                nonlocal receivedTxs
                receivedTxs.add(message.tx.hash)

            receivedTxsNotFound = set()

            def on_notfound(conn, message):
                nonlocal receivedTxsNotFound
                for inv in message.inv:
                    receivedTxsNotFound.add(inv.hash)

            self.nodes[0].generate(5)

            connection = p2p_connections[0]
            connection.cb.on_block = on_block
            connection.cb.on_tx = on_tx
            connection.cb.on_notfound = on_notfound

            # 1. Check that sending GETDATA for an unknown block triggers no response.
            unknown_hash = 0xdecaf
            connection.cb.send_message(
                msg_getdata([CInv(CInv.BLOCK, unknown_hash)]))

            # 2. Check that sending GETDATA of known block returns BLOCK message.
            known_hash = self.nodes[0].getbestblockhash()
            connection.cb.send_message(
                msg_getdata([CInv(CInv.BLOCK, int(known_hash, 16))]))
            wait_until(lambda: known_hash in receivedBlocks)
            # previously requested unknown block is not in the received list
            assert_equal(unknown_hash not in receivedBlocks, True)
            # the python test framework automatically sends GETDATA for each INV
            # it receives, so we may receive more blocks than just the one
            # previously requested
            assert_equal(len(receivedBlocks) >= 1, True)

            # 3. Check that sending GETDATA of unknown transaction returns NOTFOUND message.
            connection.cb.send_message(
                msg_getdata([CInv(CInv.TX, unknown_hash)]))
            wait_until(lambda: unknown_hash in receivedTxsNotFound)

            # 4. Check that sending GETDATA of known transaction returns TX message.
            known_hash = self.nodes[0].sendtoaddress(
                self.nodes[0].getnewaddress(), 1.0)
            connection.cb.send_message(
                msg_getdata([CInv(CInv.TX, int(known_hash, 16))]))
            wait_until(lambda: known_hash in receivedTxs)
            assert_equal(len(receivedTxs), 1)
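Note that unlike the bare 1/2/4 literals in the other examples, this framework exposes the type codes as attributes on CInv itself. A hedged equivalence sketch (values inferred from how the literals are used elsewhere in this listing):

    assert CInv.TX == 1      # used as CInv(1, txid) in the other examples
    assert CInv.BLOCK == 2   # used as CInv(2, blockhash) in the other examples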
Example #13
 def get_data(self, block_hashes):
     msg = MsgGetdata()
     for x in block_hashes:
         msg.inv.append(CInv(2, x))
     self.connection.send_message(msg)
Example #14
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes (a second, to node2, is added later)
        self.nodes[0].add_p2p_connection(BaseNode())

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        network_thread_start()
        # wait_for_verack ensures that the P2P connection is fully up.
        self.nodes[0].p2p.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        height = 1

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height),
                                 self.block_time)
            block.solve()
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our P2PInterface
            self.nodes[0].p2p.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        # We can't add additional P2P connections once the network thread has started. Disconnect the connection
        # to node0, wait for the network thread to terminate, then connect to node2. This is specific to
        # the current implementation of the network thread and may be improved in future.
        self.nodes[0].disconnect_p2ps()
        network_thread_join()

        self.nodes[2].add_p2p_connection(BaseNode())
        network_thread_start()
        self.nodes[2].p2p.wait_for_verack()

        self.log.info(
            "Wait for node2 to reach the current tip. Test that it has propagated all the blocks to us"
        )

        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        self.nodes[2].p2p.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # P2PInterface objects.
        wait_until(lambda: sorted(blocks) == sorted(
            list(self.nodes[2].p2p.block_receive_map.keys())),
                   timeout=5,
                   lock=mininode_lock)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in self.nodes[2].p2p.block_receive_map.values():
                assert_equal(block, 1)
Example #15
    def test_compactblock_construction(self, node, test_node, version,
                                       use_witness_address):
        # Generate a bunch of transactions.
        node.generate(101)
        num_transactions = 25
        address = node.getnewaddress()
        if use_witness_address:
            # Want at least one segwit spend, so move all funds to
            # a witness address.
            address = node.addwitnessaddress(address)
            value_to_send = node.getbalance()
            node.sendtoaddress(address,
                               satoshi_round(value_to_send - Decimal(0.1)))
            node.generate(1)

        segwit_tx_generated = False
        for _ in range(num_transactions):
            txid = node.sendtoaddress(address, 0.1)
            hex_tx = node.gettransaction(txid)["hex"]
            tx = from_hex(CTransaction(), hex_tx)
            if not tx.wit.is_null():
                segwit_tx_generated = True

        if use_witness_address:
            assert segwit_tx_generated  # check that our test is not broken

        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        test_node.wait_for_block_announcement(tip)

        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node, node, version)

        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)

        # Store the raw block in our internal format.
        block = from_hex(CBlock(), node.getblock("%02x" % block_hash, False))
        for tx in block.vtx:
            tx.calc_x16r()
        block.rehash()

        # Wait until the block was announced (via compact blocks)
        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock,
                   err_msg="test_node.received_block_announcement")

        # Now fetch and check the compact block
        with mininode_lock:
            assert ("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)

        # Now fetch the compact block using a normal non-announce getdata
        with mininode_lock:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
            test_node.send_message(MsgGetdata([inv]))

        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock,
                   err_msg="test_node.received_block_announcement")

        # Now fetch and check the compact block
        with mininode_lock:
            assert ("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)
Example #16
 def send_block_request(self, block_hash, node):
     msg = msg_getdata()
     msg.inv.append(CInv(2, block_hash))  # 2 == "Block"
     node.send_message(msg)
Example #17
 def cinv_for(self, txid, authDigest=None):
     if authDigest is not None:
         return CInv(5, txid, authDigest)
     else:
         return CInv(1, txid)
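Taken together, these examples cover most of the inventory type codes that can appear as the first CInv argument. A short reference sketch (constant names as in Bitcoin Core's protocol.h; the type-5 pairing of txid and authDigest is Zcash-specific, per ZIP 239, as in this last example):

    MSG_TX             = 1  # transaction by txid (Examples #11, #12, #17)
    MSG_BLOCK          = 2  # full block (most examples above)
    MSG_FILTERED_BLOCK = 3  # merkleblock, for BIP 37 bloom-filter peers
    MSG_CMPCT_BLOCK    = 4  # compact block (Examples #5 and #15)
    MSG_WTX            = 5  # Zcash: (txid, authDigest) pair, per ZIP 239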