Example #1
    def run(self):
        try:
            self.connection.rpc.generate(1) # Leave IBD

            numBlocksToGenerate = [ 8, 16, 128, 1024 ]
            for count in range(len(numBlocksToGenerate)):
                current_invs = []
                for i in range(numBlocksToGenerate[count]):
                    current_invs.append(CInv(2, random.randrange(0, 1<<256)))
                    if len(current_invs) >= 50000:
                        self.connection.send_message(msg_inv(current_invs))
                        current_invs = []
                if len(current_invs) > 0:
                    self.connection.send_message(msg_inv(current_invs))

                # Wait and see how many blocks were requested
                time.sleep(2)

                total_requests = 0
                with mininode_lock:
                    for key in self.blockReqCounts:
                        total_requests += self.blockReqCounts[key]
                        if self.blockReqCounts[key] > 1:
                            raise AssertionError("Error, test failed: block %064x requested more than once" % key)
                if total_requests > MAX_REQUESTS:
                    raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
                print "Round %d: success (total requests: %d)" % (count, total_requests)
        except AssertionError as e:
            print "TEST FAILED: ", e.args

        self.disconnectOkay = True
        self.connection.disconnect_node()
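The batching above follows the P2P protocol's cap of 50,000 entries per inv message (MAX_INV_SZ). A minimal standalone sketch of the same pattern, assuming the CInv and msg_inv classes from the test framework's mininode module and any connection object exposing send_message (send_invs_in_batches is a hypothetical helper name, not part of the framework):

def send_invs_in_batches(connection, block_hashes, batch_size=50000):
    # Flush block announcements in chunks of at most batch_size entries,
    # mirroring the current_invs handling in the example above.
    batch = []
    for h in block_hashes:
        batch.append(CInv(2, h))  # inv type 2 = MSG_BLOCK
        if len(batch) >= batch_size:
            connection.send_message(msg_inv(batch))
            batch = []
    if batch:
        connection.send_message(msg_inv(batch))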
Example #2
    def run_recvinvqueuefactor_test(self, maxprotocolrecvpayloadlength, recvinvqueuefactor):
        """
        This method sends 4*recvinvqueuefactor inv messages, each filled with the maximum number of elements allowed by maxprotocolrecvpayloadlength.
        The node should remember all of these announcements and request them via getdata.
        After one additional inv message is sent, the node no longer requests every inv that was announced to it.
        """
        # start node, protoconf messages, ...
        test_node = self.start_node_with_protoconf(maxprotocolrecvpayloadlength, recvinvqueuefactor)

        maxInvElements = CInv.estimateMaxInvElements(test_node.max_recv_payload_length)

        # Send enough inv messages to fill the queue in bitcoind
        for n in range(0, 4*recvinvqueuefactor):
            test_node.send_message(msg_inv([CInv(CInv.TX, n*maxInvElements+i) for i in range(0, maxInvElements)]))
            test_node.sync_with_ping()
            assert_equal(len(self.nodes[0].listbanned()), 0)
            test_node.wait_for_getdata()

        # check if we have all the inv messages
        assert_equal(sum(test_node.wanted_inv_lengths), maxInvElements*4*recvinvqueuefactor)

        # send additional inv messages
        test_node.send_message(msg_inv([CInv(CInv.TX, 4*recvinvqueuefactor * maxInvElements + i) for i in range(0, maxInvElements)]))
        test_node.sync_with_ping()
        assert_equal(len(self.nodes[0].listbanned()), 0)
        test_node.wait_for_getdata()

        # check that the number of inv messages no longer matches (we announced more invs than the node can remember and ask for)
        assert_greater_than(maxInvElements*(4*recvinvqueuefactor+1), sum(test_node.wanted_inv_lengths))
        self.stop_node(0)
        logger.info("recvinvqueuefactor test finished successfully\n")
Example #3
    def run_test(self):
        source = self.nodes[0]
        self.setup_stake_coins(source)

        # Exit IBD
        source.generate(1)
        self.sync_all()

        start_time = time.perf_counter()
        real_tx = source.sendtoaddress(self.nodes[1].getnewaddress(), 1)
        imaginary_tx = "424fe97567d21b6ae7f821b6b3af2519478a9f3def1e0bcb6dd0560eacfb0241"

        real_probe = Probe(real_tx)
        imaginary_probe = Probe(imaginary_tx)
        inv_sender = P2PInterface()

        source.add_p2p_connection(real_probe)
        source.add_p2p_connection(imaginary_probe)
        source.add_p2p_connection(inv_sender)
        network_thread_start()

        real_probe.wait_for_verack()
        imaginary_probe.wait_for_verack()
        inv_sender.wait_for_verack()

        # Finally start probing for some time
        while time.perf_counter() < start_time + PROBING_DURATION_SECONDS:
            real_probe.send_get_data()
            imaginary_probe.send_get_data()

            real_probe.send_get_mempool()
            imaginary_probe.send_get_mempool()

            time.sleep(0.5)

        while True:
            try:
                check_during_embargo(real_probe, imaginary_probe)
                break
            except AssertionError:
                if time.perf_counter() - start_time >= TEST_TIMEOUT:
                    raise
                time.sleep(0.5)

        # Force embargo to end
        inv_sender.send_message(msg_inv([CInv(1, int(real_tx, 16))]))

        while True:
            try:
                real_probe.send_get_data()
                check_after_embargo(real_probe)
                break
            except AssertionError:
                if time.perf_counter() - start_time >= TEST_TIMEOUT:
                    raise
                time.sleep(0.5)
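The embargo is ended above by re-announcing the real transaction over P2P. The only conversion involved is turning the hex txid string returned by RPC into the integer that CInv expects; a minimal illustration using the txid literal from this example:

txid_hex = "424fe97567d21b6ae7f821b6b3af2519478a9f3def1e0bcb6dd0560eacfb0241"
txid_int = int(txid_hex, 16)
announcement = msg_inv([CInv(1, txid_int)])  # inv type 1 = MSG_TX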
Example #4
    def run_ban_test(self, maxprotocolrecvpayloadlength=0):
        """
        This method tests that our connection gets banned when we send an inv message with too many elements
        (violating the limit set by maxprotocolrecvpayloadlength).
        Setting maxprotocolrecvpayloadlength very high results in long execution times and high memory usage.
        The minimal value for maxprotocolrecvpayloadlength is 1 MiB.

        We send 3 inv messages:
        -> 1.) max_elements-1   ->  not banned
        -> 2.) max_elements     ->  not banned
        -> 3.) max_elements+1   ->  banned
        """
        # start node, protoconf messages, ...
        test_node = self.start_node_with_protoconf(maxprotocolrecvpayloadlength)

        maxInvElements = CInv.estimateMaxInvElements(test_node.max_recv_payload_length)
        logger.info(
            "Received bitcoind max message size: {} B, which represents {} elements. ".format(test_node.max_recv_payload_length,
                                                                                              maxInvElements))
        ### TEST WITH maxInvElements - 1, maxInvElements and maxInvElements + 1
        # 1. Send bitcoind an inv message that is smaller than max_recv_payload_length.
        logger.info("Sending inv message with: {} elements. Max allowed : {}".format(maxInvElements-1, maxInvElements))
        test_node.send_message(msg_inv([CInv(CInv.TX, i) for i in range(0, maxInvElements - 1)]))
        test_node.sync_with_ping()
        assert_equal(len(self.nodes[0].listbanned()), 0)  # not banned

        # 2. Send bitcoind an inv message that is equal to max_recv_payload_length.
        logger.info("Sending inv message with: {} elements. Max allowed : {}".format(maxInvElements, maxInvElements))
        test_node.send_message(msg_inv([CInv(CInv.TX, maxInvElements+i) for i in range(0, maxInvElements)]))
        test_node.sync_with_ping()
        assert_equal(len(self.nodes[0].listbanned()), 0)  # not banned

        # 3. Send bitcoind an inv message that is larger than max_recv_payload_length.
        logger.info("Sending inv message with: {} elements. Max allowed : {}".format(maxInvElements + 1, maxInvElements))
        logger.info("Expecting to be banned...")
        test_node.send_message(msg_inv([CInv(CInv.TX, 2*maxInvElements+i) for i in range(0, maxInvElements + 1)]))
        test_node.wait_for_disconnect()
        assert (self.nodes[0].closed)  # disconnected
        assert_equal(len(self.nodes[0].listbanned()), 1)  # banned
        logger.info("Banned nodes : {}".format(self.nodes[0].listbanned()))
        self.nodes[0].setban("127.0.0.1", "remove")  # remove ban
        self.stop_node(0)
        logger.info("ban test finished successfully\n")
Example #5
    def test_compactblock_requests(self, node, test_node, version, segwit):
        # Try announcing a block with an inv or header, expect a compactblock
        # request
        for announce in ["inv", "header"]:
            block = self.build_block_on_tip(node, segwit=segwit)
            with mininode_lock:
                test_node.last_message.pop("getdata", None)

            if announce == "inv":
                test_node.send_message(msg_inv([CInv(2, block.sha256)]))
                wait_until(lambda: "getheaders" in test_node.last_message,
                           timeout=30,
                           lock=mininode_lock)
                test_node.send_header_for_blocks([block])
            else:
                test_node.send_header_for_blocks([block])
            wait_until(lambda: "getdata" in test_node.last_message,
                       timeout=30,
                       lock=mininode_lock)
            assert_equal(len(test_node.last_message["getdata"].inv), 1)
            assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
            assert_equal(test_node.last_message["getdata"].inv[0].hash,
                         block.sha256)

            # Send back a compactblock message that omits the coinbase
            comp_block = HeaderAndShortIDs()
            comp_block.header = CBlockHeader(block)
            comp_block.nonce = 0
            [k0, k1] = comp_block.get_siphash_keys()
            coinbase_hash = block.vtx[0].sha256
            if version == 2:
                coinbase_hash = block.vtx[0].calc_sha256(True)
            comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
            test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
            assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
            # Expect a getblocktxn message.
            with mininode_lock:
                assert ("getblocktxn" in test_node.last_message)
                absolute_indexes = test_node.last_message[
                    "getblocktxn"].block_txn_request.to_absolute()
            assert_equal(absolute_indexes, [0])  # should be a coinbase request

            # Send the coinbase, and verify that the tip advances.
            if version == 2:
                msg = msg_witness_blocktxn()
            else:
                msg = msg_blocktxn()
            msg.block_transactions.blockhash = block.sha256
            msg.block_transactions.transactions = [block.vtx[0]]
            test_node.send_and_ping(msg)
            assert_equal(int(node.getbestblockhash(), 16), block.sha256)
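For reference, the numeric inventory types used across these examples are the standard Bitcoin P2P codes (some examples use the framework constant CInv.TX instead of the literal value):

MSG_TX = 1           # transaction announcement, e.g. CInv(1, int(txid, 16))
MSG_BLOCK = 2        # block announcement, e.g. CInv(2, block.sha256)
MSG_CMPCT_BLOCK = 4  # compact block, the getdata type asserted in the example above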
Example #6
    def run_maxprotocolrecvpayloadlength_test(self, n_of_inv_to_send, maxprotocolrecvpayloadlength=0):
        """
        This method sends an inv message with n_of_inv_to_send elements to the node.
        It checks that the node respects our settings for the size of inv messages, sends getdata for all inv elements
        and respects the limits we sent in our protoconf message.
        n_of_inv_to_send should not be larger than CInv.estimateMaxInvElements(maxprotocolrecvpayloadlength)
        """
        # start node, protoconf messages, ...
        test_node = self.start_node_with_protoconf(maxprotocolrecvpayloadlength)

        # If maxprotocolrecvpayloadlength is not set, use the default value from bitcoind
        if not maxprotocolrecvpayloadlength:
            maxprotocolrecvpayloadlength = 2 * LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH

        # Assert received max payload length from bitcoind node to be the same as we set in kwargs
        assert_equal(test_node.max_recv_payload_length, maxprotocolrecvpayloadlength)
        # Calculate maximum number of elements that bitcoind node is willing to receive
        maxInvElements = CInv.estimateMaxInvElements(test_node.max_recv_payload_length)
        logger.info(
            "Received bitcoind max message size: {} B, which represents {} elements. ".format(test_node.max_recv_payload_length, maxInvElements))

        # Calculate the max size of an inv message we can accept and how many elements it contains.
        # Remote node has to respect our settings.
        expected_inv_len = CInv.estimateMaxInvElements(LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH)
        logger.info("Our max message size: {} B, which represents {} elements. ".format(
            LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH, expected_inv_len))

        # Send inv message with specified number of elements
        logger.info("Sending inv message with: {} elements.  Max allowed : {}".format(n_of_inv_to_send, maxInvElements))
        test_node.send_message(msg_inv([CInv(CInv.TX, i) for i in range(0, n_of_inv_to_send)]))
        test_node.sync_with_ping()
        assert_equal(len(self.nodes[0].listbanned()), 0)
        test_node.wait_for_getdata()
        logger.info("Received GetData from bitcoind.")

        # We should receive getdata messages of 1 MiB each (29126 elements = CInv.estimateMaxInvElements(LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH))
        # and a last getdata message with the remaining elements.
        max_elements_received_per_message = CInv.estimateMaxInvElements(LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH)
        number_of_getdata_messages = ceil(n_of_inv_to_send / max_elements_received_per_message)
        # number of received messages should be the same as we calculated above
        assert_equal(len(test_node.wanted_inv_lengths), number_of_getdata_messages)
        for i in range(0, number_of_getdata_messages - 1):
            assert_equal(test_node.wanted_inv_lengths[i], expected_inv_len)
        remained_for_last_getdata = n_of_inv_to_send - (number_of_getdata_messages - 1) * expected_inv_len
        # last message should contain the exact number of inv elements left
        assert_equal(test_node.wanted_inv_lengths[number_of_getdata_messages - 1], remained_for_last_getdata)
        self.stop_node(0)
        logger.info("maxprotocolrecvpayloadlength test finished successfully\n")
Example #7
 def send_block_inv(self, blockhash):
     msg = msg_inv()
     msg.inv = [CInv(2, blockhash)]
     self.send_message(msg)
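A hedged usage sketch for a helper like the one above, assuming a node handle with the standard getbestblockhash RPC and a connected P2P object (here called p2p_conn) that defines send_block_inv:

# Announce the node's current tip back to it over P2P; the RPC returns the
# hash as a hex string, while CInv expects an integer.
block_hash = int(node.getbestblockhash(), 16)
p2p_conn.send_block_inv(block_hash)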
Example #8
    def run_test(self):
        @contextlib.contextmanager
        def run_connection(test_node, title):
            logger.debug("setup %s", title)
            connections = []
            connections.append(
                mininode.NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                                  test_node))

            test_node.add_connection(connections[0])
            thr = mininode.NetworkThread()
            thr.start()  # Start up network handling in another thread

            logger.debug("before %s", title)
            yield
            logger.debug("after %s", title)

            connections[0].close()
            del connections
            thr.join()

            logger.debug("finished %s", title)

        ELEMENTS_PER_1MiB = 29126
        ELEMENTS_PER_2MiB = 58254

        # 1. test
        # Send protoconf with 0 fields. Bitcoind should disconnect the node, since the minimum number of fields is 1
        test_node = mininode.NodeConnCB()

        def send_protoconf(conn):
            conn.send_message(
                mininode.msg_protoconf(CProtoconfWithZeroFields()))

        test_node.send_protoconf = send_protoconf

        with run_connection(test_node, "0 fields"):
            test_node.wait_for_verack()
            test_node.wait_for_disconnect()
            assert (self.nodes[0].closed)
            assert_equal(len(self.nodes[0].listbanned()), 0)

        # 2. test
        # Send protoconf with max_recv_payload_length of 1 B. The node should be disconnected, since the minimum message size is 1 MiB
        test_node = mininode.NodeConnCB()

        def send_protoconf_1B(conn):
            conn.send_message(mininode.msg_protoconf(mininode.CProtoconf(1,
                                                                         1)))

        test_node.send_protoconf = send_protoconf_1B

        with run_connection(test_node, "too small protoconf"):
            test_node.wait_for_verack()
            test_node.wait_for_disconnect()
            assert (self.nodes[0].closed)
            assert_equal(len(self.nodes[0].listbanned()), 0)

        # 3. test
        # Send protoconf with numberOfFields=2. max_recv_payload_length should be parsed correctly.
        test_node = mininode.NodeConnCB()

        def send_protoconf_2Fields(conn):
            conn.send_message(
                mininode.msg_protoconf(
                    CProtoconfWithNewField(2,
                                           MESSAGE_LENGTH_1MiB_PLUS_1_ELEMENT,
                                           5)))

        test_node.send_protoconf = send_protoconf_2Fields

        wanted_inv_lengths = []

        def on_getdata(conn, message):
            wanted_inv_lengths.append(len(message.inv))

        test_node.on_getdata = on_getdata

        # Set MESSAGE_LENGTH_1MiB_PLUS_1_ELEMENT to a value slightly larger than 1 MiB
        # (1 MiB -- 29126 elements --> work with 29127 elements).
        # This makes sure that bitcoind does not just fall back to the default (1 MiB) value.
        MESSAGE_LENGTH_1MiB_PLUS_1_ELEMENT = 1 * 1024 * 1024 + 4 + 32
        with run_connection(test_node, "2 fields"):
            expected_inv_len = mininode.CInv.estimateMaxInvElements(
                MESSAGE_LENGTH_1MiB_PLUS_1_ELEMENT)  #29127 elements
            assert_equal(expected_inv_len, ELEMENTS_PER_1MiB + 1)
            logger.info(
                "Our max message size: {} B, which represents {} elements. ".
                format(MESSAGE_LENGTH_1MiB_PLUS_1_ELEMENT, expected_inv_len))

            # 3.0. Prepare initial block. Needed so that GETDATA can be sent back.
            self.nodes[0].generate(1)

            # 3.1. Receive bitcoind's protoconf (currently 2 MiB) and send it an inv message
            test_node.wait_for_protoconf()
            max_recv_payload_length = test_node.last_message[
                "protoconf"].protoconf.max_recv_payload_length
            assert_equal(max_recv_payload_length,
                         mininode.MAX_PROTOCOL_RECV_PAYLOAD_LENGTH)
            maxInvElements = mininode.CInv.estimateMaxInvElements(
                max_recv_payload_length)
            logger.info(
                "Received bitcoind max message size: {} B, which represents {} elements. "
                .format(max_recv_payload_length, maxInvElements))

            # 3.2. Send bitcoind an inv message (should be 2 MiB)
            test_node.send_message(
                mininode.msg_inv([
                    mininode.CInv(mininode.CInv.TX, i)
                    for i in range(0, maxInvElements)
                ]))

            # 3.3. Receive GetData.
            test_node.wait_for_getdata()
            test_node.sync_with_ping()

            # 3.4. We should receive 2 GetData messages with (1 * 1024 * 1024 + 4 + 32)B size (29127 elements).
            assert_equal(wanted_inv_lengths[0], expected_inv_len)
            assert_equal(wanted_inv_lengths[1], expected_inv_len)
            assert_equal(len(wanted_inv_lengths), 2)

        ########
        # 4. test
        # Send a protoconf of size LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH
        test_node = mininode.NodeConnCB()

        def send_largest_protoconf(conn):
            # send protoconf of size LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH
            conn.send_message(
                msg_protoconf_largest(
                    mininode.LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH))

        test_node.send_protoconf = send_largest_protoconf

        with run_connection(test_node, "largest protoconf"):
            test_node.wait_for_verack()
            test_node.sync_with_ping()
            assert_equal(len(self.nodes[0].listbanned()), 0)

        # 5. test
        # Send a protoconf that is larger than LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH
        test_node = mininode.NodeConnCB()

        def send_oversized_protoconf(conn):
            # send protoconf of size LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH + 1
            conn.send_message(
                msg_protoconf_largest(
                    mininode.LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH + 1))

        test_node.send_protoconf = send_oversized_protoconf

        with run_connection(test_node, "oversized protoconf"):
            test_node.wait_for_verack()
            test_node.wait_for_disconnect()
            assert (self.nodes[0].closed)
            assert_equal(len(self.nodes[0].listbanned()), 1)
Example #9
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()  # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(
            NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [n.generate(1) for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(
                create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [x.sync_with_ping() for x in [test_node, white_node]]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        print("First height 2 block accepted by both nodes")

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(
                create_block(tips[i], create_coinbase(2),
                             blocks_h2[i].nTime + 1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [x.sync_with_ping() for x in [test_node, white_node]]
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        print("Second height 2 block accepted only from whitelisted peer")

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(
                create_block(blocks_h2f[i].sha256, create_coinbase(3),
                             blocks_h2f[i].nTime + 1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [x.sync_with_ping() for x in [test_node, white_node]]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        try:
            self.nodes[0].getblock(blocks_h3[0].hash)
            print(
                "Unrequested more-work block accepted from non-whitelisted peer"
            )
        except:
            raise AssertionError(
                "Unrequested more work block was not processed")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        print("Successfully reorged to length 3 chain from whitelisted peer")

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []  # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256,
                                          create_coinbase(i + 4),
                                          tips[j].nTime + 1)
                next_block.solve()
                if j == 0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        for x in all_blocks:
            try:
                self.nodes[0].getblock(x.hash)
                if x == all_blocks[287]:
                    raise AssertionError(
                        "Unrequested block too far-ahead should have been ignored"
                    )
            except:
                if x == all_blocks[287]:
                    print("Unrequested block too far-ahead not processed")
                else:
                    raise AssertionError(
                        "Unrequested block with more work should have been accepted"
                    )

        headers_message.headers.pop()  # Ensure the last block is unrequested
        white_node.send_message(headers_message)  # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        try:
            white_node.sync_with_ping()
            self.nodes[1].getblock(tips[1].hash)
            print(
                "Unrequested block far ahead of tip accepted from whitelisted peer"
            )
        except:
            raise AssertionError(
                "Unrequested block from whitelisted peer not accepted")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        print(
            "Unrequested block that would complete more-work chain was ignored"
        )

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        print("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        print("Successfully reorged to longer chain from non-whitelisted peer")

        [c.disconnect_node() for c in connections]
Example #10
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()   # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start() # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = time.time() + 1
        for i in xrange(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        print "First height 2 block accepted by both nodes"

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in xrange(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        print "Second height 2 block accepted only from whitelisted peer"

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in xrange(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        try:
            self.nodes[0].getblock(blocks_h3[0].hash)
            print "Unrequested more-work block accepted from non-whitelisted peer"
        except:
            raise AssertionError("Unrequested more work block was not processed")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        print "Successfully reorged to length 3 chain from whitelisted peer"

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []   # node0's blocks
        for j in xrange(2):
            for i in xrange(288):
                next_block = create_block(tips[j].sha256, create_coinbase(), tips[j].nTime+1)
                next_block.solve()
                if j==0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        for x in all_blocks:
            try:
                self.nodes[0].getblock(x.hash)
                if x == all_blocks[287]:
                    raise AssertionError("Unrequested block too far-ahead should have been ignored")
            except:
                if x == all_blocks[287]:
                    print "Unrequested block too far-ahead not processed"
                else:
                    raise AssertionError("Unrequested block with more work should have been accepted")

        headers_message.headers.pop() # Ensure the last block is unrequested
        white_node.send_message(headers_message) # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        try:
            white_node.sync_with_ping()
            self.nodes[1].getblock(tips[1].hash)
            print "Unrequested block far ahead of tip accepted from whitelisted peer"
        except:
            raise AssertionError("Unrequested block from whitelisted peer not accepted")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        print "Unrequested block that would complete more-work chain was ignored"

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        print "Inv at tip triggered getdata for unprocessed block"

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        print "Successfully reorged to longer chain from non-whitelisted peer"

        [ c.disconnect_node() for c in connections ]