def test_compactblocks_not_at_tip(self, test_node):
        node = self.nodes[0]
        # Test that requesting old compactblocks doesn't work.
        MAX_CMPCTBLOCK_DEPTH = 5
        new_blocks = []
        for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
            test_node.clear_block_announcement()
            new_blocks.append(node.generate(1)[0])
            wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)

        test_node.clear_block_announcement()
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)

        test_node.clear_block_announcement()
        node.generate(1)
        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
        test_node.clear_block_announcement()
        with mininode_lock:
            test_node.last_message.pop("block", None)
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
        with mininode_lock:
            test_node.last_message["block"].block.calc_sha256()
            assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))

        # Generate an old compactblock, and verify that it's not accepted.
        cur_height = node.getblockcount()
        hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
        block = self.build_block_on_tip(node)
        block.hashPrevBlock = hashPrevBlock
        block.solve()

        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block)
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

        tips = node.getchaintips()
        found = False
        for x in tips:
            if x["hash"] == block.hash:
                assert_equal(x["status"], "headers-only")
                found = True
                break
        assert found

        # Requesting this block via getblocktxn should silently fail
        # (to avoid fingerprinting attacks).
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
        with mininode_lock:
            test_node.last_message.pop("blocktxn", None)
        test_node.send_and_ping(msg)
        with mininode_lock:
            assert "blocktxn" not in test_node.last_message
Example #2
    def run_test(self):
        gen_node = self.nodes[0]  # The block and tx generating node
        gen_node.generate(1)

        inbound_peer = self.nodes[0].add_p2p_connection(P2PNode())  # An "attacking" inbound peer

        MAX_REPEATS = 100
        self.log.info("Running test up to {} times.".format(MAX_REPEATS))
        for i in range(MAX_REPEATS):
            self.log.info('Run repeat {}'.format(i + 1))
            txid = gen_node.sendtoaddress(gen_node.getnewaddress(), 0.01)

            want_tx = msg_getdata()
            want_tx.inv.append(CInv(t=1, h=int(txid, 16)))
            inbound_peer.last_message.pop('notfound', None)
            inbound_peer.send_message(want_tx)
            inbound_peer.sync_with_ping()

            if inbound_peer.last_message.get('notfound'):
                self.log.debug('tx {} was not yet announced to us.'.format(txid))
                self.log.debug("node has responded with a notfound message. End test.")
                assert_equal(inbound_peer.last_message['notfound'].vec[0].hash, int(txid, 16))
                inbound_peer.last_message.pop('notfound')
                break
            else:
                self.log.debug('tx {} was already announced to us. Try test again.'.format(txid))
                assert int(txid, 16) in [inv.hash for inv in inbound_peer.last_message['inv'].inv]
Example #3
 def on_inv(self, message):
     want = msg_getdata()
     for i in message.inv:
         if i.type != 0:
             want.inv.append(i)
     if len(want.inv):
         self.send_message(want)
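
For context, a minimal sketch of how an on_inv override like the one above is typically wired into a test's P2P client. The import paths follow Bitcoin Core's functional test framework (test_framework.p2p in recent trees, test_framework.mininode in older ones); the class name and the usage line are illustrative assumptions, not part of the original example.

from test_framework.messages import msg_getdata
from test_framework.p2p import P2PInterface  # older trees: test_framework.mininode


class GetdataOnInv(P2PInterface):
    """Reply to every inv announcement with a getdata for the same items."""

    def on_inv(self, message):
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:  # skip error/unknown inventory entries
                want.inv.append(i)
        if len(want.inv):
            self.send_message(want)

# Hypothetical usage inside a test:
#   peer = self.nodes[0].add_p2p_connection(GetdataOnInv())
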
Example #4
 def test_oversized_getdata_msg(self):
     size = MAX_INV_SIZE + 1
     self.test_oversized_msg(msg_getdata([CInv(MSG_TX, 1)] * size), size)
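
The test_oversized_msg helper is defined elsewhere in that test file and is not shown here. A hedged sketch of one possible implementation, assuming the node logs a "Misbehaving" entry when a peer sends a getdata (or inv) carrying more than MAX_INV_SIZE entries; the exact log text and peer scoring vary between versions:

    def test_oversized_msg(self, msg, size):
        # Hypothetical helper: send the oversized message and check that the
        # node flags the sending peer as misbehaving in its debug log.
        self.log.info("Test oversized message of size {}".format(size))
        with self.nodes[0].assert_debug_log(['Misbehaving']):
            self.nodes[0].p2p.send_message(msg)
        self.nodes[0].p2p.sync_with_ping()
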
Example #5
    def run_test(self):
        node = self.nodes[0]

        # duplicate the deterministic sig test from src/test/key_tests.cpp
        privkey = ECKey()
        privkey.set(
            bytes.fromhex(
                "12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747"
            ), True)
        wif_privkey = bytes_to_wif(privkey.get_bytes())

        self.log.info(
            "Check the node is signalling the avalanche service bit only if there is a proof."
        )
        assert_equal(
            int(node.getnetworkinfo()['localservices'], 16) & NODE_AVALANCHE,
            0)

        # Create stakes by mining blocks
        addrkey0 = node.get_deterministic_priv_key()
        blockhashes = node.generatetoaddress(2, addrkey0.address)
        stakes = create_coinbase_stakes(node, [blockhashes[0]], addrkey0.key)

        proof_sequence = 11
        proof_expiration = 12
        proof = node.buildavalancheproof(proof_sequence, proof_expiration,
                                         wif_privkey, stakes)

        # Restart the node
        self.restart_node(
            0, self.extra_args[0] + [
                "-avaproof={}".format(proof),
                "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN",
            ])

        assert_equal(
            int(node.getnetworkinfo()['localservices'], 16) & NODE_AVALANCHE,
            NODE_AVALANCHE)

        def check_avahello(args):
            # Restart the node with the given args
            self.restart_node(0, self.extra_args[0] + args)

            peer = get_ava_p2p_interface(node)

            avahello = peer.wait_for_avahello().hello

            avakey = ECPubKey()
            avakey.set(bytes.fromhex(node.getavalanchekey()))
            assert avakey.verify_schnorr(avahello.sig,
                                         avahello.get_sighash(peer))

        self.log.info(
            "Test the avahello signature with a generated delegation")
        check_avahello([
            "-avaproof={}".format(proof),
            "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN"
        ])

        master_key = ECKey()
        master_key.generate()
        limited_id = FromHex(LegacyAvalancheProof(), proof).limited_proofid
        delegation = node.delegateavalancheproof(
            f"{limited_id:0{64}x}",
            bytes_to_wif(privkey.get_bytes()),
            master_key.get_pubkey().get_bytes().hex(),
        )

        self.log.info("Test the avahello signature with a supplied delegation")
        check_avahello([
            "-avaproof={}".format(proof),
            "-avadelegation={}".format(delegation),
            "-avamasterkey={}".format(bytes_to_wif(master_key.get_bytes())),
        ])

        stakes = create_coinbase_stakes(node, [blockhashes[1]], addrkey0.key)
        interface_proof_hex = node.buildavalancheproof(proof_sequence,
                                                       proof_expiration,
                                                       wif_privkey, stakes)
        limited_id = FromHex(LegacyAvalancheProof(),
                             interface_proof_hex).limited_proofid

        # delegate
        delegated_key = ECKey()
        delegated_key.generate()
        interface_delegation_hex = node.delegateavalancheproof(
            f"{limited_id:0{64}x}", bytes_to_wif(privkey.get_bytes()),
            delegated_key.get_pubkey().get_bytes().hex(), None)

        self.log.info("Test that wrong avahello signature causes a ban")
        bad_interface = get_ava_p2p_interface(node)
        wrong_key = ECKey()
        wrong_key.generate()
        with node.assert_debug_log([
                "Misbehaving",
                "peer=1 (0 -> 100) BAN THRESHOLD EXCEEDED: invalid-avahello-signature"
        ]):
            bad_interface.send_avahello(interface_delegation_hex, wrong_key)
            bad_interface.wait_for_disconnect()

        self.log.info(
            'Check that receiving a valid avahello triggers a proof getdata request'
        )
        good_interface = get_ava_p2p_interface(node)
        proofid = good_interface.send_avahello(interface_delegation_hex,
                                               delegated_key)

        def getdata_found(peer, proofid):
            with p2p_lock:
                return peer.last_message.get(
                    "getdata") and peer.last_message["getdata"].inv[
                        -1].hash == proofid

        self.wait_until(lambda: getdata_found(good_interface, proofid))

        self.log.info('Check that we can download the proof from our peer')

        node_proofid = FromHex(LegacyAvalancheProof(), proof).proofid

        def wait_for_proof_validation():
            # Connect some blocks to trigger the proof verification
            node.generate(1)
            self.wait_until(lambda: node_proofid in get_proof_ids(node))

        wait_for_proof_validation()

        getdata = msg_getdata([CInv(MSG_AVA_PROOF, node_proofid)])

        self.log.info(
            "Proof has been inv'ed recently, check it can be requested")
        good_interface.send_message(getdata)

        def proof_received(peer):
            with p2p_lock:
                return peer.last_message.get("avaproof") and peer.last_message[
                    "avaproof"].proof.proofid == node_proofid

        self.wait_until(lambda: proof_received(good_interface))

        # Restart the node
        self.restart_node(
            0, self.extra_args[0] + [
                "-avaproof={}".format(proof),
                "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN",
            ])
        wait_for_proof_validation()

        self.log.info(
            "The proof has not been announced, it cannot be requested")
        peer = get_ava_p2p_interface(node, services=NODE_NETWORK)
        peer.send_message(getdata)

        # Give enough time for the node to answer. Since we cannot check for a
        # non-event this is the best we can do
        time.sleep(2)
        assert not proof_received(peer)

        self.log.info("The proof is known for long enough to be requested")
        current_time = int(time.time())
        node.setmocktime(current_time + UNCONDITIONAL_RELAY_DELAY)

        peer.send_message(getdata)
        self.wait_until(lambda: proof_received(peer))

        # Restart the node
        self.restart_node(
            0, self.extra_args[0] + [
                "-avaproof={}".format(proof),
                "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN",
            ])
        wait_for_proof_validation()
        # The only peer is the node itself
        assert_equal(len(node.getavalanchepeerinfo()), 1)
        assert_equal(node.getavalanchepeerinfo()[0]["proof"], proof)

        peer = get_ava_p2p_interface(node)
        peer_proofid = peer.send_avahello(interface_delegation_hex,
                                          delegated_key)

        self.wait_until(lambda: getdata_found(peer, peer_proofid))
        assert peer_proofid not in get_proof_ids(node)

        self.log.info(
            "Check that the peer gets added as an avalanche node as soon as the node knows about the proof"
        )
        node.sendavalancheproof(interface_proof_hex)

        def has_node_count(count):
            peerinfo = node.getavalanchepeerinfo()
            return (len(peerinfo) == 2
                    and peerinfo[-1]["proof"] == interface_proof_hex
                    and peerinfo[-1]["nodecount"] == count)

        self.wait_until(lambda: has_node_count(1))

        self.log.info(
            "Check that the peer gets added immediately if the proof is already known"
        )

        # Connect another peer using the same proof
        peer_proof_known = get_ava_p2p_interface(node)
        peer_proof_known.send_avahello(interface_delegation_hex, delegated_key)

        self.wait_until(lambda: has_node_count(2))

        self.log.info("Invalidate the proof and check the nodes are removed")
        tip = node.getbestblockhash()
        # Invalidate the block with the proof utxo
        node.invalidateblock(blockhashes[1])
        # Change the address to make sure we don't generate a block identical
        # to the one we just invalidated. Can be reverted to generate(1) once
        # D9694 or D9697 has landed.
        forked_tip = node.generatetoaddress(1, ADDRESS_ECREG_UNSPENDABLE)[0]
        self.wait_until(lambda: node.getbestblockhash() == forked_tip)

        self.wait_until(lambda: len(node.getavalanchepeerinfo()) == 1)
        assert peer_proofid not in get_proof_ids(node)

        self.log.info("Reorg back and check the nodes are added back")
        node.invalidateblock(forked_tip)
        node.reconsiderblock(tip)
        self.wait_until(lambda: has_node_count(2), timeout=2)
Example #6
    def test_compactblock_construction(self, node, test_node, version, use_witness_address):
        # Generate a bunch of transactions.
        node.generate(101)
        num_transactions = 25
        address = node.getnewaddress()
        if use_witness_address:
            # Want at least one segwit spend, so move all funds to
            # a witness address.
            address = node.addwitnessaddress(address)
            value_to_send = node.getbalance()
            node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
            node.generate(1)

        segwit_tx_generated = False
        for i in range(num_transactions):
            txid = node.sendtoaddress(address, 0.1)
            hex_tx = node.gettransaction(txid)["hex"]
            tx = FromHex(CTransaction(), hex_tx)
            if not tx.wit.is_null():
                segwit_tx_generated = True

        if use_witness_address:
            assert segwit_tx_generated  # check that our test is not broken

        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        test_node.wait_for_block_announcement(tip)

        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node, node, version)

        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)

        # Store the raw block in our internal format.
        block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False))
        for tx in block.vtx:
            tx.calc_sha256()
        block.rehash()

        # Wait until the block was announced (via compact blocks)
        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)

        # Now fetch the compact block using a normal non-announce getdata
        with mininode_lock:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
            test_node.send_message(msg_getdata([inv]))

        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
Example #7
 def send_getdata_for_block(self, blockhash):
     getdata_request = msg_getdata()
     getdata_request.inv.append(CInv(2, int(blockhash, 16)))
     self.send_message(getdata_request)
Example #8
    def test_compactblock_construction(self, node, test_node, version,
                                       use_witness_address):
        # Generate a bunch of transactions.
        node.generate(101)
        num_transactions = 25
        address = node.getnewaddress()
        if use_witness_address:
            # Want at least one segwit spend, so move all funds to
            # a witness address.
            address = node.getnewaddress(address_type='bech32')
            value_to_send = node.getbalance()
            node.sendtoaddress(address,
                               satoshi_round(value_to_send - Decimal(0.1)))
            node.generate(1)

        segwit_tx_generated = False
        for i in range(num_transactions):
            txid = node.sendtoaddress(address, 0.1)
            hex_tx = node.gettransaction(txid)["hex"]
            tx = FromHex(CTransaction(), hex_tx)
            if not tx.wit.is_null():
                segwit_tx_generated = True

        if use_witness_address:
            assert segwit_tx_generated  # check that our test is not broken

        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        test_node.wait_for_block_announcement(tip)

        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node, node, version)

        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)

        # Store the raw block in our internal format.
        block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
        for tx in block.vtx:
            tx.calc_sha256()
        block.rehash()

        # Wait until the block was announced (via compact blocks)
        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert ("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)

        # Now fetch the compact block using a normal non-announce getdata
        with mininode_lock:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
            test_node.send_message(msg_getdata([inv]))

        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert ("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)
Example #9
 def getBlock(self, blkHash):
   self.block = None
   inv = CInv(t=2, h=int(blkHash, 16))
   self.send_message(msg_getdata(inv=[inv]))
   wait_until(lambda: self.block is not None)
   return self.block
Example #10
    def run_test(self):
        # Nodes will only request hb compact blocks mode when they're out of IBD
        for node in self.nodes:
            assert not node.getblockchaininfo()['initialblockdownload']

        p2p_conn_blocksonly = self.nodes[0].add_p2p_connection(P2PInterface())
        p2p_conn_high_bw = self.nodes[1].add_p2p_connection(P2PInterface())
        p2p_conn_low_bw = self.nodes[3].add_p2p_connection(P2PInterface())
        for conn in [p2p_conn_blocksonly, p2p_conn_high_bw, p2p_conn_low_bw]:
            assert_equal(conn.message_count['sendcmpct'], 2)
            conn.send_and_ping(msg_sendcmpct(announce=False, version=2))

        # Nodes:
        #   0 -> blocksonly
        #   1 -> high bandwidth
        #   2 -> miner
        #   3 -> low bandwidth
        #
        # Topology:
        #   p2p_conn_blocksonly ---> node0
        #   p2p_conn_high_bw    ---> node1
        #   p2p_conn_low_bw     ---> node3
        #   node2 (no connections)
        #
        # node2 produces blocks that are passed to the rest of the nodes
        # through the respective p2p connections.

        self.log.info("Test that -blocksonly nodes do not select peers for BIP152 high bandwidth mode")

        block0 = self.build_block_on_tip()

        # A -blocksonly node should not request BIP152 high bandwidth mode upon
        # receiving a new valid block at the tip.
        p2p_conn_blocksonly.send_and_ping(msg_block(block0))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block0.sha256)
        assert_equal(p2p_conn_blocksonly.message_count['sendcmpct'], 2)
        assert_equal(p2p_conn_blocksonly.last_message['sendcmpct'].announce, False)

        # A normal node participating in transaction relay should request BIP152
        # high bandwidth mode upon receiving a new valid block at the tip.
        p2p_conn_high_bw.send_and_ping(msg_block(block0))
        assert_equal(int(self.nodes[1].getbestblockhash(), 16), block0.sha256)
        p2p_conn_high_bw.wait_until(lambda: p2p_conn_high_bw.message_count['sendcmpct'] == 3)
        assert_equal(p2p_conn_high_bw.last_message['sendcmpct'].announce, True)

        # Don't send a block from the p2p_conn_low_bw so the low bandwidth node
        # doesn't select it for BIP152 high bandwidth relay.
        self.nodes[3].submitblock(block0.serialize().hex())

        self.log.info("Test that -blocksonly nodes send getdata(BLOCK) instead"
                      " of getdata(CMPCT) in BIP152 low bandwidth mode")

        block1 = self.build_block_on_tip()

        p2p_conn_blocksonly.send_message(msg_headers(headers=[CBlockHeader(block1)]))
        p2p_conn_blocksonly.sync_send_with_ping()
        assert_equal(p2p_conn_blocksonly.last_message['getdata'].inv, [CInv(MSG_BLOCK | MSG_WITNESS_FLAG, block1.sha256)])

        p2p_conn_high_bw.send_message(msg_headers(headers=[CBlockHeader(block1)]))
        p2p_conn_high_bw.sync_send_with_ping()
        assert_equal(p2p_conn_high_bw.last_message['getdata'].inv, [CInv(MSG_CMPCT_BLOCK, block1.sha256)])

        self.log.info("Test that getdata(CMPCT) is still sent on BIP152 low bandwidth connections"
                      " when no -blocksonly nodes are involved")

        p2p_conn_low_bw.send_and_ping(msg_headers(headers=[CBlockHeader(block1)]))
        p2p_conn_low_bw.sync_with_ping()
        assert_equal(p2p_conn_low_bw.last_message['getdata'].inv, [CInv(MSG_CMPCT_BLOCK, block1.sha256)])

        self.log.info("Test that -blocksonly nodes still serve compact blocks")

        def test_for_cmpctblock(block):
            if 'cmpctblock' not in p2p_conn_blocksonly.last_message:
                return False
            return p2p_conn_blocksonly.last_message['cmpctblock'].header_and_shortids.header.rehash() == block.sha256

        p2p_conn_blocksonly.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, block0.sha256)]))
        p2p_conn_blocksonly.wait_until(lambda: test_for_cmpctblock(block0))

        # Request BIP152 high bandwidth mode from the -blocksonly node.
        p2p_conn_blocksonly.send_and_ping(msg_sendcmpct(announce=True, version=2))

        block2 = self.build_block_on_tip()
        self.nodes[0].submitblock(block1.serialize().hex())
        self.nodes[0].submitblock(block2.serialize().hex())
        p2p_conn_blocksonly.wait_until(lambda: test_for_cmpctblock(block2))
Example #11
 def send_get_data(self):
     want = msg_getdata()
     want.inv.append(CInv(1, self.target_hash))  # 1 == MSG_TX
     self.send_message(want)
Example #12
    def run_test(self):
        node = self.nodes[0]

        # duplicate the deterministic sig test from src/test/key_tests.cpp
        privkey = ECKey()
        privkey.set(
            bytes.fromhex(
                "12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747"
            ), True)
        pubkey = privkey.get_pubkey()

        self.log.info(
            "Check the node is signalling the avalanche service bit only if there is a proof."
        )
        assert_equal(
            int(node.getnetworkinfo()['localservices'], 16) & NODE_AVALANCHE,
            0)

        # Create stakes by mining blocks
        addrkey0 = node.get_deterministic_priv_key()
        blockhashes = node.generatetoaddress(2, addrkey0.address)
        stakes = create_coinbase_stakes(node, [blockhashes[0]], addrkey0.key)

        proof_sequence = 11
        proof_expiration = 12
        proof = node.buildavalancheproof(proof_sequence, proof_expiration,
                                         pubkey.get_bytes().hex(), stakes)

        # Restart the node
        self.restart_node(
            0, self.extra_args[0] + [
                "-avaproof={}".format(proof),
                "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN",
            ])

        assert_equal(
            int(node.getnetworkinfo()['localservices'], 16) & NODE_AVALANCHE,
            NODE_AVALANCHE)

        def check_avahello(args):
            # Restart the node with the given args
            self.restart_node(0, self.extra_args[0] + args)

            peer = get_ava_p2p_interface(node)

            avahello = peer.wait_for_avahello().hello

            avakey = ECPubKey()
            avakey.set(bytes.fromhex(node.getavalanchekey()))
            assert avakey.verify_schnorr(avahello.sig,
                                         avahello.get_sighash(peer))

        self.log.info(
            "Test the avahello signature with a generated delegation")
        check_avahello([
            "-avaproof={}".format(proof),
            "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN"
        ])

        master_key = ECKey()
        master_key.generate()
        limited_id = FromHex(AvalancheProof(), proof).limited_proofid
        delegation = node.delegateavalancheproof(
            f"{limited_id:0{64}x}",
            bytes_to_wif(privkey.get_bytes()),
            master_key.get_pubkey().get_bytes().hex(),
        )

        self.log.info("Test the avahello signature with a supplied delegation")
        check_avahello([
            "-avaproof={}".format(proof),
            "-avadelegation={}".format(delegation),
            "-avamasterkey={}".format(bytes_to_wif(master_key.get_bytes())),
        ])

        stakes = create_coinbase_stakes(node, [blockhashes[1]], addrkey0.key)
        interface_proof_hex = node.buildavalancheproof(
            proof_sequence, proof_expiration,
            pubkey.get_bytes().hex(), stakes)
        limited_id = FromHex(AvalancheProof(),
                             interface_proof_hex).limited_proofid

        # delegate
        delegated_key = ECKey()
        delegated_key.generate()
        interface_delegation_hex = node.delegateavalancheproof(
            f"{limited_id:0{64}x}", bytes_to_wif(privkey.get_bytes()),
            delegated_key.get_pubkey().get_bytes().hex(), None)

        self.log.info("Test that wrong avahello signature causes a ban")
        bad_interface = get_ava_p2p_interface(node)
        wrong_key = ECKey()
        wrong_key.generate()
        with node.assert_debug_log([
                "Misbehaving",
                "peer=1 (0 -> 100) BAN THRESHOLD EXCEEDED: invalid-avahello-signature"
        ]):
            bad_interface.send_avahello(interface_delegation_hex, wrong_key)
            bad_interface.wait_for_disconnect()

        self.log.info(
            'Check that receiving a valid avahello triggers a proof getdata request'
        )
        good_interface = get_ava_p2p_interface(node)
        proofid = good_interface.send_avahello(interface_delegation_hex,
                                               delegated_key)

        def getdata_found():
            with p2p_lock:
                return good_interface.last_message.get(
                    "getdata") and good_interface.last_message["getdata"].inv[
                        -1].hash == proofid

        wait_until(getdata_found)

        self.log.info('Check that we can download the proof from our peer')

        node_proofid = FromHex(AvalancheProof(), proof).proofid

        def wait_for_proof_validation():
            # Connect some blocks to trigger the proof verification
            node.generate(1)
            wait_until(lambda: node_proofid in get_proof_ids(node))

        wait_for_proof_validation()

        getdata = msg_getdata([CInv(MSG_AVA_PROOF, node_proofid)])

        self.log.info(
            "Proof has been inv'ed recently, check it can be requested")
        good_interface.send_message(getdata)

        def proof_received(peer):
            with p2p_lock:
                return peer.last_message.get("avaproof") and peer.last_message[
                    "avaproof"].proof.proofid == node_proofid

        wait_until(lambda: proof_received(good_interface))

        # Restart the node
        self.restart_node(
            0, self.extra_args[0] + [
                "-avaproof={}".format(proof),
                "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN",
            ])
        wait_for_proof_validation()

        self.log.info(
            "The proof has not been announced, it cannot be requested")
        peer = get_ava_p2p_interface(node, services=NODE_NETWORK)
        peer.send_message(getdata)

        # Give enough time for the node to answer. Since we cannot check for a
        # non-event this is the best we can do
        time.sleep(2)
        assert not proof_received(peer)

        self.log.info("The proof is known for long enough to be requested")
        current_time = int(time.time())
        node.setmocktime(current_time + UNCONDITIONAL_RELAY_DELAY)

        peer.send_message(getdata)
        wait_until(lambda: proof_received(peer))
Example #13
 def getBlock(self, blkHash):
   self.block = None
   inv = CInv(t=2, h=int(blkHash, 16))
   self.send_message(msg_getdata(inv=[inv]))
   wait_until(lambda: self.block is not None)
   return self.block
Example #14
    def test_unbroadcast(self):
        self.log.info("Test broadcasting proofs")

        node = self.nodes[0]

        def add_peers(count):
            peers = []
            for i in range(count):
                peer = node.add_p2p_connection(ProofInvStoreP2PInterface())
                peer.wait_for_verack()
                peers.append(peer)
            return peers

        _, proof = self.gen_proof(node)
        proofid_hex = "{:064x}".format(proof.proofid)

        # Broadcast the proof
        peers = add_peers(3)
        assert node.sendavalancheproof(proof.serialize().hex())
        wait_for_proof(node, proofid_hex)

        def proof_inv_received(peers):
            with p2p_lock:
                return all(
                    p.last_message.get("inv")
                    and p.last_message["inv"].inv[-1].hash == proof.proofid
                    for p in peers)

        wait_until(lambda: proof_inv_received(peers))

        # If no peer requests the proof for download, the node should reattempt
        # broadcasting it to all new peers after 10 to 15 minutes.
        peers = add_peers(3)
        node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY + 1)
        peers[-1].sync_with_ping()
        wait_until(lambda: proof_inv_received(peers))

        # If at least one peer requests the proof, there are no further
        # attempts to broadcast it
        node.setmocktime(int(time.time()) + UNCONDITIONAL_RELAY_DELAY)
        msg = msg_getdata([CInv(t=MSG_AVA_PROOF, h=proof.proofid)])
        peers[-1].send_message(msg)

        # Give enough time for the node to broadcast the proof again
        peers = add_peers(3)
        node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY + 1)
        peers[-1].sync_with_ping()

        assert not proof_inv_received(peers)

        self.log.info(
            "Proofs that become invalid should no longer be broadcasted")

        # Restart and connect a new set of peers
        self.restart_node(0)

        # Broadcast the proof
        peers = add_peers(3)
        assert node.sendavalancheproof(proof.serialize().hex())
        wait_until(lambda: proof_inv_received(peers))

        # Sanity check our node knows the proof, and it is valid
        wait_for_proof(node, proofid_hex, expect_orphan=False)

        # Mature the utxo then spend it
        node.generate(100)
        utxo = proof.stakes[0].stake.utxo
        raw_tx = node.createrawtransaction(
            inputs=[{
                # coinbase
                "txid": "{:064x}".format(utxo.hash),
                "vout": utxo.n
            }],
            outputs={ADDRESS_BCHREG_UNSPENDABLE: 25_000_000 - 250.00},
        )
        signed_tx = node.signrawtransactionwithkey(
            hexstring=raw_tx,
            privkeys=[node.get_deterministic_priv_key().key],
        )
        node.sendrawtransaction(signed_tx['hex'])

        # Mine the tx in a block
        node.generate(1)

        # Wait for the proof to be orphaned
        wait_until(
            lambda: node.getrawavalancheproof(proofid_hex)["orphan"] is True)

        # It should no longer be broadcasted
        peers = add_peers(3)
        node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY + 1)
        peers[-1].sync_with_ping()

        assert not proof_inv_received(peers)
Example #15
    def test_compactblock_construction(self, node, test_node):
        # Generate a bunch of transactions.
        node.generate(101)
        num_transactions = 25
        address = node.getnewaddress()

        for i in range(num_transactions):
            txid = node.sendtoaddress(address, 0.1)
            hex_tx = node.gettransaction(txid)["hex"]
            tx = FromHex(CTransaction(), hex_tx)

        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        test_node.wait_for_block_announcement(tip)

        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node, node)

        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)

        # Store the raw block in our internal format.
        block = FromHex(CBlock(),
                        node.getblock("{:064x}".format(block_hash), False))
        for tx in block.vtx:
            tx.calc_sha256()
        block.rehash()

        # Wait until the block was announced (via compact blocks)
        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert "cmpctblock" in test_node.last_message
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            header_and_shortids, block_hash, block)

        # Now fetch the compact block using a normal non-announce getdata
        with mininode_lock:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
            test_node.send_message(msg_getdata([inv]))

        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert "cmpctblock" in test_node.last_message
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            header_and_shortids, block_hash, block)
Example #16
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2 * 60 * 60 * 24 * 7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # p2p_conns[0] will only request old blocks
        # p2p_conns[1] will only request new blocks
        # p2p_conns[2] will test resetting the counters
        p2p_conns = []

        for _ in range(3):
            p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))

        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # p2p_conns[0] will test what happens if we just keep requesting the
        # same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        max_bytes_per_day = 800 * 1024 * 1024
        daily_buffer = 144 * 4000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 576MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~235 tries.
        for i in range(success_count):
            p2p_conns[0].send_message(getdata_request)
            p2p_conns[0].sync_with_ping()
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i + 1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            p2p_conns[0].send_message(getdata_request)
        p2p_conns[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info(
            "Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on p2p_conns[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 800 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(800):
            p2p_conns[1].send_message(getdata_request)
            p2p_conns[1].sync_with_ping()
            assert_equal(p2p_conns[1].block_receive_map[big_new_block], i + 1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if p2p_conns[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        p2p_conns[1].send_message(getdata_request)
        p2p_conns[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and p2p_conns[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        p2p_conns[2].sync_with_ping()
        p2p_conns[2].send_message(getdata_request)
        p2p_conns[2].sync_with_ping()
        assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        self.nodes[0].disconnect_p2ps()

        # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        self.log.info("Restarting nodes with -whitelist=127.0.0.1")
        self.stop_node(0)
        self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])

        # Reconnect to self.nodes[0]
        self.nodes[0].add_p2p_connection(TestP2PConn())

        # retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            self.nodes[0].p2p.send_message(getdata_request)
            self.nodes[0].p2p.sync_with_ping()
            assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block],
                         i + 1)

        getdata_request.inv = [CInv(2, big_old_block)]
        self.nodes[0].p2p.send_and_ping(getdata_request)
        assert_equal(len(self.nodes[0].getpeerinfo()),
                     1)  # node is still connected because of the whitelist

        self.log.info(
            "Peer still connected after trying to download old block (whitelisted)"
        )
Example #17
    def test_compactblock_construction(self, node, test_node,
                                       use_witness_address):
        # Generate a bunch of transactions.
        node.generate(101)
        num_transactions = 25
        address = node.getnewaddress()
        segwit_tx_generated = False
        if use_witness_address:
            # Want at least one segwit spend, so move some funds to
            # a witness address.
            address = node.addwitnessaddress(address)
            value_to_send = 1000
            segwit_txid = node.sendtoaddress(address,
                                             satoshi_round(value_to_send))
            segwit_tx = node.getrawtransaction(segwit_txid, 1)
            vout = next(
                filter(lambda vout: int(vout['value']) == 1000,
                       segwit_tx['vout']))

            node.generate(1)

            segwit_spend_txid = node.sendtypeto(
                '', '', [{
                    'address': address,
                    'amount': 0.1
                }], '', '', False,
                {'inputs': [{
                    'tx': segwit_txid,
                    'n': vout['n']
                }]})
            segwit_spend_tx = node.gettransaction(segwit_spend_txid)
            segwit_spend = FromHex(CTransaction(), segwit_spend_tx["hex"])
            if not segwit_spend.wit.is_null():
                segwit_tx_generated = True
            num_transactions -= 1

        for i in range(num_transactions):
            node.sendtoaddress(address, 0.1)

        if use_witness_address:
            assert segwit_tx_generated  # check that our test is not broken

        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        test_node.wait_for_block_announcement(tip)

        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node, node)

        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)

        # Store the raw block in our internal format.
        block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False))
        for tx in block.vtx:
            tx.calc_sha256()
        block.rehash()

        # Wait until the block was announced (via compact blocks)
        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert_in("cmpctblock", test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            header_and_shortids, block_hash, block)

        # Now fetch the compact block using a normal non-announce getdata
        with mininode_lock:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
            test_node.send_message(msg_getdata([inv]))

        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert "cmpctblock" in test_node.last_message
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            header_and_shortids, block_hash, block)
Example #18
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection; add_p2p_connection() waits for a verack to
        # make sure the connection is fully up
        self.nodes[0].add_p2p_connection(BaseNode())

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all(self.nodes[0:2])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        height = self.nodes[0].getblockcount()

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height + 1),
                                 self.block_time)
            block.nHeight = height + 1
            prepare_block(block)
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our
            # P2PInterface
            self.nodes[0].p2p.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], self.nodes[2])

        self.log.info("Wait for node2 to receive all the blocks from node1")
        self.sync_all()

        self.log.info("Add P2P connection to node2")
        self.nodes[0].disconnect_p2ps()

        self.nodes[2].add_p2p_connection(BaseNode())

        self.log.info("Test that node2 propagates all the blocks to us")

        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(MSG_BLOCK, block))
        self.nodes[2].p2p.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # P2PInterface objects.
        wait_until(lambda: sorted(blocks) == sorted(
            list(self.nodes[2].p2p.block_receive_map.keys())),
                   timeout=5,
                   lock=mininode_lock)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global
        # lock when testing the predicate.
        with mininode_lock:
            for block in self.nodes[2].p2p.block_receive_map.values():
                assert_equal(block, 1)
Example #19
 def on_getgraphene(self, message):
     self.on_getdata(
         msg_getdata([CInv(2, message.request.requested_block_hash)]))
Example #20
    def test_compactblocks_not_at_tip(self, node, test_node):
        # Test that requesting old compactblocks doesn't work.
        MAX_CMPCTBLOCK_DEPTH = 5
        new_blocks = []
        for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
            test_node.clear_block_announcement()
            new_blocks.append(node.generate(1)[0])
            wait_until(test_node.received_block_announcement,
                       timeout=30,
                       lock=mininode_lock)

        test_node.clear_block_announcement()
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "cmpctblock" in test_node.last_message,
                   timeout=30,
                   lock=mininode_lock)

        test_node.clear_block_announcement()
        node.generate(1)
        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)
        test_node.clear_block_announcement()
        with mininode_lock:
            test_node.last_message.pop("block", None)
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "block" in test_node.last_message,
                   timeout=30,
                   lock=mininode_lock)
        with mininode_lock:
            test_node.last_message["block"].block.calc_sha256()
            assert_equal(test_node.last_message["block"].block.sha256,
                         int(new_blocks[0], 16))

        # Generate an old compactblock, and verify that it's not accepted.
        cur_height = node.getblockcount()
        hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
        block = self.build_block_on_tip(node)
        block.hashPrevBlock = hashPrevBlock
        block.solve()

        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block)
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

        tips = node.getchaintips()
        found = False
        for x in tips:
            if x["hash"] == block.hash:
                assert_equal(x["status"], "headers-only")
                found = True
                break
        assert found

        # Requesting this block via getblocktxn should silently fail
        # (to avoid fingerprinting attacks).
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
        with mininode_lock:
            test_node.last_message.pop("blocktxn", None)
        test_node.send_and_ping(msg)
        with mininode_lock:
            assert "blocktxn" not in test_node.last_message
Example #21
 def send_block_request(self, block_hash, node):
     msg = msg_getdata()
     msg.inv.append(CInv(MSG_BLOCK, block_hash))
     node.send_message(msg)
Example #22
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2*60*60*24*7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # p2p_conns[0] will only request old blocks
        # p2p_conns[1] will only request new blocks
        # p2p_conns[2] will test resetting the counters
        p2p_conns = []

        for _ in range(3):
            p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))

        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # p2p_conns[0] will test what happens if we just keep requesting the
        # same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        max_bytes_per_day = 800*1024*1024
        daily_buffer = 144 * 4000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 576MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~235 tries.
        for i in range(success_count):
            p2p_conns[0].send_message(getdata_request)
            p2p_conns[0].sync_with_ping()
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            p2p_conns[0].send_message(getdata_request)
        p2p_conns[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info("Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on p2p_conns[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 800 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(800):
            p2p_conns[1].send_message(getdata_request)
            p2p_conns[1].sync_with_ping()
            assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if p2p_conns[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        p2p_conns[1].send_message(getdata_request)
        p2p_conns[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and p2p_conns[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        p2p_conns[2].sync_with_ping()
        p2p_conns[2].send_message(getdata_request)
        p2p_conns[2].sync_with_ping()
        assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        self.nodes[0].disconnect_p2ps()

        # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        self.log.info("Restarting nodes with -whitelist=127.0.0.1")
        self.stop_node(0)
        self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])

        # Reconnect to self.nodes[0]
        self.nodes[0].add_p2p_connection(TestP2PConn())

        # retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            self.nodes[0].p2p.send_message(getdata_request)
            self.nodes[0].p2p.sync_with_ping()
            assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)

        getdata_request.inv = [CInv(2, big_old_block)]
        self.nodes[0].p2p.send_and_ping(getdata_request)
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)  # node is still connected because of the whitelist

        self.log.info("Peer still connected after trying to download old block (whitelisted)")
Example #23
 def send_block_request(self, block_hash, node):
     msg = msg_getdata()
     # 2 == "Block"
     msg.inv.append(CInv(2, block_hash))
     node.send_message(msg)
Example #24
 def send_getdata_for_block(self, blockhash):
     getdata_request = msg_getdata()
     getdata_request.inv.append(CInv(2, int(blockhash, 16)))
     self.send_message(getdata_request)
Example #25
 def send_get_data(self, block_hashes):
     """Request data for a list of block hashes."""
     msg = msg_getdata()
     for x in block_hashes:
         msg.inv.append(CInv(2, x))
     self.send_message(msg)
Example #26
    def test_compactblock_construction(self,
                                       test_node,
                                       use_witness_address=True):
        version = test_node.cmpct_version
        node = self.nodes[0]
        # Generate a bunch of transactions.
        node.generate(COINBASE_MATURITY + 1)
        num_transactions = 25
        address = node.getnewaddress()

        segwit_tx_generated = False
        for _ in range(num_transactions):
            txid = node.sendtoaddress(address, 0.1)
            hex_tx = node.gettransaction(txid)["hex"]
            tx = tx_from_hex(hex_tx)
            if not tx.wit.is_null():
                segwit_tx_generated = True

        if use_witness_address:
            assert segwit_tx_generated  # check that our test is not broken

        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        test_node.wait_for_block_announcement(tip)

        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node)

        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)

        # Store the raw block in our internal format.
        block = from_hex(CBlock(), node.getblock("%064x" % block_hash, False))
        for tx in block.vtx:
            tx.calc_sha256()
        block.rehash()

        # Wait until the block was announced (via compact blocks)
        test_node.wait_until(lambda: "cmpctblock" in test_node.last_message,
                             timeout=30)

        # Now fetch and check the compact block
        header_and_shortids = None
        with p2p_lock:
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)

        # Now fetch the compact block using a normal non-announce getdata
        test_node.clear_block_announcement()
        inv = CInv(MSG_CMPCT_BLOCK, block_hash)
        test_node.send_message(msg_getdata([inv]))

        test_node.wait_until(lambda: "cmpctblock" in test_node.last_message,
                             timeout=30)

        # Now fetch and check the compact block
        header_and_shortids = None
        with p2p_lock:
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)
Example #27
 def request_block(self, blockhash, inv_type, timeout=60):
     with mininode_lock:
         self.last_message.pop("block", None)
     self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
     self.wait_for_block(blockhash, timeout)
     return self.last_message["block"].block