Example #1
    def run_test(self):
        nobf_node = TestNode()
        bf_node = TestNode()

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], nobf_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], bf_node))
        nobf_node.add_connection(connections[0])
        bf_node.add_connection(connections[1])

        # Start up network handling in another thread
        NetworkThread().start()

        nobf_node.wait_for_verack()
        bf_node.wait_for_verack()

        # Verify mininodes are connected to zcashd nodes
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(SPROUT_PROTO_VERSION))
        peerinfo = self.nodes[1].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(SPROUT_PROTO_VERSION))

        # Mininodes send filterclear message to zcashd node.
        nobf_node.send_message(msg_filterclear())
        bf_node.send_message(msg_filterclear())

        time.sleep(3)

        # Verify mininodes are still connected to zcashd nodes
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(SPROUT_PROTO_VERSION))
        peerinfo = self.nodes[1].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(SPROUT_PROTO_VERSION))

        # Mininodes send filteradd message to zcashd node.
        nobf_node.send_message(msg_filteradd())
        bf_node.send_message(msg_filteradd())

        time.sleep(3)

        # Verify NoBF mininode has been dropped, and BF mininode is still connected.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
        peerinfo = self.nodes[1].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(SPROUT_PROTO_VERSION))

        [ c.disconnect_node() for c in connections ]
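The fixed time.sleep(3) pauses in this example give zcashd time to react, but they make the test timing-sensitive. Polling until a condition holds is more robust; below is a minimal sketch of such a helper using only the standard library (later examples use the framework's own wait_until for the same purpose; the helper name here is hypothetical):

import time

def wait_until_true(predicate, timeout=10, poll=0.1):
    # Poll the predicate until it returns True or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(poll)
    raise AssertionError("condition not met within %s seconds" % timeout)

# Hypothetical usage in place of time.sleep(3) followed by peer checks:
# wait_until_true(lambda: len(self.nodes[0].getpeerinfo()) > 0)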
Example #2
    def run_test(self):
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)

        self.nodes[0].setnetworkactive(False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        timeout = 3
        while self.nodes[0].getnetworkinfo()['connections'] != 0:
            # Wait a bit for all sockets to close
            assert timeout > 0, 'not all connections closed in time'
            timeout -= 0.1
            time.sleep(0.1)

        self.nodes[0].setnetworkactive(True)
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)

        # test getaddednodeinfo
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(ip_port, 'add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that a non-existent node returns an error
        assert_raises_jsonrpc(-24, "Node has not been added",
                              self.nodes[0].getaddednodeinfo, '1.1.1.1')
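For reference, a minimal sketch of what an assert_raises_jsonrpc helper typically does; the real implementation lives in the framework's util module and uses its JSONRPCException, so treat the details as assumptions:

def assert_raises_jsonrpc(code, message, fun, *args, **kwargs):
    # Expect fun(*args, **kwargs) to fail with the given RPC error code
    # and an error message containing `message`.
    try:
        fun(*args, **kwargs)
    except JSONRPCException as e:
        assert e.error["code"] == code, "unexpected error code: %r" % e.error
        assert message in e.error["message"], "unexpected message: %r" % e.error
    else:
        raise AssertionError("no JSONRPCException was raised")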
Example #3
    def run_test(self):
        test_node = TestNode()

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                                    test_node, "regtest", True))
        test_node.add_connection(connections[0])

        # Start up network handling in another thread
        NetworkThread().start()

        test_node.wait_for_verack()

        # Verify mininode is connected to zcashd node
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(0, peerinfo[0]["banscore"])

        self.coinbase_blocks = self.nodes[0].generate(1)
        self.nodes[0].generate(100)
        self.nodeaddress = self.nodes[0].getnewaddress()

        # Mininode sends transaction to zcashd node.
        def setExpiryHeight(tx):
            tx.nExpiryHeight = 101

        spendtx = self.create_transaction(self.nodes[0],
                                          self.coinbase_blocks[0],
                                          self.nodeaddress, 1.0,
                                          txModifier=setExpiryHeight)
        test_node.send_message(msg_tx(spendtx))

        time.sleep(3)

        # Verify test mininode has not been dropped
        # and still has a banscore of 0.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(0, peerinfo[0]["banscore"])

        # Mine a block and resend the transaction
        self.nodes[0].generate(1)
        test_node.send_message(msg_tx(spendtx))

        time.sleep(3)

        # Verify test mininode has not been dropped
        # but has a banscore of 10.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(10, peerinfo[0]["banscore"])

        [ c.disconnect_node() for c in connections ]
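This test relies on a create_transaction helper with a txModifier hook, whose definition isn't shown. A plausible sketch, assuming RPC-based signing and mininode's CTransaction/FromHex/ToHex; the flow and names are assumptions, not the test's actual helper:

    def create_transaction(self, node, coinbase_block_hash, to_address, amount, txModifier=None):
        # Spend the coinbase of the given block to to_address.
        from_txid = node.getblock(coinbase_block_hash)["tx"][0]
        rawtx = node.createrawtransaction([{"txid": from_txid, "vout": 0}],
                                          {to_address: amount})
        tx = FromHex(CTransaction(), rawtx)
        if txModifier:
            txModifier(tx)  # e.g. set tx.nExpiryHeight before signing
        signed = node.signrawtransaction(ToHex(tx))
        return FromHex(CTransaction(), signed["hex"])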
Example #4
    def run_test(self):
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
        self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3) # wait till the nodes are disconnected
        assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[2].listbanned()), 1)
        try:
            self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
        except:
            pass
        assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        try:
            self.nodes[2].setban("127.0.0.1", "remove")
        except:
            pass
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)
        
        ###########################
        # RPC disconnectnode test #
        ###########################
        url = urlparse.urlparse(self.nodes[1].url)
        self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
        time.sleep(2) #disconnecting a node needs a little bit of time
        for node in self.nodes[0].getpeerinfo():
            assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))

        connect_nodes_bi(self.nodes,0,1) #reconnect the node
        found = False
        for node in self.nodes[0].getpeerinfo():
            if node['addr'] == url.hostname+":"+str(p2p_port(1)):
                found = True
        assert(found)
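The bare try/except: pass blocks above swallow every possible failure. With a helper like assert_raises_jsonrpc (used in Example #2) the intent can be asserted precisely; the exact error code and message below are assumptions about this node's RPC behavior:

        # Hypothetical: re-banning an address already covered by a banned
        # subnet should raise instead of silently succeeding.
        assert_raises_jsonrpc(-23, "IP/Subnet already banned",
                              self.nodes[2].setban, "127.0.0.1", "add")
        assert_equal(len(self.nodes[2].listbanned()), 1)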
Example #5
 def _test_getaddednodeinfo(self):
     assert_equal(self.nodes[0].getaddednodeinfo(), [])
     # add a node (node2) to node0
     ip_port = "127.0.0.1:{}".format(p2p_port(2))
     self.nodes[0].addnode(ip_port, 'add')
     # check that the node has indeed been added
     added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
     assert_equal(len(added_nodes), 1)
     assert_equal(added_nodes[0]['addednode'], ip_port)
     # check that a non-existent node returns an error
     assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')
Example #6
    def init_test(self):
        title = "*** Starting %s ***" % self.__class__.__name__
        underline = "-" * len(title)
        self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)
        self.DEFAULT_FEE = 0.05
        # Setup the p2p connections and start up the network thread.
        self.test_nodes = []
        for i in range(self.num_nodes):
            self.test_nodes.append(FivebalanceTestNode())
            self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))

        network_thread_start()  # Start up network handling in another thread

        # Let the test nodes get in sync
        for i in range(self.num_nodes):
            self.test_nodes[i].wait_for_verack()
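p2p_port(i) maps a node index to a deterministic localhost port so that several test nodes can run side by side. A simplified sketch of the usual scheme (the real framework also mixes in a per-process seed so parallel test runs don't collide; the constants here are assumptions):

PORT_MIN = 11000   # assumed base port
MAX_NODES = 8      # assumed maximum nodes per test

def p2p_port(n):
    # One P2P port per node index, offset from the base.
    return PORT_MIN + n

def rpc_port(n):
    # RPC ports occupy a separate range above the P2P ports.
    return PORT_MIN + MAX_NODES + n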
Example #7
    def run_test(self):
        self.xt_node = self.nodes[0]

        # generate a block to get out of IBD
        self.xt_node.generate(1)

        self.test_node = TestNode()
        self.test_node.add_connection(
            NodeConn('127.0.0.1', p2p_port(0), self.xt_node, self.test_node))

        NetworkThread().start()
        self.test_node.handshake()

        self.test_basics(self.xt_node, self.test_node)
        self.test_garbage(self.test_node)
        self.test_big_bitmap(self.xt_node, self.test_node, 8 * 2)
        self.test_big_bitmap(self.xt_node, self.test_node, 8 * 128)
Example #8
    def run_test(self):
        node0 = NodeConnCB()
        node0.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))

        NetworkThread().start()
        node0.wait_for_verack()

        self.log.info("#1. generate 1 block by node0==================================================================")
        self.nodes[0].generate(nblocks=1)
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

        self.height = 1
        self.coinbase_txs = []

        self.log.info("#2. create 100 blocks and send to node0========================================================")
        for i in range(100):
            coinbase_tx = create_coinbase(self.height)
            self.coinbase_txs.append(coinbase_tx)
            self.create_block_and_send([coinbase_tx], node0)

        self.nodes[0].waitforblockheight(101)

        self.fork_point_hash = self.tip
        self.fork_height = self.height

        self.log.info("#3. create one fork chain with one block=======================================================")
        for i in range(1):
            block_fee, txns = self.create_txns_from(self.coinbase_txs[i], 2)
            coinbase = create_coinbase(self.height, None, block_fee)
            self.create_block_and_send([coinbase] + txns, node0)

        self.nodes[0].waitforblockheight(102)

        self.log.info("#4. create another fork chain with two blocks==================================================")
        self.tip = self.fork_point_hash
        self.height = self.fork_height

        for i in range(2):
            block_fee, txns = self.create_txns_from(self.coinbase_txs[i], 2)
            coinbase = create_coinbase(self.height, None, block_fee)
            self.create_block_and_send([coinbase] + txns, node0)

        self.log.info("#5. expect node0 switch to new chain===========================================================")
        self.nodes[0].waitforblockheight(103)
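create_block_and_send isn't shown in this example; a plausible sketch built from the framework primitives it already uses (create_block, msg_block) and the self.tip/self.height/self.block_time bookkeeping the surrounding code maintains. The details are assumptions:

    def create_block_and_send(self, txns, peer):
        # txns[0] is the coinbase; any remaining transactions follow it.
        block = create_block(self.tip, txns[0], self.block_time)
        block.vtx += txns[1:]
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()  # grind the nonce to satisfy regtest proof-of-work
        peer.send_message(msg_block(block))
        # Advance our local view of the chain.
        self.tip = block.sha256
        self.block_time += 1
        self.height += 1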
Example #9
    def init_(self, nodes_count):
        nodes = []

        for no in range(0, nodes_count):
            # Create a P2P connection
            node = NodeConnCB()
            connection = NodeConn('127.0.0.1', p2p_port(no), self.nodes[no], node)
            node.add_connection(connection)
            nodes.append(node)

        NetworkThread().start()

        for no in range(0, nodes_count):
            # wait_for_verack ensures that the P2P connection is fully up.
            nodes[no].wait_for_verack()

        self.init_chain_(nodes[0], nodes_count)

        return nodes
Example #10
    def run_test(self):
        gen_node = self.nodes[0]  # The block and tx generating node
        gen_node.generate(1)

        # Setup the attacking p2p connection and start up the network thread.
        self.inbound_peer = TestNode()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                     self.inbound_peer)
        ]
        self.inbound_peer.add_connection(connections[0])
        NetworkThread().start()  # Start up network handling in another thread

        max_repeats = 48
        self.log.info("Running test up to {} times.".format(max_repeats))
        for i in range(max_repeats):
            self.log.info('Run repeat {}'.format(i + 1))
            txid = gen_node.sendtoaddress(gen_node.getnewaddress(), 0.01)

            want_tx = MsgGetdata()
            want_tx.inv.append(CInv(t=1, h=int(txid, 16)))
            self.inbound_peer.last_message.pop('notfound', None)
            connections[0].send_message(want_tx)
            self.inbound_peer.sync_with_ping()

            if self.inbound_peer.last_message.get('notfound'):
                self.log.debug(
                    'tx {} was not yet announced to us.'.format(txid))
                self.log.debug(
                    "node has responded with a notfound message. End test.")
                assert_equal(
                    self.inbound_peer.last_message['notfound'].vec[0].hash,
                    int(txid, 16))
                self.inbound_peer.last_message.pop('notfound')
                break
            else:
                self.log.debug(
                    'tx {} was already announced to us. Try test again.'.
                    format(txid))
                assert int(txid, 16) in [
                    inv.hash
                    for inv in self.inbound_peer.last_message['inv'].inv
                ]
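sync_with_ping is what makes the last_message inspection safe: the node processes P2P messages in order, so once our ping is answered, everything sent before it has been handled. A minimal sketch of the usual mininode implementation (names and details assumed):

    def sync_with_ping(self, timeout=30):
        self.ping_counter += 1  # fresh nonce for this round trip
        self.send_message(msg_ping(nonce=self.ping_counter))
        wait_until(lambda: "pong" in self.last_message and
                           self.last_message["pong"].nonce == self.ping_counter,
                   timeout=timeout)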
Example #11
 def test_getaddednodeinfo(self):
     self.log.info("Test getaddednodeinfo")
     assert_equal(self.nodes[0].getaddednodeinfo(), [])
     # add a node (node2) to node0
     ip_port = "127.0.0.1:{}".format(p2p_port(2))
     self.nodes[0].addnode(node=ip_port, command='add')
     # check that the node has indeed been added
     added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
     assert_equal(len(added_nodes), 1)
     assert_equal(added_nodes[0]['addednode'], ip_port)
     # check that node cannot be added again
     assert_raises_rpc_error(-23, "Node already added", self.nodes[0].addnode, node=ip_port, command='add')
     # check that node can be removed
     self.nodes[0].addnode(node=ip_port, command='remove')
     assert_equal(self.nodes[0].getaddednodeinfo(), [])
     # check that trying to remove the node again returns an error
     assert_raises_rpc_error(-24, "Node could not be removed", self.nodes[0].addnode, node=ip_port, command='remove')
     # check that a non-existent node returns an error
     assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')
Example #12
    def _test_getaddednodeinfo(self):
        # Check whether getaddednodeinfo can return information about added nodes
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))

        # Add the node using the 'add' command
        self.nodes[0].addnode(ip_port, 'add')

        # check that the node has indeed been added
        # (here getaddednodeinfo is called with the address as its argument)
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)

        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added",
                                self.nodes[0].getaddednodeinfo, '1.1.1.1')
Example #13
    def start_node_with_protoconf(self, maxprotocolrecvpayloadlength, recvinvqueuefactor=0):
        """
        This method starts the node and creates a connection to the node.
        It takes care of exchanging protoconf messages between node and python connection.
        Max size of bitcoin nodes' inv message is determined by maxprotocolrecvpayloadlength.
        Max size of python connections' inv message is LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH.
        If called with parameter recvinvqueuefactor it also sets this setting on the node.

        :returns test_node
        """

        # Start the node with maxprotocolrecvpayloadlength, recvinvqueuefactor (if set)
        start_params = []
        if maxprotocolrecvpayloadlength:
            start_params.append("-maxprotocolrecvpayloadlength={} ".format(maxprotocolrecvpayloadlength))
        if recvinvqueuefactor:
            start_params.append("-recvinvqueuefactor={} ".format(recvinvqueuefactor))
        self.start_node(0, start_params)

        # Create a connection and connect to the node
        test_node = NodeConnCB()
        test_node.wanted_inv_lengths = []
        def on_getdata(conn, message):
            test_node.wanted_inv_lengths.append(len(message.inv))
        test_node.on_getdata = on_getdata
        # Send protoconf message from python node to bitcoind node
        def send_protoconf_default_msg_length(conn):
            conn.send_message(msg_protoconf(CProtoconf(1, LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH)))
        test_node.send_protoconf = send_protoconf_default_msg_length
        connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)]
        test_node.add_connection(connections[0])
        NetworkThread().start()  # Start up network handling in another thread

        # Prepare initial block. Needed so that GETDATA can be sent back.
        self.nodes[0].generate(1)

        # Receive bitcoind's protoconf and save max_recv_payload_length.
        test_node.wait_for_protoconf()
        test_node.max_recv_payload_length = test_node.last_message["protoconf"].protoconf.max_recv_payload_length

        return test_node
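wait_for_protoconf presumably follows the same pattern as the framework's other wait_for_* helpers; a sketch assuming the generic wait_until:

    def wait_for_protoconf(self, timeout=60):
        # Block until the network thread has recorded the node's protoconf.
        wait_until(lambda: "protoconf" in self.last_message, timeout=timeout)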
Example #14
    def init_test(self):
        ''' Initializes test parameters. '''
        title = "*** Starting %s ***" % self.__class__.__name__
        underline = "-" * len(title)
        self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)

        # Setup the p2p connections and start up the network thread.
        self.test_nodes = []
        for i in range(self.num_nodes):
            self.test_nodes.append(TestNode())
            self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))

        network_thread_start()  # Start up network handling in another thread
        self.node = self.nodes[0]

        # Let the test nodes get in sync
        for i in range(self.num_nodes):
            self.test_nodes[i].wait_for_verack()
Example #15
    def run_test(self):
        # Node 0 connects to Node 1, check that the noban permission is not granted
        self.connect_nodes(0, 1)
        peerinfo = self.nodes[1].getpeerinfo()[0]
        assert 'noban' not in peerinfo['permissions']

        # Node 0 get banned by Node 1
        self.nodes[1].setban("127.0.0.1", "add")

        # Node 0 should not be able to reconnect
        with self.nodes[1].assert_debug_log(
                expected_msgs=['dropped (banned)\n'], timeout=50):
            self.restart_node(1, [])
            self.nodes[0].addnode("127.0.0.1:" + str(p2p_port(1)), "onetry")

        # However, node 0 should be able to reconnect if it has noban permission
        self.restart_node(1, ['-whitelist=127.0.0.1'])
        self.connect_nodes(0, 1)
        peerinfo = self.nodes[1].getpeerinfo()[0]
        assert 'noban' in peerinfo['permissions']

        # If we remove the ban, Node 0 should be able to reconnect even without noban permission
        self.nodes[1].setban("127.0.0.1", "remove")
        self.restart_node(1, [])
        self.connect_nodes(0, 1)
        peerinfo = self.nodes[1].getpeerinfo()[0]
        assert 'noban' not in peerinfo['permissions']

        self.log.info("Test that a non-IP address can be banned/unbanned")
        node = self.nodes[1]
        tor_addr = "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion"
        ip_addr = "1.2.3.4"
        assert not self.is_banned(node, tor_addr)
        assert not self.is_banned(node, ip_addr)
        node.setban(tor_addr, "add")
        assert self.is_banned(node, tor_addr)
        assert not self.is_banned(node, ip_addr)
        node.setban(tor_addr, "remove")
        assert not self.is_banned(node, tor_addr)
        assert not self.is_banned(node, ip_addr)
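is_banned isn't shown in this example; a one-liner over the listbanned RPC suffices. A sketch (the "address" field name is assumed from the RPC's usual output):

    def is_banned(self, node, addr):
        # listbanned returns one entry per banned IP or subnet.
        return any(entry["address"] == addr for entry in node.listbanned())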
Example #16
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        node0.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        node0.send_message(msg_block(block))

        block = self.chain.next_block(block_count)
        self.log.info(f"block hash: {block.hash}")
        self.nodes[0].waitaftervalidatingblock(block.hash, "add")

        # make sure block hash is in waiting list
        wait_for_waiting_blocks({block.hash}, self.nodes[0], self.log)

        self.log.info("sending block")
        node0.send_message(msg_block(block))

        # make sure we started validating block
        wait_for_validating_blocks({block.hash}, self.nodes[0], self.log)

        # sleep a bit and check that in the meantime validation hasn't proceeded
        time.sleep(1)
        assert (block.hash != self.nodes[0].getbestblockhash())

        # after validating the block we release its waiting status
        self.nodes[0].waitaftervalidatingblock(block.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        assert_equal(block.hash, self.nodes[0].getbestblockhash())
Example #17
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # send one to get out of IBD state
        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        node0.send_message(msg_block(block))

        self.nodes[0].waitforblockheight(1)

        block_count = self.__test_getdata(node0, block_count)
        block_count = self.__test_getblocks(node0, block_count)
Example #18
    def init_test(self):
        ''' Initializes test parameters. '''
        self.log.info("\n\n*** Starting %s ***\n------------------------\n%s\n", self.__class__.__name__, self.description)
        # Global Test parameters (override in run_test)
        self.DEFAULT_FEE = 0.1
        # Spam blocks to send in current test
        self.NUM_BLOCKS = 30

        # Setup the p2p connections and start up the network thread.
        self.test_nodes = []
        for i in range(self.num_nodes):
            self.test_nodes.append(TestNode())
            self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))

        network_thread_start()  # Start up network handling in another thread
        self.node = self.nodes[0]

        # Let the test nodes get in sync
        for i in range(self.num_nodes):
            self.test_nodes[i].wait_for_verack()
Example #20
    def run_test(self):
        node0conn = BaseNode()
        node0conn.add_connection(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0conn))

        NetworkThread().start()
        node0conn.wait_for_verack()

        node0 = self.nodes[0]

        tip = int(node0.getbestblockhash(), 16)
        height = node0.getblockcount() + 1
        block_time = node0.getblock(node0.getbestblockhash())['time'] + 1

        blocks = []
        for i in range(NUM_IBD_BLOCKS * 2):
            block = create_block(tip, create_coinbase(height), block_time)
            block.solve()
            blocks.append(block)
            tip = block.sha256
            height += 1
            block_time += 1

        # Headers need to be sent in-order
        for b in blocks:
            node0conn.send_header(b)

        # Send blocks in some random order
        for b in random.sample(blocks, len(blocks)):
            node0conn.send_block(b)

        # The node should eventually, completely sync without getting stuck
        def node_synced():
            return node0.getbestblockhash() == blocks[-1].hash

        wait_until(node_synced)
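send_header and send_block are custom helpers on BaseNode; a plausible definition using standard mininode types (assumed, since the class body isn't shown):

class BaseNode(NodeConnCB):
    def send_header(self, block):
        # Announce the block by header only; the node should request it.
        msg = msg_headers()
        msg.headers = [CBlockHeader(block)]
        self.send_message(msg)

    def send_block(self, block):
        self.send_message(msg_block(block))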
Example #21
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

        height = 1

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            block_message = msg_block(block)
            # send_message is used to send a P2P message to the node over our NodeConn connection
            node0.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info("Wait for node2 reach current tip. Test that it has propogated all the blocks to us")

        for block in blocks:
            getdata_request = msg_getdata()
            getdata_request.inv.append(CInv(2, block))
            node2.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # NodeConnCB objects.
        assert wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
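block_receive_map is not part of stock NodeConnCB; BaseNode here presumably overrides the block callback to count deliveries. A sketch, assuming the older on_block(conn, message) callback signature:

from collections import defaultdict

class BaseNode(NodeConnCB):
    def __init__(self):
        super().__init__()
        self.block_receive_map = defaultdict(int)

    def on_block(self, conn, message):
        # Count how many times each block hash is delivered to us.
        message.block.calc_sha256()
        self.block_receive_map[message.block.sha256] += 1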
Example #22
    def run_test(self):
        testnode0 = TestNode()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], testnode0,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode0.add_connection(connections[0])

        # Start up network handling in another thread
        NetworkThread().start()
        testnode0.wait_for_verack()

        # Verify mininode is connected to zelcashd node
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(0, peerinfo[0]["banscore"])

        # Mine some blocks so we can spend
        coinbase_blocks = self.nodes[0].generate(200)
        node_address = self.nodes[0].getnewaddress()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 0)

        # Mininodes send expiring soon transaction in "tx" message to zelcashd node
        self.send_transaction(testnode0, coinbase_blocks[0], node_address, 203)

        # Assert that the tx is not in the mempool (expiring soon)
        assert_equal([], self.nodes[0].getrawmempool())
        assert_equal([], self.nodes[1].getrawmempool())
        assert_equal([], self.nodes[2].getrawmempool())

        # Mininodes send transaction in "tx" message to zelcashd node
        tx2 = self.send_transaction(testnode0, coinbase_blocks[1],
                                    node_address, 204)

        # tx2 is not expiring soon
        assert_equal([tx2.hash], self.nodes[0].getrawmempool())
        assert_equal([tx2.hash], self.nodes[1].getrawmempool())
        # node 2 is isolated
        assert_equal([], self.nodes[2].getrawmempool())

        # Verify txid for tx2
        self.verify_inv(testnode0, tx2)
        self.send_data_message(testnode0, tx2)
        self.verify_last_tx(testnode0, tx2)

        # Sync and mine an empty block with node 2, leaving tx in the mempool of node0 and node1
        for blkhash in coinbase_blocks:
            blk = self.nodes[0].getblock(blkhash, 0)
            self.nodes[2].submitblock(blk)
        self.nodes[2].generate(1)

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Reconnect node 2 to the network
        connect_nodes_bi(self.nodes, 0, 2)

        # Set up test node for node 2
        testnode2 = TestNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], testnode2,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode2.add_connection(connections[-1])

        # Verify block count
        sync_blocks(self.nodes[:3])
        assert_equal(self.nodes[0].getblockcount(), 201)
        assert_equal(self.nodes[1].getblockcount(), 201)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Verify contents of mempool
        assert_equal([tx2.hash], self.nodes[0].getrawmempool())
        assert_equal([tx2.hash], self.nodes[1].getrawmempool())
        assert_equal([], self.nodes[2].getrawmempool())

        # Confirm tx2 cannot be submitted to a mempool because it is expiring soon.
        try:
            rawtx2 = hexlify(tx2.serialize())
            self.nodes[2].sendrawtransaction(rawtx2)
            fail("Sending transaction should have failed")
        except JSONRPCException as e:
            assert_equal(
                "tx-expiring-soon: expiryheight is 204 but should be at least 205 to avoid transaction expiring soon",
                e.error['message'])

        self.send_data_message(testnode0, tx2)

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify node 0 does not reply to "getdata" by sending "tx" message, as tx2 is expiring soon
        with mininode_lock:
            assert_equal(testnode0.last_tx, None)

        # Verify mininode received a "notfound" message containing the txid of tx2
        with mininode_lock:
            msg = testnode0.last_notfound
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Create a transaction to verify that processing of "getdata" messages is functioning
        tx3 = self.send_transaction(testnode0, coinbase_blocks[2],
                                    node_address, 999)

        self.send_data_message(testnode0, tx3)
        self.verify_last_tx(testnode0, tx3)
        # Verify txid for tx3 is returned in "inv", but tx2 which is expiring soon is not returned
        self.verify_inv(testnode0, tx3)
        self.verify_inv(testnode2, tx3)

        # Verify contents of mempool
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[0].getrawmempool()))
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[1].getrawmempool()))
        assert_equal({tx3.hash}, set(self.nodes[2].getrawmempool()))

        # Verify banscore for nodes are still zero
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[0].getpeerinfo()))
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[2].getpeerinfo()))

        [c.disconnect_node() for c in connections]
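send_data_message and verify_last_tx are helpers defined elsewhere in this test. A sketch of their likely shape, assuming mininode's msg_getdata/CInv and that TestNode records the last received "tx" message as last_tx (field layout assumed):

    def send_data_message(self, testnode, tx):
        # Request the transaction by txid, as a syncing peer would.
        msg = msg_getdata()
        msg.inv.append(CInv(1, tx.sha256))  # type 1 == MSG_TX
        testnode.send_message(msg)

    def verify_last_tx(self, testnode, tx):
        testnode.sync_with_ping()
        with mininode_lock:
            assert testnode.last_tx is not None
            assert_equal(testnode.last_tx.tx.sha256, tx.sha256)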
Example #23
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        inv_node = TestNode()
        test_node = TestNode()

        self.p2p_connections = [inv_node, test_node]

        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node),
            NodeConn('127.0.0.1',
                     p2p_port(0),
                     self.nodes[0],
                     test_node,
                     services=0)
        ]
        # Set nServices to 0 for test_node, so no block download will occur outside of
        # direct fetching
        inv_node.add_connection(connections[0])
        test_node.add_connection(connections[1])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        inv_node.wait_for_verack()
        test_node.wait_for_verack()

        # Ensure veracks have been processed by our peer
        inv_node.sync_with_ping()
        test_node.sync_with_ping()

        tip = int(self.nodes[0].getbestblockhash(), 16)

        # PART 1
        # 1. Mine a block; expect inv announcements each time
        self.log.info(
            "Part 1: headers don't start before sendheaders message...")
        block_time = 0
        for i in range(4):
            old_tip = tip
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)
            # Try a few different responses; none should affect next announcement
            if i == 0:
                # first request the block
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # next try requesting header and block
                test_node.get_headers(locator=[old_tip], hashstop=tip)
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
                test_node.clear_last_announcement()  # since we requested headers...
            elif i == 2:
                # this time announce own block via headers
                height = self.nodes[0].getblockcount()
                last_time = self.nodes[0].getblock(
                    self.nodes[0].getbestblockhash())['time']
                block_time = last_time + 1
                new_block = create_block(tip, create_coinbase(height + 1),
                                         block_time)
                new_block.solve()
                test_node.send_header_for_blocks([new_block])
                test_node.wait_for_getdata([new_block.sha256])
                test_node.send_message(MsgBlock(new_block))
                test_node.sync_with_ping()  # make sure this block is processed
                inv_node.clear_last_announcement()
                test_node.clear_last_announcement()

        self.log.info("Part 1: success!")
        self.log.info(
            "Part 2: announce blocks with headers after sendheaders message..."
        )
        # PART 2
        # 2. Send a sendheaders message and test that headers announcements
        # commence and keep working.
        test_node.send_message(MsgSendHeaders())
        prev_tip = int(self.nodes[0].getbestblockhash(), 16)
        test_node.get_headers(locator=[prev_tip], hashstop=0)
        test_node.sync_with_ping()

        # Now that we've synced headers, headers announcements should work
        tip = self.mine_blocks(1)
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(headers=[tip]), True)

        height = self.nodes[0].getblockcount() + 1
        block_time += 10  # Advance far enough ahead
        for i in range(10):
            # Mine i blocks, and alternate announcing either via
            # inv (of tip) or via headers. After each, new blocks
            # mined by the node should successfully be announced
            # with block header, even though the blocks are never requested
            for j in range(2):
                blocks = []
                for _ in range(i + 1):
                    blocks.append(
                        create_block(tip, create_coinbase(height), block_time))
                    blocks[-1].solve()
                    tip = blocks[-1].sha256
                    block_time += 1
                    height += 1
                if j == 0:
                    # Announce via inv
                    test_node.send_block_inv(tip)
                    test_node.wait_for_getheaders()
                    # Should have received a getheaders now
                    test_node.send_header_for_blocks(blocks)
                    # Test that duplicate invs won't result in duplicate
                    # getdata requests, or duplicate headers announcements
                    [inv_node.send_block_inv(x.sha256) for x in blocks]
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    inv_node.sync_with_ping()
                else:
                    # Announce via headers
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    # Test that duplicate headers won't result in duplicate
                    # getdata requests (the check is further down)
                    inv_node.send_header_for_blocks(blocks)
                    inv_node.sync_with_ping()
                [test_node.send_message(MsgBlock(x)) for x in blocks]
                test_node.sync_with_ping()
                inv_node.sync_with_ping()
                # This block should not be announced to the inv node (since it also
                # broadcast it)
                assert "inv" not in inv_node.last_message
                assert "headers" not in inv_node.last_message
                tip = self.mine_blocks(1)
                assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
                assert_equal(test_node.check_last_announcement(headers=[tip]),
                             True)
                height += 1
                block_time += 1

        self.log.info("Part 2: success!")

        self.log.info(
            "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer..."
        )

        # PART 3.  Headers announcements can stop after large reorg, and resume after
        # getheaders or inv from peer.
        for j in range(2):
            # First try mining a reorg that can propagate with header announcement
            new_block_hashes = self.mine_reorg(length=7)
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(
                test_node.check_last_announcement(headers=new_block_hashes),
                True)

            block_time += 8

            # Mine a too-large reorg, which should be announced with a single inv
            new_block_hashes = self.mine_reorg(length=8)
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)

            block_time += 9

            # Zero-pad the hash to the full 64 hex characters for the RPC call
            fork_point = self.nodes[0].getblock(
                "%064x" % new_block_hashes[0])["previousblockhash"]
            fork_point = int(fork_point, 16)

            # Use getblocks/getdata
            test_node.send_getblocks(locator=[fork_point])
            assert_equal(
                test_node.check_last_announcement(inv=new_block_hashes), True)
            test_node.get_data(new_block_hashes)
            test_node.wait_for_block(new_block_hashes[-1])

            for i in range(3):
                # Mine another block, still should get only an inv
                tip = self.mine_blocks(1)
                assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
                assert_equal(test_node.check_last_announcement(inv=[tip]),
                             True)
                if i == 0:
                    # Just get the data -- shouldn't cause headers announcements to resume
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 1:
                    # Send a getheaders message that shouldn't trigger headers announcements
                    # to resume (best header sent will be too old)
                    test_node.get_headers(locator=[fork_point],
                                          hashstop=new_block_hashes[1])
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 2:
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                    # This time, try sending either a getheaders to trigger resumption
                    # of headers announcements, or mine a new block and inv it, also
                    # triggering resumption of headers announcements.
                    if j == 0:
                        test_node.get_headers(locator=[tip], hashstop=0)
                        test_node.sync_with_ping()
                    else:
                        test_node.send_block_inv(tip)
                        test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(headers=[tip]),
                         True)

        self.log.info("Part 3: success!")

        self.log.info("Part 4: Testing direct fetch behavior...")
        tip = self.mine_blocks(1)
        height = self.nodes[0].getblockcount() + 1
        last_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time']
        block_time = last_time + 1

        # Create 2 blocks.  Send the blocks, then send the headers.
        blocks = []
        for _ in range(2):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
            inv_node.send_message(MsgBlock(blocks[-1]))

        inv_node.sync_with_ping()  # Make sure blocks are processed
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        # should not have received any getdata messages
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # This time, direct fetch should work
        blocks = []
        for _ in range(3):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks],
                                   timeout=int(direct_fetch_response_time))

        [test_node.send_message(MsgBlock(x)) for x in blocks]

        test_node.sync_with_ping()

        # Now announce a header that forks the last two blocks
        tip = blocks[0].sha256
        height -= 1
        blocks = []

        # Create extra blocks for later
        for _ in range(20):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        # Announcing one block on fork should not trigger direct fetch
        # (less work than tip)
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[0:1])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # Announcing one more block on fork should trigger direct fetch for
        # both blocks (same work as tip)
        test_node.send_header_for_blocks(blocks[1:2])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]],
                                   timeout=int(direct_fetch_response_time))

        # Announcing 16 more headers should trigger direct fetch for 14 more
        # blocks
        test_node.send_header_for_blocks(blocks[2:18])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]],
                                   timeout=int(direct_fetch_response_time))

        # Announcing 1 more header should not trigger any response
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[18:19])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        self.log.info("Part 4: success!")

        # Now deliver all those blocks we announced.
        [test_node.send_message(MsgBlock(x)) for x in blocks]

        self.log.info("Part 5: Testing handling of unconnecting headers")
        # First we test that receipt of an unconnecting header doesn't prevent
        # chain sync.
        for i in range(10):
            test_node.last_message.pop("getdata", None)
            blocks = []
            # Create two more blocks.
            for j in range(2):
                blocks.append(
                    create_block(tip, create_coinbase(height), block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            # Send the header of the second block -> this won't connect.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[1]])
            test_node.wait_for_getheaders()
            test_node.send_header_for_blocks(blocks)
            test_node.wait_for_getdata([x.sha256 for x in blocks])
            [test_node.send_message(MsgBlock(x)) for x in blocks]
            test_node.sync_with_ping()
            assert_equal(int(self.nodes[0].getbestblockhash(), 16),
                         blocks[1].sha256)

        blocks = []
        # Now we test that if we repeatedly don't send connecting headers, we
        # don't go into an infinite loop trying to get them to connect.
        MAX_UNCONNECTING_HEADERS = 10
        for j in range(MAX_UNCONNECTING_HEADERS + 1):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        for i in range(1, MAX_UNCONNECTING_HEADERS):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i]])
            test_node.wait_for_getheaders()

        # Next header will connect, should re-set our count:
        test_node.send_header_for_blocks([blocks[0]])

        # Remove the first two entries (blocks[1] would connect):
        blocks = blocks[2:]

        # Now try to see how many unconnecting headers we can send
        # before we get disconnected.  Should be 5*MAX_UNCONNECTING_HEADERS
        for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i % len(blocks)]])
            test_node.wait_for_getheaders()

        # Eventually this stops working.
        test_node.send_header_for_blocks([blocks[-1]])

        # Should get disconnected
        test_node.wait_for_disconnect()

        self.log.info("Part 5: success!")

        # Finally, check that the inv node never received a getdata request,
        # throughout the test
        assert "getdata" not in inv_node.last_message
Example #24
    def run_test(self):
        # NLAST_POW_BLOCK = 250 - so mine 125 blocks each node (25 consecutive blocks for 5 times)
        NMATURITY = 100
        self.log.info("Mining 250 blocks (125 with node 0 and 125 with node 1)...")
        for i in range(5):
            self.generateBatchBlocks(0, 25)
            sync_blocks(self.nodes)
            self.generateBatchBlocks(1, 25)
            sync_blocks(self.nodes)
        sync_mempools(self.nodes)

        # Check balances
        balance0 = 250.0 * (125 - 50)
        balance1 = 250.0 * (125 - 50)
        # Last two 25-blocks bursts (for each node) are not mature: NMATURITY = 2 * (2 * 25)
        immature_balance0 = 250.0 * 50
        immature_balance1 = 250.0 * 50
        w_info = self.nodes[0].getwalletinfo()
        assert_equal(w_info["balance"], balance0)
        assert_equal(w_info["immature_balance"], immature_balance0)
        self.log.info("Balance for node 0 checks out: %f [%f]" % (balance0, immature_balance0))
        w_info = self.nodes[1].getwalletinfo()
        assert_equal(w_info["balance"], balance1)
        assert_equal(w_info["immature_balance"], immature_balance1)
        self.log.info("Balance for node 1 checks out: %f [%f]" % (balance1, immature_balance1))
        initial_balance = balance0
        initial_immature_balance = immature_balance0
        initial_unspent = self.nodes[0].listunspent()

        # PoS start reached (block 250) - disconnect nodes
        self.nodes[0].disconnectnode(urllib.parse.urlparse(self.nodes[1].url).hostname + ":" + str(p2p_port(1)))
        self.nodes[1].disconnectnode(urllib.parse.urlparse(self.nodes[0].url).hostname + ":" + str(p2p_port(0)))
        self.log.info("Nodes disconnected")

        # Stake one block with node-0 and save the stake input
        self.log.info("Staking 1 block with node 0...")
        self.nodes[0].generate(1)
        last_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
        assert(len(last_block["tx"]) > 1)                                       # a PoS block has at least two txes
        coinstake_txid = last_block["tx"][1]
        coinstake_tx = self.nodes[0].getrawtransaction(coinstake_txid, True)
        assert(coinstake_tx["vout"][0]["scriptPubKey"]["hex"] == "")            # first output of coinstake is empty
        stakeinput = coinstake_tx["vin"][0]

        # The stake input was unspent 1 block ago, now it's not
        res, utxo = self.findUtxoInList(stakeinput["txid"], stakeinput["vout"], initial_unspent)
        assert (res and utxo["spendable"])
        res, utxo = self.findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
        assert (not res or not utxo["spendable"])
        self.log.info("Coinstake input %s...%s-%d is no longer spendable." % (
            stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))

        # Stake 10 more blocks with node-0 and check balances
        self.log.info("Staking 10 more blocks with node 0...")
        self.generateBatchBlocks(0, 10)
        balance0 = initial_balance + 0          # mined blocks matured (250*11) - staked blocks inputs (250*11)
        immature_balance0 += 250 * 11           # -mined blocks matured (250*11) + staked blocks (500*11)
        w_info = self.nodes[0].getwalletinfo()
        assert_equal(w_info["balance"], balance0)
        assert_equal(w_info["immature_balance"], immature_balance0)
        self.log.info("Balance for node 0 checks out: %f [%f]" % (balance0, immature_balance0))

        # verify that the stakeinput can't be spent
        rawtx_unsigned = self.nodes[0].createrawtransaction(
            [{"txid": str(stakeinput["txid"]), "vout": int(stakeinput["vout"])}],
            {"xxncEuJK27ygNh7imNfaX8JV6ZQUnoBqzN": 249.99})
        rawtx = self.nodes[0].signrawtransaction(rawtx_unsigned)
        assert(rawtx["complete"])
        assert_raises_rpc_error(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx["hex"])

        # Stake 12 blocks with node-1
        self.log.info("Staking 12 blocks with node 1...")
        self.generateBatchBlocks(1, 12)
        balance1 -= 250 * 12                       # 0 - staked blocks inputs (250*12)
        immature_balance1 += 500 * 12              # + staked blocks (500 * 12)
        w_info = self.nodes[1].getwalletinfo()
        assert_equal(w_info["balance"], balance1)
        assert_equal(w_info["immature_balance"], immature_balance1)
        self.log.info("Balance for node 1 checks out: %f [%f]" % (balance1, immature_balance1))
        new_best_hash = self.nodes[1].getbestblockhash()

        # re-connect and sync nodes and check that node-0 gets on the other chain
        self.log.info("Connecting and syncing nodes...")
        connect_nodes_bi(self.nodes, 0, 1)
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].getbestblockhash(), new_best_hash)

        # check balance of node-0
        balance0 = initial_balance + 250 * 12                       # + mined blocks matured (250*12)
        immature_balance0 = initial_immature_balance - 250 * 12     # - mined blocks matured (250*12)
        w_info = self.nodes[0].getwalletinfo()
        assert_equal(w_info["balance"], balance0)                   # <--- !!! THIS FAILS before PR #1043
        assert_equal(w_info["immature_balance"], immature_balance0)
        self.log.info("Balance for node 0 checks out: %f [%f]" % (balance0, immature_balance0))

        # check that NOW the original stakeinput is present and spendable
        res, utxo = self.findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
        assert (res and utxo["spendable"])                          # <--- !!! THIS FAILS before PR #1043
        self.log.info("Coinstake input %s...%s-%d is spendable again." % (
            stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
        self.nodes[0].sendrawtransaction(rawtx["hex"])
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        res, utxo = self.findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
        assert (not res or not utxo["spendable"])
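
Note: the findUtxoInList helper called above is defined elsewhere in this test class; a minimal sketch consistent with its call sites, which expect a (found, utxo) pair for a given outpoint in a listunspent result:

    def findUtxoInList(self, txid, vout, utxo_list):
        # Scan a listunspent-style list for the outpoint (txid, vout).
        for utxo in utxo_list:
            if utxo["txid"] == txid and utxo["vout"] == vout:
                return True, utxo
        return False, None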
Exemplo n.º 25
0
    def run_test(self):
        test = TestManager()
        test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
        NetworkThread().start()  # Start up network handling in another thread
        test.run()
Exemplo n.º 26
0
    def setup_network(self, split=False):
        if self.options.parent_bitcoin and self.options.parent_binpath == "":
            raise Exception(
                "Can't run with --parent_bitcoin without specifying --parent_binpath"
            )

        self.nodes = []
        # Setup parent nodes
        parent_chain = "parent" if not self.options.parent_bitcoin else "regtest"
        parent_binary = [self.options.parent_binpath] if self.options.parent_binpath != "" else None
        for n in range(2):
            extra_args = [
                "-port=" + str(p2p_port(n)), "-rpcport=" + str(rpc_port(n))
            ]
            if self.options.parent_bitcoin:
                # bitcoind can't read elements.conf config files
                extra_args.extend([
                    "-regtest=1",
                    "-printtoconsole=0",
                    "-server=1",
                    "-discover=0",
                    "-keypool=1",
                    "-listenonion=0",
                    "-addresstype=legacy",  # To make sure bitcoind gives back p2pkh no matter version
                ])
            else:
                extra_args.extend([
                    "-validatepegin=0",
                    "-initialfreecoins=0",
                    "-anyonecanspendaremine",
                    "-signblockscript=51",  # OP_TRUE
                    '-con_blocksubsidy=5000000000',
                ])

            self.add_nodes(1, [extra_args],
                           chain=[parent_chain],
                           binary=parent_binary,
                           chain_in_args=[not self.options.parent_bitcoin])
            self.start_node(n)
            print("Node {} started".format(n))

        connect_nodes_bi(self.nodes, 0, 1)
        self.parentgenesisblockhash = self.nodes[0].getblockhash(0)
        if not self.options.parent_bitcoin:
            parent_pegged_asset = self.nodes[0].getsidechaininfo()['pegged_asset']

        # Setup sidechain nodes
        self.fedpeg_script = "512103dff4923d778550cc13ce0d887d737553b4b58f4e8e886507fc39f5e447b2186451ae"
        for n in range(2):
            extra_args = [
                "-printtoconsole=0",
                "-port=" + str(p2p_port(2 + n)),
                "-rpcport=" + str(rpc_port(2 + n)),
                '-parentgenesisblockhash=%s' % self.parentgenesisblockhash,
                '-validatepegin=1',
                '-fedpegscript=%s' % self.fedpeg_script,
                '-anyonecanspendaremine=0',
                '-minrelaytxfee=0',
                '-blockmintxfee=0',
                '-initialfreecoins=0',
                '-peginconfirmationdepth=10',
                '-mainchainrpchost=127.0.0.1',
                '-mainchainrpcport=%s' % rpc_port(n),
                '-recheckpeginblockinterval=15',  # Long enough to allow failure and repair before timeout
                '-parentpubkeyprefix=111',
                '-parentscriptprefix=196',
            ]
            if not self.options.parent_bitcoin:
                extra_args.extend([
                    # -parentpubkeyprefix/-parentscriptprefix are already set
                    # unconditionally above, so only the elements-specific
                    # arguments are added here.
                    '-con_parent_chain_signblockscript=51',
                    '-con_parent_pegged_asset=%s' % parent_pegged_asset,
                ])

            # Use rpcuser auth only for first parent.
            if n == 0:
                # Extract username and password from cookie file and use directly.
                datadir = get_datadir_path(self.options.tmpdir, n)
                rpc_u, rpc_p = get_auth_cookie(datadir, parent_chain)
                extra_args.extend([
                    '-mainchainrpcuser=%s' % rpc_u,
                    '-mainchainrpcpassword=%s' % rpc_p,
                ])
            else:
                # Need to specify where to find parent cookie file
                datadir = get_datadir_path(self.options.tmpdir, n)
                extra_args.append('-mainchainrpccookiefile=' + datadir + "/" +
                                  parent_chain + "/.cookie")

            self.add_nodes(1, [extra_args], chain=["sidechain"])
            self.start_node(2 + n)
            print("Node {} started".format(2 + n))

        # We only connect the same-chain nodes, so sync_all works correctly
        connect_nodes_bi(self.nodes, 2, 3)
        self.node_groups = [[self.nodes[0], self.nodes[1]],
                            [self.nodes[2], self.nodes[3]]]
        self.sync_all(self.node_groups)
        print("Setting up network done")
Exemplo n.º 27
0
    def run_test(self):
        testnode0 = TestNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                                    testnode0, "regtest", OVERWINTER_PROTO_VERSION))
        testnode0.add_connection(connections[0])

        # Start up network handling in another thread
        NetworkThread().start()
        testnode0.wait_for_verack()

        # Verify mininodes are connected to zcashd nodes
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(0, peerinfo[0]["banscore"])

        # Mine some blocks so we can spend
        coinbase_blocks = self.nodes[0].generate(200)
        node_address = self.nodes[0].getnewaddress()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 0)

        # Mininodes send expiring soon transaction in "tx" message to zcashd node
        self.send_transaction(testnode0, coinbase_blocks[0], node_address, 203)

        # Assert that the tx is not in the mempool (expiring soon)
        assert_equal([], self.nodes[0].getrawmempool())
        assert_equal([], self.nodes[1].getrawmempool())
        assert_equal([], self.nodes[2].getrawmempool())

        # Mininodes send transaction in "tx" message to zcashd node
        tx2 = self.send_transaction(testnode0, coinbase_blocks[1], node_address, 204)

        # tx2 is not expiring soon
        assert_equal([tx2.hash], self.nodes[0].getrawmempool())
        assert_equal([tx2.hash], self.nodes[1].getrawmempool())
        # node 2 is isolated
        assert_equal([], self.nodes[2].getrawmempool())

        # Verify txid for tx2
        self.verify_inv(testnode0, tx2)
        self.send_data_message(testnode0, tx2)
        self.verify_last_tx(testnode0, tx2)

        # Sync and mine an empty block with node 2, leaving tx in the mempool of node0 and node1
        for blkhash in coinbase_blocks:
            blk = self.nodes[0].getblock(blkhash, 0)
            self.nodes[2].submitblock(blk)
        self.nodes[2].generate(1)

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Reconnect node 2 to the network
        connect_nodes_bi(self.nodes, 0, 2)

        # Set up test node for node 2
        testnode2 = TestNode()
        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2],
                                    testnode2, "regtest", OVERWINTER_PROTO_VERSION))
        testnode2.add_connection(connections[-1])

        # Verify block count
        sync_blocks(self.nodes[:3])
        assert_equal(self.nodes[0].getblockcount(), 201)
        assert_equal(self.nodes[1].getblockcount(), 201)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Verify contents of mempool
        assert_equal([tx2.hash], self.nodes[0].getrawmempool())
        assert_equal([tx2.hash], self.nodes[1].getrawmempool())
        assert_equal([], self.nodes[2].getrawmempool())

        # Confirm tx2 cannot be submitted to a mempool because it is expiring soon.
        try:
            rawtx2 = hexlify(tx2.serialize())
            self.nodes[2].sendrawtransaction(rawtx2)
            fail("Sending transaction should have failed")
        except JSONRPCException as e:
            assert_equal(
                "tx-expiring-soon: expiryheight is 204 but should be at least 205 to avoid transaction expiring soon",
                e.error['message']
            )

        self.send_data_message(testnode0, tx2)

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify node 0 does not reply to "getdata" by sending "tx" message, as tx2 is expiring soon
        with mininode_lock:
            assert_equal(testnode0.last_tx, None)

        # Verify mininode received a "notfound" message containing the txid of tx2
        with mininode_lock:
            msg = testnode0.last_notfound
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Create a transaction to verify that processing of "getdata" messages is functioning
        tx3 = self.send_transaction(testnode0, coinbase_blocks[2], node_address, 999)

        self.send_data_message(testnode0, tx3)
        self.verify_last_tx(testnode0, tx3)
        # Verify txid for tx3 is returned in "inv", but tx2 which is expiring soon is not returned
        self.verify_inv(testnode0, tx3)
        self.verify_inv(testnode2, tx3)

        # Verify contents of mempool
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[0].getrawmempool()))
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[1].getrawmempool()))
        assert_equal({tx3.hash}, set(self.nodes[2].getrawmempool()))

        # Verify banscore for nodes are still zero
        assert_equal(0, sum(peer["banscore"] for peer in self.nodes[0].getpeerinfo()))
        assert_equal(0, sum(peer["banscore"] for peer in self.nodes[2].getpeerinfo()))

        [c.disconnect_node() for c in connections]
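
Note: the reject message above pins down the expiring-soon rule at this point in the test: the chain is at height 201, the next block is 202, and tx2's expiryheight of 204 falls short of the required 205. A tiny model of that check, with the threshold inferred from the message rather than taken from zcashd source:

    TX_EXPIRING_SOON_THRESHOLD = 3  # inferred from "should be at least 205"

    def expiring_soon(expiry_height, chain_height):
        # A tx is rejected as expiring soon if it would expire within the
        # threshold window after the next block.
        return expiry_height < chain_height + 1 + TX_EXPIRING_SOON_THRESHOLD

    assert expiring_soon(204, 201)        # rejected in the test above
    assert not expiring_soon(205, 201)    # minimum acceptable expiry height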
Exemplo n.º 28
0
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.
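
        # A minimal sketch of that __getattr__ dispatch (illustrative only;
        # the framework's real proxy lives elsewhere and also records RPC
        # coverage):
        class _RpcDispatchSketch:
            def __init__(self, rpc):
                self._rpc = rpc

            def __getattr__(self, name):
                # Attributes not found on this object resolve to the wrapped
                # RPC connection, so sketch.getblockcount() invokes the RPC.
                return getattr(self._rpc, name)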

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        height = 1

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height),
                                 self.block_time)
            block.solve()
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our NodeConn connection
            node0.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], self.nodes[2])

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info(
            "Wait for node2 to reach the current tip. Test that it has propagated all the blocks to us"
        )

        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        node2.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # NodeConnCB objects.
        wait_until(lambda: sorted(blocks) == sorted(
            list(node2.block_receive_map.keys())),
                   timeout=5,
                   lock=mininode_lock)
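
        # For reference, wait_until is roughly the following polling loop (a
        # sketch; the framework's helper also acquires the lock given via the
        # `lock` argument around each predicate check):
        def _wait_until_sketch(predicate, timeout=60, poll=0.05):
            import time
            deadline = time.time() + timeout
            while time.time() < deadline:
                if predicate():
                    return
                time.sleep(poll)
            raise AssertionError("Predicate not true within timeout")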

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
Exemplo n.º 29
0
    def run_test(self):
        self.test_node = TestNode()
        self.test_node.add_connection(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
        NetworkThread().start()  # Start up network handling in another thread
        self.test_node.wait_for_verack()

        self.confirm_mns()

        null_hash = format(0, "064x")

        # Check if a diff with the genesis block as base returns all MNs
        expectedUpdated = [mn.proTxHash for mn in self.mninfo]
        mnList = self.test_getmnlistdiff(null_hash,
                                         self.nodes[0].getbestblockhash(), {},
                                         [], expectedUpdated)
        expectedUpdated2 = list(expectedUpdated)  # keep a copy for the final genesis-diff check

        # Register one more MN, but don't start it (that would fail as AxeTestFramework doesn't support this atm)
        baseBlockHash = self.nodes[0].getbestblockhash()
        self.prepare_masternode(self.mn_count)
        new_mn = self.mninfo[self.mn_count]

        # Now test if that MN appears in a diff when the base block is the one just before MN registration
        expectedDeleted = []
        expectedUpdated = [new_mn.proTxHash]
        mnList = self.test_getmnlistdiff(baseBlockHash,
                                         self.nodes[0].getbestblockhash(),
                                         mnList, expectedDeleted,
                                         expectedUpdated)
        assert (mnList[new_mn.proTxHash].confirmedHash == 0)
        # Now let the MN get enough confirmations and verify that the MNLISTDIFF now has confirmedHash != 0
        self.confirm_mns()
        mnList = self.test_getmnlistdiff(baseBlockHash,
                                         self.nodes[0].getbestblockhash(),
                                         mnList, expectedDeleted,
                                         expectedUpdated)
        assert (mnList[new_mn.proTxHash].confirmedHash != 0)

        # Spend the collateral of the previously added MN and test if it appears in "deletedMNs"
        expectedDeleted = [new_mn.proTxHash]
        expectedUpdated = []
        baseBlockHash2 = self.nodes[0].getbestblockhash()
        self.remove_masternode(self.mn_count)
        mnList = self.test_getmnlistdiff(baseBlockHash2,
                                         self.nodes[0].getbestblockhash(),
                                         mnList, expectedDeleted,
                                         expectedUpdated)

        # When comparing genesis and best block, we shouldn't see the previously added and then deleted MN
        mnList = self.test_getmnlistdiff(null_hash,
                                         self.nodes[0].getbestblockhash(), {},
                                         [], expectedUpdated2)

        #############################
        # Now start testing quorum commitment merkle roots

        self.nodes[0].generate(1)
        oldhash = self.nodes[0].getbestblockhash()
        # Test DIP8 activation once with a pre-existing quorum and once without (we don't know in which order it will activate on mainnet)
        self.test_dip8_quorum_merkle_root_activation(True)
        for n in self.nodes:
            n.invalidateblock(oldhash)
        self.sync_all()
        first_quorum = self.test_dip8_quorum_merkle_root_activation(False)

        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()

        # Verify that the first quorum appears in MNLISTDIFF
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(first_quorum, 16))]
        quorumList = self.test_getmnlistdiff_quorums(
            null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted,
            expectedNew)
        baseBlockHash = self.nodes[0].getbestblockhash()

        second_quorum = self.mine_quorum()

        # Verify that the second quorum appears in MNLISTDIFF
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(second_quorum, 16))]
        quorums_before_third = self.test_getmnlistdiff_quorums(
            baseBlockHash, self.nodes[0].getbestblockhash(), quorumList,
            expectedDeleted, expectedNew)
        block_before_third = self.nodes[0].getbestblockhash()

        third_quorum = self.mine_quorum()

        # Verify that the first quorum is deleted and the third quorum is added in MNLISTDIFF (the first got inactive)
        expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
        expectedNew = [QuorumId(100, int(third_quorum, 16))]
        self.test_getmnlistdiff_quorums(block_before_third,
                                        self.nodes[0].getbestblockhash(),
                                        quorums_before_third, expectedDeleted,
                                        expectedNew)

        # Verify that the diff between genesis and best block is the current active set (second and third quorum)
        expectedDeleted = []
        expectedNew = [
            QuorumId(100, int(second_quorum, 16)),
            QuorumId(100, int(third_quorum, 16))
        ]
        self.test_getmnlistdiff_quorums(null_hash,
                                        self.nodes[0].getbestblockhash(), {},
                                        expectedDeleted, expectedNew)

        # Now verify that diffs are correct around the block that mined the third quorum.
        # This tests the logic in CalcCbTxMerkleRootQuorums, which has to manually add the commitment from the current
        # block
        mined_in_block = self.nodes[0].quorum("info", 100,
                                              third_quorum)["minedBlock"]
        prev_block = self.nodes[0].getblock(
            mined_in_block)["previousblockhash"]
        prev_block2 = self.nodes[0].getblock(prev_block)["previousblockhash"]
        next_block = self.nodes[0].getblock(mined_in_block)["nextblockhash"]
        next_block2 = self.nodes[0].getblock(mined_in_block)["nextblockhash"]
        # The two blocks before the quorum was mined should both give an empty diff
        expectedDeleted = []
        expectedNew = []
        self.test_getmnlistdiff_quorums(block_before_third, prev_block2,
                                        quorums_before_third, expectedDeleted,
                                        expectedNew)
        self.test_getmnlistdiff_quorums(block_before_third, prev_block,
                                        quorums_before_third, expectedDeleted,
                                        expectedNew)
        # The block in which the quorum was mined and the two after it should all give the same diff
        expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
        expectedNew = [QuorumId(100, int(third_quorum, 16))]
        quorums_with_third = self.test_getmnlistdiff_quorums(
            block_before_third, mined_in_block, quorums_before_third,
            expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_third, next_block,
                                        quorums_before_third, expectedDeleted,
                                        expectedNew)
        self.test_getmnlistdiff_quorums(block_before_third, next_block2,
                                        quorums_before_third, expectedDeleted,
                                        expectedNew)
        # A diff between the two blocks that came after the quorum was mined should give an empty diff
        expectedDeleted = []
        expectedNew = []
        self.test_getmnlistdiff_quorums(mined_in_block, next_block,
                                        quorums_with_third, expectedDeleted,
                                        expectedNew)
        self.test_getmnlistdiff_quorums(mined_in_block, next_block2,
                                        quorums_with_third, expectedDeleted,
                                        expectedNew)
        self.test_getmnlistdiff_quorums(next_block, next_block2,
                                        quorums_with_third, expectedDeleted,
                                        expectedNew)

        # Using the same block for baseBlockHash and blockHash should give empty diffs
        self.test_getmnlistdiff_quorums(prev_block, prev_block,
                                        quorums_before_third, expectedDeleted,
                                        expectedNew)
        self.test_getmnlistdiff_quorums(prev_block2, prev_block2,
                                        quorums_before_third, expectedDeleted,
                                        expectedNew)
        self.test_getmnlistdiff_quorums(mined_in_block, mined_in_block,
                                        quorums_with_third, expectedDeleted,
                                        expectedNew)
        self.test_getmnlistdiff_quorums(next_block, next_block,
                                        quorums_with_third, expectedDeleted,
                                        expectedNew)
        self.test_getmnlistdiff_quorums(next_block2, next_block2,
                                        quorums_with_third, expectedDeleted,
                                        expectedNew)
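
Note: the QuorumId values compared above are just hashable (LLMQ type, quorum block hash) pairs, 100 being the LLMQ type this regtest setup uses; a stand-in sketch (the framework may define QuorumId differently):

    from collections import namedtuple

    # Hypothetical stand-in: a hashable pair usable as a dict key in the
    # expected new/deleted quorum sets passed to test_getmnlistdiff_quorums.
    QuorumIdSketch = namedtuple("QuorumIdSketch", ["llmqType", "quorumHash"])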
Exemplo n.º 30
0
    def run_test(self):

        # Connect to node0
        node0 = BaseNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread
        node0.wait_for_verack()

        # Build the blockchain
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

        self.blocks = []

        # Get a pubkey for the coinbase TXO
        coinbase_key = CECKey()
        coinbase_key.set_secretbytes(b"horsebattery")
        coinbase_pubkey = coinbase_key.get_pubkey()

        # Create the first block with a coinbase output to our key
        height = 1
        block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
        self.blocks.append(block)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1

        # Bury the block 100 deep so the coinbase output is spendable
        for i in range(100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        # Create a transaction spending the coinbase output with an invalid (null) signature
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
        tx.calc_sha256()

        block102 = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block102.vtx.extend([tx])
        block102.hashMerkleRoot = block102.calc_merkle_root()
        block102.rehash()
        block102.solve()
        self.blocks.append(block102)
        self.tip = block102.sha256
        self.block_time += 1
        height += 1

        # Bury the assumed valid block 2100 deep
        for i in range(2100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.nVersion = 4
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
        self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
        node1 = BaseNode()  # connects to node1
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
        node1.add_connection(connections[1])
        node1.wait_for_verack()

        self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])
        node2 = BaseNode()  # connects to node2
        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[2])
        node2.wait_for_verack()

        # send header lists to all three nodes
        node0.send_header_for_blocks(self.blocks[0:2000])
        node0.send_header_for_blocks(self.blocks[2000:])
        node1.send_header_for_blocks(self.blocks[0:2000])
        node1.send_header_for_blocks(self.blocks[2000:])
        node2.send_header_for_blocks(self.blocks[0:200])

        # Send blocks to node0. Block 102 will be rejected.
        self.send_blocks_until_disconnected(node0)
        self.assert_blockchain_height(self.nodes[0], 101)

        # Send all blocks to node1. All blocks will be accepted.
        for i in range(2202):
            node1.send_message(msg_block(self.blocks[i]))
        # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
        node1.sync_with_ping(120)
        assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)

        # Send blocks to node2. Block 102 will be rejected.
        self.send_blocks_until_disconnected(node2)
        self.assert_blockchain_height(self.nodes[2], 101)
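
Note: send_blocks_until_disconnected is a helper defined elsewhere in this test; a plausible sketch, which keeps feeding blocks until the node hangs up over the invalid block 102:

    def send_blocks_until_disconnected_sketch(self, p2p_node):
        # Send blocks one at a time until the connection is closed.
        for block in self.blocks:
            try:
                p2p_node.send_message(msg_block(block))
            except IOError:
                # The node disconnected us after rejecting the bad block.
                break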
Exemplo n.º 31
0
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()   # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start() # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [n.generate(1) for n in self.nodes]
        tips = [int(n.getbestblockhash(), 16) for n in self.nodes]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = time.time() + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        print "First height 2 block accepted by both nodes"

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        print "Second height 2 block accepted only from whitelisted peer"

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        try:
            self.nodes[0].getblock(blocks_h3[0].hash)
            print("Unrequested more-work block accepted from non-whitelisted peer")
        except JSONRPCException:
            raise AssertionError("Unrequested more work block was not processed")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        print "Successfully reorged to length 3 chain from whitelisted peer"

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []   # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256, create_coinbase(), tips[j].nTime+1)
                next_block.solve()
                if j==0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        for x in all_blocks:
            try:
                self.nodes[0].getblock(x.hash)
                if x == all_blocks[287]:
                    raise AssertionError("Unrequested block too far-ahead should have been ignored")
            except JSONRPCException:
                # Only the RPC lookup failure from getblock lands here, so the
                # AssertionError above propagates instead of being swallowed.
                if x == all_blocks[287]:
                    print("Unrequested block too far-ahead not processed")
                else:
                    raise AssertionError("Unrequested block with more work should have been accepted")

        headers_message.headers.pop() # Ensure the last block is unrequested
        white_node.send_message(headers_message) # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        try:
            white_node.sync_with_ping()
            self.nodes[1].getblock(tips[1].hash)
            print("Unrequested block far ahead of tip accepted from whitelisted peer")
        except JSONRPCException:
            raise AssertionError("Unrequested block from whitelisted peer not accepted")

        # 5. Test handling of an unrequested block on the node that didn't
        # process it. The block should still not be processed (even though it
        # has a child with more work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        print "Unrequested block that would complete more-work chain was ignored"

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        print "Inv at tip triggered getdata for unprocessed block"

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        print "Successfully reorged to longer chain from non-whitelisted peer"

        [ c.disconnect_node() for c in connections ]
Exemplo n.º 32
0
    def run_test(self):
        # Set up test nodes.
        # - test_nodes[0] will only request v4 transactions
        # - test_nodes[1] will only request v5 transactions
        # - test_nodes[2] will test invalid v4 request using MSG_WTXID
        # - test_nodes[3] will test invalid v5 request using MSG_TX
        test_nodes = []
        connections = []

        for i in range(4):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1',
                         p2p_port(0),
                         self.nodes[0],
                         test_nodes[i],
                         protocol_version=NU5_PROTO_VERSION))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        net_version = self.nodes[0].getnetworkinfo()["protocolversion"]
        if net_version < NU5_PROTO_VERSION:
            # Sending a getdata message containing a MSG_WTX CInv message type
            # results in a reject message.
            self.verify_invalid_cinv(
                test_nodes[0],
                connections[0],
                5,
                "Negotiated protocol version does not support CInv message type MSG_WTX",
            )

            # Sending a getdata message containing an invalid CInv message type
            # results in a reject message.
            self.verify_invalid_cinv(test_nodes[1], connections[1], 0xffff,
                                     "Unknown CInv message type")

            print(
                "Node's block index is not NU5-aware, skipping remaining tests"
            )
            return

        # Load funds into the Sprout address
        sproutzaddr = self.nodes[0].z_getnewaddress('sprout')
        result = self.nodes[2].z_shieldcoinbase("*", sproutzaddr, 0)
        wait_and_assert_operationid_status(self.nodes[2], result['opid'])
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        # Activate NU5. Block height after this is 210.
        self.nodes[0].generate(9)
        self.sync_all()

        # Add v4 transaction to the mempool.
        node1_taddr = self.nodes[1].getnewaddress()
        opid = self.nodes[0].z_sendmany(sproutzaddr, [{
            'address': node1_taddr,
            'amount': 1,
        }])
        v4_txid = uint256_from_str(
            hex_str_to_bytes(
                wait_and_assert_operationid_status(self.nodes[0], opid))[::-1])

        # Add v5 transaction to the mempool.
        v5_txid = self.nodes[0].sendtoaddress(node1_taddr, 1, "", "", True)
        v5_tx = self.nodes[0].getrawtransaction(v5_txid, 1)
        assert_equal(v5_tx['version'], 5)
        v5_txid = uint256_from_str(hex_str_to_bytes(v5_txid)[::-1])
        v5_auth_digest = uint256_from_str(
            hex_str_to_bytes(v5_tx['authdigest'])[::-1])

        # Wait for the mempools to sync.
        self.sync_all()

        #
        # inv
        #

        # On a mempool request, nodes should return an inv message containing:
        # - the v4 tx, with type MSG_TX.
        # - the v5 tx, with type MSG_WTX.
        for testnode in test_nodes:
            self.verify_inv(testnode, [
                self.cinv_for(v4_txid),
                self.cinv_for(v5_txid, v5_auth_digest),
            ])

        #
        # getdata
        #

        # We can request a v4 transaction with MSG_TX.
        self.send_data_message(test_nodes[0], v4_txid)
        self.verify_last_tx(test_nodes[0], v4_txid)

        # We can request a v5 transaction with MSG_WTX.
        self.send_data_message(test_nodes[1], v5_txid, v5_auth_digest)
        self.verify_last_tx(test_nodes[1], v5_txid, v5_auth_digest)

        # Requesting with a different authDigest results in a notfound.
        self.send_data_message(test_nodes[1], v5_txid, 1)
        self.verify_last_notfound(test_nodes[1], v5_txid, 1)

        # Requesting a v4 transaction with MSG_WTX causes a disconnect.
        self.send_data_message(test_nodes[2], v4_txid, (1 << 256) - 1)
        self.verify_disconnected(test_nodes[2])

        # Requesting a v5 transaction with MSG_TX causes a disconnect.
        self.send_data_message(test_nodes[3], v5_txid)
        self.verify_disconnected(test_nodes[3])

        # Sending a getdata message containing an invalid CInv message type
        # results in a reject message.
        self.verify_invalid_cinv(test_nodes[0], connections[0], 0xffff,
                                 "Unknown CInv message type")

        [c.disconnect_node() for c in connections]
        self.log_stderr.close()
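
Note: the inv/getdata pairing exercised above hinges on the CInv message type: v4 transactions are announced and requested as MSG_TX (type 1) keyed by txid alone, while v5 transactions use MSG_WTX (type 5) keyed by the (txid, authDigest) pair. A toy version of that pairing rule, independent of the framework's CInv class:

    MSG_TX, MSG_WTX = 1, 5  # MSG_WTX value matches the reject test above

    def inv_entry_for(txid, auth_digest=None):
        # v5 entries carry both hashes; v4 entries carry only the txid.
        if auth_digest is None:
            return (MSG_TX, txid)
        return (MSG_WTX, txid, auth_digest)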
Exemplo n.º 33
0
    def run_test(self):
        test = TestManager()

        # Launch Sprout, Overwinter, and Sapling mininodes
        nodes = []
        for x in range(10):
            nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                test, "regtest", SPROUT_PROTO_VERSION))
            nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                test, "regtest", OVERWINTER_PROTO_VERSION))
            nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                test, "regtest", SAPLING_PROTO_VERSION))

        # Start up network handling in another thread
        NetworkThread().start()

        # Sprout consensus rules apply at block height 9
        self.nodes[0].generate(9)
        assert_equal(9, self.nodes[0].getblockcount())

        # Verify mininodes are still connected to zcashd node
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(10, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(10, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(10, versions.count(SAPLING_PROTO_VERSION))

        # Overwinter consensus rules activate at block height 10
        self.nodes[0].generate(1)
        assert_equal(10, self.nodes[0].getblockcount())
        print('Overwinter active')

        # Mininodes send ping message to zcashd node.
        pingCounter = 1
        for node in nodes:
            node.send_message(msg_ping(pingCounter))
            pingCounter = pingCounter + 1

        time.sleep(3)

        # Verify Sprout mininodes have been dropped, while Overwinter and
        # Sapling mininodes are still connected.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(10, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(10, versions.count(SAPLING_PROTO_VERSION))

        # Extend the Overwinter chain with another block.
        self.nodes[0].generate(1)

        # Connect a new Overwinter mininode to the zcashd node, which is accepted.
        nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", OVERWINTER_PROTO_VERSION))
        time.sleep(3)
        assert_equal(21, len(self.nodes[0].getpeerinfo()))

        # Connect a new Sapling mininode to the zcashd node, which is accepted.
        nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", SAPLING_PROTO_VERSION))
        time.sleep(3)
        assert_equal(22, len(self.nodes[0].getpeerinfo()))

        # Try to connect a new Sprout mininode to the zcashd node, which is rejected.
        sprout = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", SPROUT_PROTO_VERSION)
        nodes.append(sprout)
        time.sleep(3)
        assert("Version must be 170003 or greater" in str(sprout.rejectMessage))

        # Verify that only Overwinter and Sapling mininodes are connected.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(11, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(11, versions.count(SAPLING_PROTO_VERSION))

        # Sapling consensus rules activate at block height 15
        self.nodes[0].generate(4)
        assert_equal(15, self.nodes[0].getblockcount())
        print('Sapling active')

        # Mininodes send ping message to zcashd node.
        pingCounter = 1
        for node in nodes:
            node.send_message(msg_ping(pingCounter))
            pingCounter = pingCounter + 1

        time.sleep(3)

        # Verify Sprout and Overwinter mininodes have been dropped, while
        # Sapling mininodes are still connected.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(0, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(11, versions.count(SAPLING_PROTO_VERSION))

        # Extend the Sapling chain with another block.
        self.nodes[0].generate(1)

        # Connect a new Sapling mininode to the zcashd node, which is accepted.
        nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", SAPLING_PROTO_VERSION))
        time.sleep(3)
        assert_equal(12, len(self.nodes[0].getpeerinfo()))

        # Try to connect a new Sprout mininode to the zcashd node, which is rejected.
        sprout = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", SPROUT_PROTO_VERSION)
        nodes.append(sprout)
        time.sleep(3)
        assert("Version must be 170006 or greater" in str(sprout.rejectMessage))

        # Try to connect a new Overwinter mininode to the zcashd node, which is rejected.
        overwinter = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", OVERWINTER_PROTO_VERSION)
        nodes.append(overwinter)
        time.sleep(3)
        assert("Version must be 170006 or greater" in str(overwinter.rejectMessage))

        # Verify that only Sapling mininodes are connected.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(0, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(12, versions.count(SAPLING_PROTO_VERSION))

        for node in nodes:
            node.disconnect_node()
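
Note: the connect/disconnect pattern above reflects a minimum-protocol-version gate that rises with each network upgrade; a toy model of it, with the activation heights and the 170003/170006 constants taken from this test's assertions (the 170002 Sprout value is an assumption):

    SPROUT_PROTO_VERSION = 170002      # assumed pre-Overwinter version
    OVERWINTER_PROTO_VERSION = 170003  # "Version must be 170003 or greater"
    SAPLING_PROTO_VERSION = 170006     # "Version must be 170006 or greater"

    def min_peer_version(height):
        # Overwinter activates at height 10 and Sapling at height 15 in this
        # regtest setup.
        if height >= 15:
            return SAPLING_PROTO_VERSION
        if height >= 10:
            return OVERWINTER_PROTO_VERSION
        return SPROUT_PROTO_VERSION

    assert min_peer_version(9) == SPROUT_PROTO_VERSION
    assert min_peer_version(10) == OVERWINTER_PROTO_VERSION
    assert min_peer_version(15) == SAPLING_PROTO_VERSION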
Exemplo n.º 34
0
    def run_test(self):
        block_count = 0

        # Create the P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        node2 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2)
        node2.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        node2.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count, spend=out[0], extra_txns=2)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block4_hard = self.chain.next_block(block_count, spend=out[0], extra_txns=10)
        block_count += 1

        # send two "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")
        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        node0.send_message(msg_block(block2_hard))
        node1.send_message(msg_block(block4_hard))

        # make sure we started validating blocks
        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        self.log.info(f"easier block3 hash: {block3_easier.hash}")
        node2.send_message(msg_block(block3_easier))

        self.nodes[0].waitforblockheight(102)
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
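
Note: wait_for_waiting_blocks and wait_for_validating_blocks come from this test's support code; a plausible polling sketch of the former, assuming an RPC that lists the block hashes currently held back by waitaftervalidatingblock (the getwaitingblocks name below is hypothetical):

    def wait_for_waiting_blocks_sketch(expected_hashes, node, log, timeout=30):
        import time
        deadline = time.time() + timeout
        while time.time() < deadline:
            waiting = set(node.getwaitingblocks())  # hypothetical RPC name
            if expected_hashes.issubset(waiting):
                log.info("blocks %s are in the waiting list", expected_hashes)
                return
            time.sleep(0.1)
        raise AssertionError("blocks never entered the waiting list")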
Exemplo n.º 35
0
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        self.nodes[0].generate(10)

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info(
            "Wait for node2 to reach the current tip. Test that it has propagated all the blocks to us"
        )

        getdata_request = MsgGetdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        node2.send_message(getdata_request)
        self.sync_all([self.nodes[1:2]])

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
Exemplo n.º 36
0
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        self.test_node = TestNode()
        self.segwit_node = TestNode()
        self.old_node = TestNode()  # version 1 peer <--> segwit node

        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node),
            NodeConn('127.0.0.1',
                     p2p_port(1),
                     self.nodes[1],
                     self.segwit_node,
                     services=NODE_NETWORK | NODE_WITNESS),
            NodeConn('127.0.0.1',
                     p2p_port(1),
                     self.nodes[1],
                     self.old_node,
                     services=NODE_NETWORK)
        ]
        self.test_node.add_connection(connections[0])
        self.segwit_node.add_connection(connections[1])
        self.old_node.add_connection(connections[2])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        self.test_node.wait_for_verack()

        # We will need UTXOs to construct transactions in later tests.
        self.make_utxos()

        self.log.info("Running tests, pre-segwit activation:")

        self.log.info("Testing SENDCMPCT p2p message... ")
        self.test_sendcmpct(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_sendcmpct(self.nodes[1],
                            self.segwit_node,
                            2,
                            old_node=self.old_node)
        sync_blocks(self.nodes)

        self.log.info("Testing compactblock construction...")
        self.test_compactblock_construction(self.nodes[0], self.test_node, 1,
                                            False)
        sync_blocks(self.nodes)
        self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2,
                                            False)
        sync_blocks(self.nodes)

        self.log.info("Testing compactblock requests... ")
        self.test_compactblock_requests(self.nodes[0], self.test_node, 1,
                                        False)
        sync_blocks(self.nodes)
        self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2,
                                        False)
        sync_blocks(self.nodes)

        self.log.info("Testing getblocktxn requests...")
        self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
        sync_blocks(self.nodes)

        self.log.info("Testing getblocktxn handler...")
        self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
        self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
        sync_blocks(self.nodes)

        self.log.info(
            "Testing compactblock requests/announcements not at chain tip...")
        self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
        sync_blocks(self.nodes)
        self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
        self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
        sync_blocks(self.nodes)

        self.log.info("Testing handling of incorrect blocktxn responses...")
        self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node,
                                              2)
        sync_blocks(self.nodes)

        # End-to-end block relay tests
        self.log.info("Testing end-to-end block relay...")
        self.request_cb_announcements(self.test_node, self.nodes[0], 1)
        self.request_cb_announcements(self.old_node, self.nodes[1], 1)
        self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
        self.test_end_to_end_block_relay(
            self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
        self.test_end_to_end_block_relay(
            self.nodes[1], [self.segwit_node, self.test_node, self.old_node])

        self.log.info("Testing handling of invalid compact blocks...")
        self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node,
                                             False)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node,
                                             False)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node,
                                             False)

        self.log.info(
            "Testing reconstructing compact blocks from all peers...")
        self.test_compactblock_reconstruction_multiple_peers(
            self.nodes[1], self.segwit_node, self.old_node)
        sync_blocks(self.nodes)

        # Advance to segwit activation
        self.log.info("Advancing to segwit activation")
        self.activate_segwit(self.nodes[1])
        self.log.info("Running tests, post-segwit activation...")

        self.log.info("Testing compactblock construction...")
        self.test_compactblock_construction(self.nodes[1], self.old_node, 1,
                                            True)
        self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2,
                                            True)
        sync_blocks(self.nodes)

        self.log.info("Testing compactblock requests (unupgraded node)... ")
        self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)

        self.log.info("Testing getblocktxn requests (unupgraded node)...")
        self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)

        # Need to manually sync node0 and node1, because post-segwit activation,
        # node1 will not download blocks from node0.
        self.log.info("Syncing nodes...")
        assert (self.nodes[0].getbestblockhash() !=
                self.nodes[1].getbestblockhash())
        while self.nodes[0].getblockcount() > self.nodes[1].getblockcount():
            block_hash = self.nodes[0].getblockhash(
                self.nodes[1].getblockcount() + 1)
            self.nodes[1].submitblock(self.nodes[0].getblock(
                block_hash, False))
        assert_equal(self.nodes[0].getbestblockhash(),
                     self.nodes[1].getbestblockhash())

        self.log.info("Testing compactblock requests (segwit node)... ")
        self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2,
                                        True)

        self.log.info("Testing getblocktxn requests (segwit node)...")
        self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
        sync_blocks(self.nodes)

        self.log.info(
            "Testing getblocktxn handler (segwit node should return witnesses)..."
        )
        self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
        self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)

        # Test that if we submitblock to node1, we'll get a compact block
        # announcement to all peers.
        # (Post-segwit activation, blocks won't propagate from node0 to node1
        # automatically, so don't bother testing a block announced to node0.)
        self.log.info("Testing end-to-end block relay...")
        self.request_cb_announcements(self.test_node, self.nodes[0], 1)
        self.request_cb_announcements(self.old_node, self.nodes[1], 1)
        self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
        self.test_end_to_end_block_relay(
            self.nodes[1], [self.segwit_node, self.test_node, self.old_node])

        self.log.info("Testing handling of invalid compact blocks...")
        self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node,
                                             False)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node,
                                             True)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node,
                                             True)

        self.log.info("Testing invalid index in cmpctblock message...")
        self.test_invalid_cmpctblock_message()
Example No. 37
    def run_test(self):
        miner = self.nodes[0]
        owner = self.nodes[1]
        remote = self.nodes[2]
        mnPrivkey = "9247iC59poZmqBYt9iDh9wDam6v9S1rW5XekjLGyPnDhrDkP4AK"

        self.log.info("generating 141 blocks...")
        miner.generate(141)
        self.sync_blocks()
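        # 141 blocks should leave the early coinbases mature, giving the miner
        # spendable funds for the 100-coin collateral below.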

        # Create collateral
        self.log.info("funding masternode controller...")
        masternodeAlias = "mnode"
        mnAddress = owner.getnewaddress(masternodeAlias)
        collateralTxId = miner.sendtoaddress(mnAddress, Decimal('100'))
        miner.generate(2)
        self.sync_blocks()
        time.sleep(1)
        collateral_rawTx = owner.getrawtransaction(collateralTxId, 1)
        assert_equal(owner.getbalance(), Decimal('100'))
        assert_greater_than(collateral_rawTx["confirmations"], 0)

        # Block time can be up to median time past +1. We might need to wait...
        wait_time = collateral_rawTx["time"] - int(time.time())
        if wait_time > 0:
            self.log.info("Sleep %d seconds to catch up with the chain..." %
                          wait_time)
            time.sleep(wait_time)

        # Setup controller
        self.log.info("controller setup...")
        o = owner.getmasternodeoutputs()
        assert_equal(len(o), 1)
        assert_equal(o[0]["txhash"], collateralTxId)
        vout = o[0]["outputidx"]
        self.log.info("collateral accepted for " + masternodeAlias +
                      ". Updating masternode.conf...")
        confData = masternodeAlias + " 127.0.0.1:" + str(p2p_port(2)) + " " + \
                   mnPrivkey + " " + str(collateralTxId) + " " + str(vout)
        destPath = os.path.join(self.options.tmpdir, "node1", "regtest",
                                "masternode.conf")
        with open(destPath, "a+") as file_object:
            file_object.write("\n")
            file_object.write(confData)

        # Init remote
        self.log.info("initializing remote masternode...")
        remote.initmasternode(mnPrivkey, "127.0.0.1:" + str(p2p_port(2)))
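        # The remote daemon gets the masternode privkey and its own address so
        # that it can act as the masternode and sign its pings.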

        # sanity check, verify that we are not in IBD
        for i, node in enumerate(self.nodes):
            if node.getblockchaininfo()['initial_block_downloading']:
                raise AssertionError("Error, node(%s) shouldn't be in IBD." %
                                     str(i))

        # Wait until mnsync is complete (max 120 seconds)
        self.log.info("waiting to complete mnsync...")
        start_time = time.time()
        self.wait_until_mnsync_finished()
        self.log.info("MnSync completed in %d seconds" %
                      (time.time() - start_time))
        miner.generate(1)
        self.sync_blocks()
        time.sleep(1)

        # Send Start message
        self.log.info("sending masternode broadcast...")
        self.controller_start_masternode(owner, masternodeAlias)
        miner.generate(1)
        self.sync_blocks()
        time.sleep(1)

        # Wait until masternode is enabled everywhere (max 180 secs)
        self.log.info("waiting till masternode gets enabled...")
        start_time = time.time()
        time.sleep(5)
        self.wait_until_mn_enabled(collateralTxId, 180)
        self.log.info("Masternode enabled in %d seconds" %
                      (time.time() - start_time))
        self.log.info("Good. Masternode enabled")
        miner.generate(1)
        self.sync_blocks()
        time.sleep(1)

        last_seen = [
            self.get_mn_lastseen(node, collateralTxId) for node in self.nodes
        ]
        self.log.info("Current lastseen: %s" % str(last_seen))
        self.log.info("Waiting 2 * 25 seconds and check new lastseen...")
        time.sleep(50)
        new_last_seen = [
            self.get_mn_lastseen(node, collateralTxId) for node in self.nodes
        ]
        self.log.info("New lastseen: %s" % str(new_last_seen))
        for i in range(self.num_nodes):
            assert_greater_than(new_last_seen[i], last_seen[i])
        self.log.info("All good.")
Example No. 38
    def run_test(self):
        block_count = 0

        # Create three P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        node2 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2)
        node2.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        node2.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1
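        # Remember the fork point; the competing (right) branch is built from
        # this block later on.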

        # left branch
        block2 = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block2_num = block_count
        block_count += 1
        node0.send_message(msg_block(block2))
        self.log.info(f"block2 hash: {block2.hash}")

        self.nodes[0].waitforblockheight(102)

        # send blocks 3,4 for parallel validation on left branch
        block3 = self.chain.next_block(block_count, spend=out[1], extra_txns=10)
        block_count += 1

        self.chain.set_tip(block2_num)

        block4 = self.chain.next_block(block_count, spend=out[1], extra_txns=8)
        block_count += 1

        # send two "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"block3 hash: {block3.hash}")
        self.nodes[0].waitaftervalidatingblock(block3.hash, "add")
        self.log.info(f"block4 hash: {block4.hash}")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "add")
        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block3.hash, block4.hash}, self.nodes[0], self.log)

        node0.send_message(msg_block(block3))
        node2.send_message(msg_block(block4))

        # make sure we started validating blocks
        wait_for_validating_blocks({block3.hash, block4.hash}, self.nodes[0], self.log)

        # right branch
        self.chain.set_tip(tip_block_num)
        block5 = self.chain.next_block(block_count, spend=out[0], extra_txns=10)
        block_count += 1
        node1.send_message(msg_block(block5))
        self.log.info(f"block5 hash: {block5.hash}")

        # and two blocks to extend second branch to cause reorg
        # - they must be sent from the same node as otherwise they will be
        #   rejected with "prev block not found"
        block6 = self.chain.next_block(block_count)
        node1.send_message(msg_block(block6))
        self.log.info(f"block6 hash: {block6.hash}")
        block_count += 1

        block7 = self.chain.next_block(block_count)
        node1.send_message(msg_block(block7))
        self.log.info(f"block7 hash: {block7.hash}")
        block_count += 1

        self.nodes[0].waitforblockheight(104)
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())
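        # block7 is the tip because blocks 3 and 4 are still stalled in
        # validation, so the longer right branch wins the race for now.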

        self.log.info("releasing wait status on parallel blocks to finish their validation")
        self.nodes[0].waitaftervalidatingblock(block3.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # block that arrived last on competing chain should be active
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())
Example No. 39
    def setup_network(self, split=False):
        if self.options.parent_bitcoin and self.options.parent_binpath == "":
            raise Exception(
                "Can't run with --parent_bitcoin without specifying --parent_binpath"
            )

        self.nodes = []
        # Setup parent nodes
        parent_chain = "elementsregtest" if not self.options.parent_bitcoin else "regtest"
        parent_binary = ([self.options.parent_binpath]
                         if self.options.parent_binpath != "" else None)
        for n in range(2):
            extra_args = [
                "-port=" + str(p2p_port(n)), "-rpcport=" + str(rpc_port(n))
            ]
            if self.options.parent_bitcoin:
                # bitcoind can't read elements.conf config files
                extra_args.extend([
                    "-regtest=1",
                    "-printtoconsole=0",
                    "-server=1",
                    "-discover=0",
                    "-keypool=1",
                    "-listenonion=0",
                    "-addresstype=legacy",  # To make sure bitcoind gives back p2pkh no matter version
                    "-fallbackfee=0.0002"
                ])
            else:
                extra_args.extend([
                    "-validatepegin=0",
                    "-initialfreecoins=0",
                    "-anyonecanspendaremine=1",
                    "-signblockscript=51",  # OP_TRUE
                ])

            self.add_nodes(1, [extra_args],
                           chain=[parent_chain],
                           binary=parent_binary,
                           chain_in_args=[not self.options.parent_bitcoin])
            self.start_node(n)
            print("Node {} started".format(n))
        # set hard-coded mining keys for non-Elements chains
        if self.options.parent_bitcoin:
            self.nodes[0].set_deterministic_priv_key(
                '2Mysp7FKKe52eoC2JmU46irt1dt58TpCvhQ',
                'cTNbtVJmhx75RXomhYWSZAafuNNNKPd1cr2ZiUcAeukLNGrHWjvJ')
            self.nodes[1].set_deterministic_priv_key(
                '2N19ZHF3nEzBXzkaZ3N5sVBJXQ8jZ7Udpg5',
                'cRnDSw1JsjmYYEN6xxQvf5pqMENsRE584z6MdWfJ7v85c4ciitkk')

        connect_nodes_bi(self.nodes, 0, 1)
        self.parentgenesisblockhash = self.nodes[0].getblockhash(0)
        if not self.options.parent_bitcoin:
            parent_pegged_asset = self.nodes[0].getsidechaininfo()['pegged_asset']

        # Setup sidechain nodes
        self.fedpeg_script = "512103dff4923d778550cc13ce0d887d737553b4b58f4e8e886507fc39f5e447b2186451ae"
        for n in range(2):
            extra_args = [
                "-printtoconsole=0",
                "-port=" + str(p2p_port(2 + n)),
                "-rpcport=" + str(rpc_port(2 + n)),
                '-validatepegin=1',
                '-fedpegscript=%s' % self.fedpeg_script,
                '-minrelaytxfee=0',
                '-blockmintxfee=0',
                '-initialfreecoins=0',
                '-peginconfirmationdepth=10',
                '-mainchainrpchost=127.0.0.1',
                '-mainchainrpcport=%s' % rpc_port(n),
                '-recheckpeginblockinterval=15',  # Long enough to allow failure and repair before timeout
                '-parentgenesisblockhash=%s' % self.parentgenesisblockhash,
                '-parentpubkeyprefix=111',
                '-parentscriptprefix=196',
                '-parent_bech32_hrp=bcrt',
                # Turn off consistency checks that can cause an assert when the parent
                # node stops and a peg-in transaction fails this belt-and-suspenders check.
                '-checkmempool=0',
            ]
            if not self.options.parent_bitcoin:
                extra_args.extend([
                    '-parentpubkeyprefix=235',
                    '-parentscriptprefix=75',
                    '-parent_bech32_hrp=ert',
                    '-con_parent_chain_signblockscript=51',
                    '-con_parent_pegged_asset=%s' % parent_pegged_asset,
                ])

            # Immediately activate dynafed when requested, versus "never" from the conf
            if self.options.pre_transition or self.options.post_transition:
                extra_args.extend(["-con_dyna_deploy_start=-1"])

            # Use rpcuser auth only for first parent.
            if n == 0:
                # Extract username and password from cookie file and use directly.
                datadir = get_datadir_path(self.options.tmpdir, n)
                rpc_u, rpc_p = get_auth_cookie(datadir, parent_chain)
                extra_args.extend([
                    '-mainchainrpcuser=%s' % rpc_u,
                    '-mainchainrpcpassword=%s' % rpc_p,
                ])
            else:
                # Need to specify where to find parent cookie file
                datadir = get_datadir_path(self.options.tmpdir, n)
                extra_args.append('-mainchainrpccookiefile=' + datadir + "/" +
                                  parent_chain + "/.cookie")

            self.add_nodes(1, [extra_args], chain=["elementsregtest"])
            self.start_node(2 + n)
            print("Node {} started".format(2 + n))

        # We only connect the same-chain nodes, so sync_all works correctly
        connect_nodes_bi(self.nodes, 2, 3)
        self.node_groups = [[self.nodes[0], self.nodes[1]],
                            [self.nodes[2], self.nodes[3]]]
        self.sync_all(self.node_groups)
        print("Setting up network done")
Example No. 40
    def run_test(self):
        self.check_tx_relay()

        self.checkpermission(
            # default permissions (no specific permissions)
            ["-whitelist=127.0.0.1"],
            # Make sure the default values in the command line documentation match the ones here
            ["relay", "noban", "mempool", "download"])

        self.checkpermission(
            # no permission (even with forcerelay)
            ["[email protected]", "-whitelistforcerelay=1"],
            [])

        self.checkpermission(
            # relay permission removed (no specific permissions)
            ["-whitelist=127.0.0.1", "-whitelistrelay=0"],
            ["noban", "mempool", "download"])

        self.checkpermission(
            # forcerelay and relay permission added
            # Legacy parameter interaction which sets whitelistrelay to true
            # if whitelistforcerelay is true
            ["-whitelist=127.0.0.1", "-whitelistforcerelay"],
            ["forcerelay", "relay", "noban", "mempool", "download"])

        # Let's make sure permissions are merged correctly
        # For this, we need to use whitebind instead of bind
        # by modifying the configuration file.
        ip_port = "127.0.0.1:{}".format(p2p_port(1))
        self.replaceinconfig(1, "bind=127.0.0.1",
                             "whitebind=bloomfilter,forcerelay@" + ip_port)
        self.checkpermission(
            ["[email protected]"],
            # Check parameter interaction forcerelay should activate relay
            ["noban", "bloomfilter", "forcerelay", "relay", "download"])
        self.replaceinconfig(1, "whitebind=bloomfilter,forcerelay@" + ip_port,
                             "bind=127.0.0.1")

        self.checkpermission(
            # legacy whitelistrelay should be ignored
            ["-whitelist=noban,[email protected]", "-whitelistrelay"],
            ["noban", "mempool", "download"])

        self.checkpermission(
            # legacy whitelistforcerelay should be ignored
            ["-whitelist=noban,[email protected]", "-whitelistforcerelay"],
            ["noban", "mempool", "download"])

        self.checkpermission(
            # missing mempool permission to be considered legacy whitelisted
            ["[email protected]"],
            ["noban", "download"])

        self.checkpermission(
            # all permission added
            ["[email protected]"],
            [
                "forcerelay", "noban", "mempool", "bloomfilter", "relay",
                "download", "addr"
            ])

        self.stop_node(1)
        self.nodes[1].assert_start_raises_init_error(
            ["[email protected]"],
            "Invalid P2P permission",
            match=ErrorMatch.PARTIAL_REGEX)
        self.nodes[1].assert_start_raises_init_error(
            ["[email protected]:230"],
            "Invalid netmask specified in",
            match=ErrorMatch.PARTIAL_REGEX)
        self.nodes[1].assert_start_raises_init_error(
            ["[email protected]/10"],
            "Cannot resolve -whitebind address",
            match=ErrorMatch.PARTIAL_REGEX)
Example No. 41
    def setup_network(self, split=False):
        if self.options.parent_bitcoin and self.options.parent_binpath == "":
            raise Exception("Can't run with --parent_bitcoin without specifying --parent_binpath")

        self.nodes = []
        # Setup parent nodes
        parent_chain = "parent" if not self.options.parent_bitcoin else "regtest"
        parent_binary = [self.options.parent_binpath] if self.options.parent_binpath != "" else None
        for n in range(2):
            extra_args = [
                "-port="+str(p2p_port(n)),
                "-rpcport="+str(rpc_port(n))
            ]
            if self.options.parent_bitcoin:
                # bitcoind can't read elements.conf config files
                extra_args.extend([
                    "-regtest=1",
                    "-printtoconsole=0",
                    "-server=1",
                    "-discover=0",
                    "-keypool=1",
                    "-listenonion=0",
                    "-addresstype=legacy", # To make sure bitcoind gives back p2pkh no matter version
                ])
            else:
                extra_args.extend([
                    "-validatepegin=0",
                    "-initialfreecoins=0",
                    "-anyonecanspendaremine",
                    "-signblockscript=51", # OP_TRUE
                    '-con_blocksubsidy=5000000000',
                    "-pubkeyprefix=111",
                    "-scriptprefix=196",
                ])

            self.add_nodes(1, [extra_args], chain=[parent_chain], binary=parent_binary, chain_in_args=[not self.options.parent_bitcoin])
            self.start_node(n)
            print("Node {} started".format(n))

        connect_nodes_bi(self.nodes, 0, 1)
        self.parentgenesisblockhash = self.nodes[0].getblockhash(0)
        if not self.options.parent_bitcoin:
            parent_pegged_asset = self.nodes[0].getsidechaininfo()['pegged_asset']

        # Setup sidechain nodes
        self.fedpeg_script = "512103dff4923d778550cc13ce0d887d737553b4b58f4e8e886507fc39f5e447b2186451ae"
        for n in range(2):
            extra_args = [
                "-printtoconsole=0",
                "-port="+str(p2p_port(2+n)),
                "-rpcport="+str(rpc_port(2+n)),
                '-parentgenesisblockhash=%s' % self.parentgenesisblockhash,
                '-validatepegin=1',
                '-fedpegscript=%s' % self.fedpeg_script,
                '-anyonecanspendaremine=0',
                '-minrelaytxfee=0',
                '-blockmintxfee=0',
                '-initialfreecoins=0',
                '-peginconfirmationdepth=10',
                '-mainchainrpchost=127.0.0.1',
                '-mainchainrpcport=%s' % rpc_port(n),
                '-recheckpeginblockinterval=15', # Long enough to allow failure and repair before timeout
                '-parentpubkeyprefix=111',
                '-parentscriptprefix=196',
                # Turn off consistency checks that can cause an assert when the parent node stops
                # and a peg-in transaction fails this belt-and-suspenders check.
                '-checkmempool=0',
            ]
            if not self.options.parent_bitcoin:
                extra_args.extend([
                    '-parentpubkeyprefix=111',
                    '-parentscriptprefix=196',
                    "-parent_bech32_hrp=ert",
                    '-con_parent_chain_signblockscript=51',
                    '-con_parent_pegged_asset=%s' % parent_pegged_asset,
                ])

            # Use rpcuser auth only for first parent.
            if n == 0:
                # Extract username and password from cookie file and use directly.
                datadir = get_datadir_path(self.options.tmpdir, n)
                rpc_u, rpc_p = get_auth_cookie(datadir, parent_chain)
                extra_args.extend([
                    '-mainchainrpcuser=%s' % rpc_u,
                    '-mainchainrpcpassword=%s' % rpc_p,
                ])
            else:
                # Need to specify where to find parent cookie file
                datadir = get_datadir_path(self.options.tmpdir, n)
                extra_args.append('-mainchainrpccookiefile=' + datadir + "/" + parent_chain + "/.cookie")

            self.add_nodes(1, [extra_args], chain=["sidechain"])
            self.start_node(2+n)
            print("Node {} started".format(2+n))

        # We only connect the same-chain nodes, so sync_all works correctly
        connect_nodes_bi(self.nodes, 2, 3)
        self.node_groups = [[self.nodes[0], self.nodes[1]], [self.nodes[2], self.nodes[3]]]
        self.sync_all(self.node_groups)
        print("Setting up network done")
Example No. 42
    def run_test(self):
        node0 = NodeConnCB()

        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        NetworkThread().start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
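        # Mocktime is reset to the present later in the test, making the stale
        # branch appear ~60 days old, beyond the roughly one-month window in
        # which nodes will still serve stale blocks.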

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
Example No. 43
    def run_test(self):

        # Connect to node0
        node0 = BaseNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread
        node0.wait_for_verack()

        # Build the blockchain
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

        self.blocks = []

        # Get a pubkey for the coinbase TXO
        coinbase_key = CECKey()
        coinbase_key.set_secretbytes(b"horsebattery")
        coinbase_pubkey = coinbase_key.get_pubkey()

        # Create the first block with a coinbase output to our key
        height = 1
        block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
        self.blocks.append(block)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1

        # Bury the block 100 deep so the coinbase output is spendable
        for i in range(100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        # Create a transaction spending the coinbase output with an invalid (null) signature
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
        tx.calc_sha256()

        block102 = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block102.vtx.extend([tx])
        block102.hashMerkleRoot = block102.calc_merkle_root()
        block102.rehash()
        block102.solve()
        self.blocks.append(block102)
        self.tip = block102.sha256
        self.block_time += 1
        height += 1

        # Bury the assumed valid block 2100 deep
        for i in range(2100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.nVersion = 4
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
        self.nodes.append(self.start_node(1, self.options.tmpdir,
                                     ["-assumevalid=" + hex(block102.sha256)]))
        node1 = BaseNode()  # connects to node1
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
        node1.add_connection(connections[1])
        node1.wait_for_verack()

        self.nodes.append(self.start_node(2, self.options.tmpdir,
                                     ["-assumevalid=" + hex(block102.sha256)]))
        node2 = BaseNode()  # connects to node2
        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[2])
        node2.wait_for_verack()

        # send header lists to all three nodes
        node0.send_header_for_blocks(self.blocks[0:2000])
        node0.send_header_for_blocks(self.blocks[2000:])
        node1.send_header_for_blocks(self.blocks[0:2000])
        node1.send_header_for_blocks(self.blocks[2000:])
        node2.send_header_for_blocks(self.blocks[0:200])
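        # node2 only receives the first 200 headers, so the assumevalid block
        # is not buried under enough work from its point of view; it therefore
        # validates block 102 fully and rejects it.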

        # Send blocks to node0. Block 102 will be rejected.
        self.send_blocks_until_disconnected(node0)
        self.assert_blockchain_height(self.nodes[0], 101)

        # Send all blocks to node1. All blocks will be accepted.
        for i in range(2202):
            node1.send_message(msg_block(self.blocks[i]))
        # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
        node1.sync_with_ping(120)
        assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)

        # Send blocks to node2. Block 102 will be rejected.
        self.send_blocks_until_disconnected(node2)
        self.assert_blockchain_height(self.nodes[2], 101)