Exemplo n.º 1
0
    def verify_inv(self, testnode, txs):
        """Request the node's mempool over P2P and check the returned "inv".

        The node must answer with an "inv" whose entries match *txs*
        exactly (compared order-independently).
        """
        # Drain any in-flight messages before issuing the mempool request
        testnode.sync_with_ping()

        # Clear the last seen "inv" and ask the node for its mempool
        with mininode_lock:
            testnode.last_inv = None
            testnode.send_message(msg_mempool())

        # Block until the "inv" response has arrived
        testnode.sync_with_ping(waiting_for=lambda x: x.last_inv)

        with mininode_lock:
            inv_msg = testnode.last_inv
            assert_equal(len(inv_msg.inv), len(txs))

            # Compare order-independently: sort both sides by the same key
            def sort_key(item):
                return (item.type, item.hash, item.hash_aux)

            for expected, actual in zip(sorted(txs, key=sort_key),
                                        sorted(inv_msg.inv, key=sort_key)):
                assert_equal(expected, actual)
Exemplo n.º 2
0
 def runTestWithParams(description, args, expectedReject):
     """Start a node with *args*, send a "mempool" request, and verify
     whether the peer is rejected (disconnected) or kept, per
     *expectedReject*.
     """
     with self.run_node_with_connections(description, 0, args,
                                         self.num_peers) as connections:
         # Request the node's mempool contents
         connections[0].cb.send_message(msg_mempool())
         if expectedReject:
             # The node must drop the connection for this request
             connections[0].cb.wait_for_disconnect()
             assert_equal(len(self.nodes[0].getpeerinfo()), 0)
         else:
             time.sleep(1)
             # The mininode must still be connected at this point
             assert_equal(len(self.nodes[0].getpeerinfo()), 1)
Exemplo n.º 3
0
    def run_test(self):
        """Verify that sending a "mempool" P2P message gets this mininode
        disconnected by the node."""
        # Attach a mininode to node 0 and complete the version handshake
        test_node = NodeConnCB()
        conn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
        test_node.add_connection(conn)
        NetworkThread().start()
        test_node.wait_for_verack()

        # Request the mempool; the node should drop the connection
        test_node.send_message(msg_mempool())
        test_node.wait_for_disconnect()

        # No peers may remain connected
        assert_equal(len(self.nodes[0].getpeerinfo()), 0)
Exemplo n.º 4
0
    def verify_inv(self, testnode, tx):
        """Request the node's mempool over P2P and check that the "inv"
        reply contains exactly *tx*."""
        # Ensure earlier messages are fully processed first
        testnode.sync_with_ping()

        # Reset the recorded "inv" and send the "mempool" request
        with mininode_lock:
            testnode.last_inv = None
            testnode.send_message(msg_mempool())

        # Wait for the node to deliver its response
        testnode.sync_with_ping()

        with mininode_lock:
            reply = testnode.last_inv
            # Exactly one entry, and it must be our transaction's hash
            assert_equal(len(reply.inv), 1)
            assert_equal(tx.sha256, reply.inv[0].hash)
Exemplo n.º 5
0
    def verify_inv(self, testnode, tx):
        """Check that a P2P "mempool" request yields an "inv" holding only *tx*."""
        # Make sure the connection is quiescent before the request
        testnode.sync_with_ping()

        # Issue the "mempool" message with the inv slot cleared
        with mininode_lock:
            testnode.last_inv = None
            testnode.send_message(msg_mempool())

        # Let the node answer before inspecting state
        testnode.sync_with_ping()

        with mininode_lock:
            inv_message = testnode.last_inv
            assert_equal(1, len(inv_message.inv))
            assert_equal(inv_message.inv[0].hash, tx.sha256)
Exemplo n.º 6
0
 def send_get_mempool(self):
     # Ask the connected node for its mempool contents via a P2P "mempool" message.
     self.send_message(msg_mempool())
    def run_test(self):
        """Exercise handling of transactions that are "expiring soon".

        Sends a tx with a near expiry height (203) and one with a later
        expiry (204) to node 0, then verifies:
          * the expiring-soon tx is rejected from the mempool while the
            other is accepted and relayed to node 1;
          * "mempool" and "getdata" P2P requests only serve non-expiring
            transactions (a "notfound" is returned for the expiring one);
          * an expiring-soon tx cannot be re-submitted via RPC
            ("tx-expiring-soon" error);
          * peers are not penalized (banscore stays 0) throughout.
        Node 2 starts isolated and is reconnected partway through.
        """
        testnode0 = TestNode()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], testnode0,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode0.add_connection(connections[0])

        # Start up network handling in another thread
        NetworkThread().start()
        testnode0.wait_for_verack()

        # Verify mininodes are connected to litecoinzd nodes
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(0, peerinfo[0]["banscore"])

        # Mine some blocks so we can spend
        self.coinbase_blocks = self.nodes[0].generate(200)
        self.nodeaddress = self.nodes[0].getnewaddress()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify block count (node 2 is isolated and still at genesis)
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 0)

        # Mininodes send expiring soon transaction in "tx" message to litecoinzd node
        # (expiry height 203, only 3 blocks away from the current tip at 200)
        tx1 = self.create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                      self.nodeaddress, 10.0, 203)
        testnode0.send_message(msg_tx(tx1))

        # Mininodes send transaction in "tx" message to litecoinzd node
        # (expiry height 204, far enough away to be accepted)
        tx2 = self.create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                      self.nodeaddress, 10.0, 204)
        testnode0.send_message(msg_tx(tx2))

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify contents of mempool
        assert (tx1.hash not in self.nodes[0].getrawmempool()
                )  # tx1 rejected as expiring soon
        assert (tx1.hash not in self.nodes[1].getrawmempool())
        assert (tx2.hash in self.nodes[0].getrawmempool())  # tx2 accepted
        assert (tx2.hash in self.nodes[1].getrawmempool())
        assert_equal(len(self.nodes[2].getrawmempool()),
                     0)  # node 2 is isolated and empty

        # Send p2p message "mempool" to receive contents from litecoinzd node in "inv" message
        with mininode_lock:
            testnode0.last_inv = None
            testnode0.send_message(msg_mempool())

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify txid for tx2
        with mininode_lock:
            msg = testnode0.last_inv
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Send p2p message "getdata" to verify tx2 gets sent in "tx" message
        getdatamsg = msg_getdata()
        getdatamsg.inv = [CInv(1, tx2.sha256)]
        with mininode_lock:
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify data received in "tx" message is for tx2
        with mininode_lock:
            incoming_tx = testnode0.last_tx.tx
            incoming_tx.rehash()
            assert_equal(tx2.sha256, incoming_tx.sha256)

        # Sync and mine an empty block with node 2, leaving tx in the mempool of node0 and node1
        for blkhash in self.coinbase_blocks:
            blk = self.nodes[0].getblock(blkhash, 0)
            self.nodes[2].submitblock(blk)
        self.nodes[2].generate(1)

        # Verify block count (node 2 is now one block ahead)
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Reconnect node 2 to the network
        connect_nodes_bi(self.nodes, 1, 2)

        # Set up test node for node 2
        testnode2 = TestNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], testnode2,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode2.add_connection(connections[-1])

        # Verify block count (all nodes converge on node 2's chain tip)
        sync_blocks(self.nodes[:3])
        assert_equal(self.nodes[0].getblockcount(), 201)
        assert_equal(self.nodes[1].getblockcount(), 201)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Verify contents of mempool: tx2 was not relayed to node 2
        assert (tx2.hash in self.nodes[0].getrawmempool())
        assert (tx2.hash in self.nodes[1].getrawmempool())
        assert (tx2.hash not in self.nodes[2].getrawmempool())

        # Confirm tx2 cannot be submitted to a mempool because it is expiring soon.
        # (At height 201, expiry height 204 now counts as "expiring soon".)
        try:
            rawtx2 = hexlify(tx2.serialize())
            self.nodes[2].sendrawtransaction(rawtx2)
            assert (False)
        except JSONRPCException as e:
            errorString = e.error['message']
            assert ("tx-expiring-soon" in errorString)

        # Ask node 0 for tx2...
        with mininode_lock:
            testnode0.last_notfound = None
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        [x.sync_with_ping() for x in [testnode0, testnode2]]

        # Verify node 0 does not reply to "getdata" by sending "tx" message, as tx2 is expiring soon
        with mininode_lock:
            assert_equal(testnode0.last_tx, None)

        # Verify mininode received a "notfound" message containing the txid of tx2
        with mininode_lock:
            msg = testnode0.last_notfound
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Create a transaction to verify that processing of "getdata" messages is functioning
        # (expiry height 999 is far in the future, so tx3 is not expiring soon)
        tx3 = self.create_transaction(self.nodes[0], self.coinbase_blocks[2],
                                      self.nodeaddress, 10.0, 999)

        # Mininodes send tx3 to litecoinzd node
        testnode0.send_message(msg_tx(tx3))
        getdatamsg = msg_getdata()
        getdatamsg.inv = [CInv(1, tx3.sha256)]
        with mininode_lock:
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        [x.sync_with_ping() for x in [testnode0, testnode2]]

        # Verify we received a "tx" message for tx3
        with mininode_lock:
            incoming_tx = testnode0.last_tx.tx
            incoming_tx.rehash()
            assert_equal(tx3.sha256, incoming_tx.sha256)

        # Send p2p message "mempool" to receive contents from litecoinzd node in "inv" message
        with mininode_lock:
            testnode0.last_inv = None
            testnode0.send_message(msg_mempool())

        # Sync up with node after p2p messages delivered
        [x.sync_with_ping() for x in [testnode0, testnode2]]

        # Verify txid for tx3 is returned in "inv", but tx2 which is expiring soon is not returned
        with mininode_lock:
            msg = testnode0.last_inv
            assert_equal(len(msg.inv), 1)
            assert_equal(tx3.sha256, msg.inv[0].hash)

        # Verify contents of mempool
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[0].getrawmempool()))
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[1].getrawmempool()))
        assert_equal({tx3.hash}, set(self.nodes[2].getrawmempool()))

        # Verify banscore for nodes are still zero
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[0].getpeerinfo()))
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[2].getpeerinfo()))

        [c.disconnect_node() for c in connections]
Exemplo n.º 8
0
    def run_test(self):
        """Verify stream-policy behaviour for block vs. transaction delivery.

        Fills the mempool with self.num_txns transactions, restarts the
        node with four associations (alternating BlockPriority / Default
        stream policies), then requests all txns plus a block in a single
        GETDATA with the block listed last. Default-policy associations
        must receive the block last; BlockPriority associations should
        receive it earlier on average. Finally checks that a freshly mined
        block's INV arrives on the high-priority stream (DATA1) for
        BlockPriority associations.

        NOTE(review): the final for-loop appears to continue beyond this
        excerpt (no handling shown for DefaultStreamPolicy) — confirm
        against the full file.
        """
        inv_items = []
        block_priority_block_msg_pos = []
        default_block_msg_pos = []
        # Block is requested after self.num_txns txns, so its expected
        # position when served strictly in order is num_txns + 1.
        last_msg_pos = self.num_txns + 1

        # Initial node setup
        extra_args = [
            '-maxnonstdtxvalidationduration=100000',
            '-maxtxnvalidatorasynctasksrunduration=100001'
        ]
        with self.run_node_with_connections("Setup node", 0, extra_args,
                                            1) as connections:
            conn = connections[0]

            # Create and send some transactions to the node
            node = self.nodes[0]
            node.generate(100)
            funding_tx = make_funding_transaction(node)
            tx_generator = transaction_generator(funding_tx)
            for tx in islice(tx_generator, self.num_txns):
                inv_items.append(CInv(1, tx.sha256))
                conn.send_message(msg_tx(tx))
            wait_until(lambda: node.getmempoolinfo()['size'] == self.num_txns,
                       timeout=240)

        # Restart node with associations (two BlockPriority, two Default)
        associations_stream_policies = [
            BlockPriorityStreamPolicy(),
            DefaultStreamPolicy(),
            BlockPriorityStreamPolicy(),
            DefaultStreamPolicy()
        ]
        extra_args = [
            '-whitelist=127.0.0.1', '-maxnonstdtxvalidationduration=100000',
            '-maxtxnvalidatorasynctasksrunduration=100001'
        ]
        with self.run_node_with_associations(
                "Test block priority",
                0,
                extra_args,
                associations_stream_policies,
                cb_class=MyAssociationCB) as associations:
            # Wait for node to fully reinitialise itself
            node = self.nodes[0]
            wait_until(lambda: node.getmempoolinfo()['size'] == self.num_txns,
                       timeout=180)

            # Send MEMPOOL request so node will accept our GETDATA for transactions in the mempool
            for association in associations:
                association.send_message(msg_mempool())
                # This request will result in us requesting all the txns. Wait until that finishes and
                # then reset our message counts in preperation for the real test to come.
                wait_until(
                    lambda: association.callbacks.msg_count == self.num_txns)
                association.callbacks.reset_msg_counts()

            # Send GETDATA to request txns and a block, with the block as the last item in the list
            blockhash = int(node.getbestblockhash(), 16)
            inv_items.append(CInv(2, blockhash))
            for association in associations:
                association.send_message(msg_getdata(inv_items))

            # Wait for all GETDATA requests to have a response
            for association in associations:
                wait_until(lambda: association.callbacks.block_count == 1)

                # Remember at what position we got the block msg for the different policies
                if type(association.stream_policy
                        ) is BlockPriorityStreamPolicy:
                    block_priority_block_msg_pos.append(
                        association.callbacks.block_msg_position)
                    logger.info(
                        "BlockPriority policy block received at position {}".
                        format(association.callbacks.block_msg_position))
                elif type(association.stream_policy) is DefaultStreamPolicy:
                    default_block_msg_pos.append(
                        association.callbacks.block_msg_position)
                    logger.info(
                        "Default policy block received at position {}".format(
                            association.callbacks.block_msg_position))

            # For the DEFAULT policy, the block will have been received last (because it was requested last)
            for pos in default_block_msg_pos:
                assert_equal(pos, last_msg_pos)
            # For the BLOCKPRIORITY policy, the block should have been received sooner (this is possibly
            # slightly racy, but it's been very safe on all systems I've tried it on)
            avg_pos = sum(block_priority_block_msg_pos) / len(
                block_priority_block_msg_pos)
            assert_greater_than(last_msg_pos, avg_pos)

            # Generate a new block to trigger a block INV and wait for the INV
            node.generate(1)
            for association in associations:
                wait_until(lambda: association.callbacks.block_inv_stream_type
                           != StreamType.UNKNOWN)

                # Verify that BlockPriority associations got block INV over the high priority stream
                if type(association.stream_policy
                        ) is BlockPriorityStreamPolicy:
                    assert_equal(association.callbacks.block_inv_stream_type,
                                 StreamType.DATA1)