Example #1
def test_announce_address(node_factory, bitcoind):
    """Make sure our announcements are well formed."""

    # We do not allow announcement of duplicates.
    opts = {'announce-addr':
            ['4acth47i6kxnvkewtm6q7ib2s3ufpo5sqbsnzjpbi7utijcltosqemad.onion',
             'silkroad6ownowfk.onion',
             '1.2.3.4:1234',
             '::'],
            'log-level': 'io',
            'dev-allow-localhost': None}
    l1, l2 = node_factory.get_nodes(2, opts=[opts, {}])

    # It should warn about the collision between --addr=127.0.0.1:<ephem>
    # and --announce-addr=1.2.3.4:1234 (may happen before get_nodes returns).
    wait_for(lambda: l1.daemon.is_in_log('Cannot announce address 127.0.0.1:[0-9]*, already announcing 1.2.3.4:1234'))
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid = l1.fund_channel(l2, 10**6)
    bitcoind.generate_block(5)

    # Activate IO logging for l1.
    subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')])

    l1.wait_channel_active(scid)
    l2.wait_channel_active(scid)

    # We should see it send node announce (257 = 0x0101)
    l1.daemon.wait_for_log(r"\[OUT\] 0101.*004d010102030404d202000000000000000000000000000000002607039216a8b803f3acd758aa260704e00533f3e8f2aedaa8969b3d0fa03a96e857bbb28064dca5e147e934244b9ba50230032607'")
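
# Illustrative sketch (not part of the test above): the hex the regex matches is the
# node_announcement address block, which BOLT 7 frames as a u16 byte-count followed by
# descriptors: type 1 = IPv4 (4 addr bytes + 2 port), 2 = IPv6 (16 + 2),
# 3 = Tor v2 (10 + 2), 4 = Tor v3 (35 + 2).  The helper below is our own minimal
# decoder under that assumed layout, not something the test suite provides.
import ipaddress


def decode_addresses(payload_hex):
    data = bytes.fromhex(payload_hex)
    addr_len = int.from_bytes(data[:2], 'big')    # u16 length of all descriptors
    addrs, i = [], 2
    sizes = {1: 4, 2: 16, 3: 10, 4: 35}           # address bytes per descriptor type
    while i < 2 + addr_len:
        atype = data[i]
        alen = sizes[atype]
        raw = data[i + 1:i + 1 + alen]
        port = int.from_bytes(data[i + 1 + alen:i + 3 + alen], 'big')
        if atype == 1:
            addrs.append(('ipv4', str(ipaddress.IPv4Address(raw)), port))
        elif atype == 2:
            addrs.append(('ipv6', str(ipaddress.IPv6Address(raw)), port))
        else:
            addrs.append(('torv2' if atype == 3 else 'torv3', raw.hex(), port))
        i += 3 + alen
    return addrs


# Applied to the '004d0101020304...' block in the regex above, this should yield
# 1.2.3.4:1234, [::]:9735 and the two announced onion services on port 9735.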
Example #2
def test_routing_gossip(node_factory, bitcoind):
    nodes = node_factory.get_nodes(5)

    for i in range(len(nodes) - 1):
        src, dst = nodes[i], nodes[i + 1]
        src.rpc.connect(dst.info['id'], 'localhost', dst.port)
        src.openchannel(dst, 20000)

    # Allow announce messages.
    bitcoind.generate_block(5)

    # Deep check that all channels are in there
    comb = []
    for i in range(len(nodes) - 1):
        comb.append((nodes[i].info['id'], nodes[i + 1].info['id']))
        comb.append((nodes[i + 1].info['id'], nodes[i].info['id']))

    def check_gossip(n):
        seen = []
        channels = n.rpc.listchannels()['channels']
        for c in channels:
            seen.append((c['source'], c['destination']))
        missing = set(comb) - set(seen)
        logging.debug("Node {id} is missing channels {chans}".format(
            id=n.info['id'],
            chans=missing)
        )
        return len(missing) == 0

    for n in nodes:
        wait_for(lambda: check_gossip(n))
Example #3
def test_invoice_routeboost_private(node_factory, bitcoind):
    """Test routeboost 'r' hint in bolt11 invoice for private channels
    """
    l1, l2 = node_factory.line_graph(2, fundamount=10**6, announce_channels=False)

    # Attach public channel to l1 so it doesn't look like a dead-end.
    l0 = node_factory.get_node()
    l0.rpc.connect(l1.info['id'], 'localhost', l1.port)
    scid = l0.fund_channel(l1, 2 * (10**4))
    bitcoind.generate_block(5)

    # Make sure channel is totally public.
    wait_for(lambda: [c['public'] for c in l2.rpc.listchannels(scid)['channels']] == [True, True])

    # Since there's only one route, it will reluctantly hint at it even
    # though it's private.
    inv = l2.rpc.invoice(msatoshi=123456, label="inv0", description="?")
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == l1.rpc.listchannels()['channels'][0]['short_channel_id']
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6

    # If we explicitly say not to, it won't expose.
    inv = l2.rpc.invoice(msatoshi=123456, label="inv1", description="?", exposeprivatechannels=False)
    assert 'warning_capacity' in inv
    assert 'routes' not in l1.rpc.decodepay(inv['bolt11'])

    # The existence of a public channel, even without capacity, will suppress
    # the exposure of private channels.
    l3 = node_factory.get_node()
    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid = l3.fund_channel(l2, (10**4))
    bitcoind.generate_block(5)

    # Make sure channel is totally public.
    wait_for(lambda: [c['public'] for c in l3.rpc.listchannels(scid)['channels']] == [True, True])

    inv = l2.rpc.invoice(msatoshi=10**7, label="inv2", description="?")
    assert 'warning_capacity' in inv

    # Unless we tell it to include it.
    inv = l2.rpc.invoice(msatoshi=10**7, label="inv3", description="?", exposeprivatechannels=True)
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == l1.rpc.listchannels()['channels'][0]['short_channel_id']
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6
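
# Illustrative sketch (not part of the test above): the three hint fields asserted
# here plug into the standard BOLT 7 forwarding-fee formula,
#   fee = fee_base_msat + amount * fee_proportional_millionths / 1_000_000,
# so with base 1 msat and 10 ppm the 123456 msat invoice would cost about 2 msat to
# forward.  `hint_fee_msat` is our own helper name; integer flooring is assumed here.
def hint_fee_msat(amount_msat, fee_base_msat=1, fee_proportional_millionths=10):
    return fee_base_msat + (amount_msat * fee_proportional_millionths) // 1_000_000


assert hint_fee_msat(123456) == 2   # 1 msat base + floor(123456 * 10 / 1e6)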
Example #4
def test_gossip_notices_close(node_factory, bitcoind):
    # We want IO logging so we can replay a channel_announce to l1.
    l1 = node_factory.get_node(options={'log-level': 'io'})
    l2, l3 = node_factory.line_graph(2)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # FIXME: sending SIGUSR1 immediately may kill it before handler installed.
    l1.daemon.wait_for_log('Handed peer, entering loop')
    subprocess.run(['kill', '-USR1', l1.subd_pid('openingd')])

    bitcoind.generate_block(5)

    # Make sure l1 learns about channel.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: len(l1.rpc.listnodes()['nodes']) == 2)
    l1.rpc.disconnect(l2.info['id'])

    # Grab channel_announcement from io logs (ends in ')
    channel_announcement = l1.daemon.is_in_log(r'\[IN\] 0100').split(' ')[-1][:-1]
    channel_update = l1.daemon.is_in_log(r'\[IN\] 0102').split(' ')[-1][:-1]
    node_announcement = l1.daemon.is_in_log(r'\[IN\] 0101').split(' ')[-1][:-1]

    l2.rpc.close(l3.info['id'])
    wait_for(lambda: only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
    bitcoind.generate_block(1)

    wait_for(lambda: l1.rpc.listchannels()['channels'] == [])
    wait_for(lambda: l1.rpc.listnodes()['nodes'] == [])

    # FIXME: This is a hack: we should have a framework for canned conversations
    # This doesn't naturally terminate, so we give it 5 seconds.
    try:
        subprocess.run(['devtools/gossipwith',
                        '{}@localhost:{}'.format(l1.info['id'], l1.port),
                        channel_announcement,
                        channel_update,
                        node_announcement],
                       timeout=5, stdout=subprocess.PIPE)
    except subprocess.TimeoutExpired:
        pass

    # l1 should reject it.
    assert(l1.rpc.listchannels()['channels'] == [])
    assert(l1.rpc.listnodes()['nodes'] == [])

    l1.stop()
    l1.start()
    assert(l1.rpc.listchannels()['channels'] == [])
    assert(l1.rpc.listnodes()['nodes'] == [])
Example #5
def test_invoice_routeboost(node_factory, bitcoind):
    """Test routeboost 'r' hint in bolt11 invoice.
    """
    l0, l1, l2 = node_factory.line_graph(3, fundamount=2 * (10**4), wait_for_announce=True)

    # Check routeboost.
    # Make invoice and pay it
    inv = l2.rpc.invoice(msatoshi=123456, label="inv1", description="?")
    # Check routeboost.
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'][0]['short_channel_id']
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6

    # Pay it (and make sure it's fully resolved before we take l1 offline!)
    l1.rpc.pay(inv['bolt11'])
    wait_channel_quiescent(l1, l2)

    # Due to reserve & fees, l1 doesn't have capacity to pay this.
    inv = l2.rpc.invoice(msatoshi=2 * (10**7) - 123456, label="inv2", description="?")
    # Check warning
    assert 'warning_capacity' in inv
    assert 'warning_offline' not in inv

    l1.rpc.disconnect(l2.info['id'], True)
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])

    inv = l2.rpc.invoice(123456, label="inv3", description="?")
    # Check warning.
    assert 'warning_capacity' not in inv
    assert 'warning_offline' in inv

    # Close l0, l2 will not use l1 at all.
    l0.rpc.close(l1.info['id'])
    l0.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(100)

    # l2 has to notice channel is gone.
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2)
    inv = l2.rpc.invoice(123456, label="inv4", description="?")
    # Check warning.
    assert 'warning_capacity' in inv
    assert 'warning_offline' not in inv
Example #6
    def stop(self, blocking=False):
        """
        """
        name = self.__class__.__name__
        logger.info("%s: Stop requested" % name)
        self._stop_requested = True

        if blocking:
            def stopped():
                return self._stopped

            logger.info("%s: Waiting to stop" % name)
            wait_for(stopped, timeout=60)

            if stopped():
                logger.info("%s: stopped successfully." % name)
            else:
                raise Exception("%s did not stop in 60 seconds" % name)
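
# Illustrative sketch (not part of the snippet above): the wait_for() helper used
# throughout these examples is essentially a poll-until-true loop.  A minimal
# stand-in consistent with how it is called here (predicate plus optional timeout=
# and interval= keywords) could look like this; the real helper may instead raise
# on timeout, so treat it only as a sketch.
import time


def wait_for(success, timeout=60, interval=0.25):
    deadline = time.time() + timeout
    while not success() and time.time() < deadline:
        time.sleep(interval)
    return success()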
Example #7
def test_routing_gossip_reconnect(node_factory):
    # Connect two peers, reconnect and then see if we resume the
    # gossip.
    disconnects = ['-WIRE_CHANNEL_ANNOUNCEMENT']
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l3 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.openchannel(l2, 20000)

    # Now open new channels and everybody should sync
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.openchannel(l3, 20000)

    # Settle the gossip
    for n in [l1, l2, l3]:
        wait_for(lambda: len(n.rpc.listchannels()['channels']) == 4)
Example #8
    def _do_stop(self):
        """
            Triggers the backup system to gracefully stop
        """
        self.info("Stopping backup system gracefully")

        self._stop_requested = True
        # wait until backup system stops

        def stopped():
            return self._stopped

        self.info("Waiting for backup system to stop")
        wait_for(stopped, timeout=60)
        if stopped():
            self.info("Backup system stopped successfully. Bye!")

        else:
            self.error("Backup system did not stop in 60 seconds")
Example #9
def test_node_reannounce(node_factory, bitcoind):
    "Test that we reannounce a node when parameters change"
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True,
                                              'log_all_io': True})
    bitcoind.generate_block(5)

    # Wait for node_announcement for l1.
    l2.daemon.wait_for_log(r'\[IN\] 0101.*{}'.format(l1.info['id']))
    # Wait for it to process it.
    wait_for(lambda: l2.rpc.listnodes(l1.info['id'])['nodes'] != [])
    wait_for(lambda: 'alias' in only_one(l2.rpc.listnodes(l1.info['id'])['nodes']))
    assert only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['alias'].startswith('JUNIORBEAM')

    l1.stop()
    l1.daemon.opts['alias'] = 'SENIORBEAM'
    l1.start()

    # Wait for l1 to send us its own node_announcement.
    nannouncement = l2.daemon.wait_for_log(r'{}.*\[IN\] 0101.*{}'.format(l1.info['id'], l1.info['id'])).split('[IN] ')[1]
    wait_for(lambda: only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['alias'] == 'SENIORBEAM')

    # Restart should re-xmit exact same update on reconnect.
    l1.restart()

    # l1 should retransmit it exactly the same (no timestamp change!)
    l2.daemon.wait_for_log(r'{}.*\[IN\] {}'.format(l1.info['id'], nannouncement))
Example #10
def test_gossip_no_empty_announcements(node_factory, bitcoind):
    # Need full IO logging so we can see gossip
    opts = {'log-level': 'io'}
    l1, l2 = node_factory.get_nodes(2, opts=opts)
    # l3 sends CHANNEL_ANNOUNCEMENT to l2, but not CHANNEL_UPDATE.
    l3 = node_factory.get_node(disconnect=['+WIRE_CHANNEL_ANNOUNCEMENT'],
                               options={'dev-no-reconnect': None},
                               may_reconnect=True)
    l4 = node_factory.get_node(may_reconnect=True)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)

    # Turn on IO logging for openingd (make sure it's ready!)
    l1.daemon.wait_for_log('openingd-.*: Handed peer, entering loop')
    subprocess.run(['kill', '-USR1', l1.subd_pid('openingd')])
    l2.daemon.wait_for_log('openingd-{}.*: Handed peer, entering loop'.format(l3.info['id']))
    subprocess.run(['kill', '-USR1', l2.subd_pid('openingd-{}'.format(l3.info['id']))])

    # Make an announced-but-not-updated channel.
    l3.fund_channel(l4, 10**5)
    bitcoind.generate_block(5)

    # 0x0100 = channel_announcement, which goes to l2 before l3 dies.
    l2.daemon.wait_for_log(r'\[IN\] 0100')

    # l3 actually disconnects from l4 *and* l2!  That means we never see
    # the (delayed) channel_update from l4.
    wait_for(lambda: not l3.rpc.listpeers(l4.info['id'])['peers'][0]['connected'])
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)

    # But it never goes to l1, as there's no channel_update.
    time.sleep(2)
    assert not l1.daemon.is_in_log(r'\[IN\] 0100')
    assert len(l1.rpc.listchannels()['channels']) == 0

    # If we reconnect, gossip will now flow.
    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
Example #11
def test_gossip_disable_channels(node_factory, bitcoind):
    """Simple test to check that channels get disabled correctly on disconnect and
    reenabled upon reconnecting

    """
    opts = {'dev-no-reconnect': None, 'may_reconnect': True}
    l1, l2 = node_factory.get_nodes(2, opts=opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid = l1.fund_channel(l2, 10**6)
    bitcoind.generate_block(5)

    def count_active(node):
        chans = node.rpc.listchannels()['channels']
        active = [c for c in chans if c['active']]
        return len(active)

    l1.wait_channel_active(scid)
    l2.wait_channel_active(scid)

    assert(count_active(l1) == 2)
    assert(count_active(l2) == 2)

    l2.restart()

    wait_for(lambda: count_active(l1) == 0)
    assert(count_active(l2) == 0)

    # Now reconnect, they should re-enable the channels
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    wait_for(lambda: count_active(l1) == 2)
    wait_for(lambda: count_active(l2) == 2)
Example #12
def test_gossip_store_private_channels(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2, announce_channels=False)

    # We see this channel, even though it's not announced, because it's local.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)

    l2.stop()
    l1.restart()

    # We should still see local channels!
    time.sleep(3)  # Make sure store is loaded
    chans = l1.rpc.listchannels()['channels']
    assert len(chans) == 2

    # Now compact store
    l1.rpc.call('dev-compact-gossip-store')
    l1.restart()

    time.sleep(3)  # Make sure store is loaded
    # We should still see local channels!
    chans = l1.rpc.listchannels()['channels']
    assert len(chans) == 2
Example #13
def test_gossip_store_load(node_factory):
    """Make sure we can read canned gossip store"""
    l1 = node_factory.get_node(start=False)
    with open(os.path.join(l1.daemon.lightning_dir, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("03"  # GOSSIP_VERSION
                                  "000001bc"  # len
                                  "521ef598"  # csum
                                  "1000"  # WIRE_GOSSIP_STORE_CHANNEL_ANNOUNCEMENT
                                  "01b00100bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a0000000001000000"
                                  "00000086"  # len
                                  "88c703c8"  # csum
                                  "1001"  # WIRE_GOSSIP_STORE_CHANNEL_UPDATE
                                  "008201021ea7c2eadf8a29eb8690511a519b5656e29aa0a853771c4e38e65c5abf43d907295a915e69e451f4c7a0c3dc13dd943cfbe3ae88c0b96667cd7d58955dbfedcf43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b500015b8d9b440000009000000000000003e8000003e800000001"
                                  "00000099"  # len
                                  "12abbbba"  # csum
                                  "1002"  # WIRE_GOSSIP_STORE_NODE_ANNOUNCEMENT
                                  "00950101cf5d870bc7ecabcb7cd16898ef66891e5f0c6c5851bd85b670f03d325bc44d7544d367cd852e18ec03f7f4ff369b06860a3b12b07b29f36fb318ca11348bf8ec00005aab817c03f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d23974b250757a7a6c6549544300000000000000000000000000000000000000000000000007010566933e2607"))

    l1.start()
    # May precede the Started msg waited for in 'start'.
    wait_for(lambda: l1.daemon.is_in_log('gossip_store: Read 1/1/1/0 cannounce/cupdate/nannounce/cdelete from store in 756 bytes'))
    assert not l1.daemon.is_in_log('gossip_store.*truncating')
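
# Illustrative sketch (not part of the test above): the canned store written here is
# a single version byte (0x03) followed by records framed as be32 length, be32
# checksum, then `length` bytes of wire message whose first two bytes are the store
# record type (0x1000/0x1001/0x1002 above).  A minimal reader under that assumption
# (checksum verification skipped) accounts for the "756 bytes" the log reports:
# 1 version byte + 3 * 8 header bytes + 444 + 134 + 153 payload bytes.
import struct


def read_gossip_store(path):
    records = []
    with open(path, 'rb') as f:
        version = f.read(1)[0]
        while True:
            hdr = f.read(8)
            if len(hdr) < 8:
                break
            length, _csum = struct.unpack('>II', hdr)
            msg = f.read(length)
            records.append((msg[:2].hex(), length))   # (record type, payload length)
    return version, records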
Example #14
def test_gossip_pruning(node_factory, bitcoind):
    """ Create channel and see it being updated in time before pruning
    """
    opts = {'dev-channel-update-interval': 5}
    l1, l2, l3 = node_factory.get_nodes(3, opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)

    scid1 = l1.fund_channel(l2, 10**6)
    scid2 = l2.fund_channel(l3, 10**6)

    bitcoind.generate_block(6)

    # Channels should be activated locally
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l3.rpc.listchannels()['channels']] == [True] * 4)

    # All of them should send a keepalive message
    l1.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
    ])
    l2.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
        'Sending keepalive channel_update for {}'.format(scid2),
    ])
    l3.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid2),
    ])

    # Now kill l3, so that l2 and l1 can prune it from their view after 10 seconds

    # FIXME: This sleep() masks a real bug: that channeld sends a
    # channel_update message (to disable the channel) with same
    # timestamp as the last keepalive, and thus is ignored.  The minimal
    # fix is to backdate the keepalives 1 second, but maybe we should
    # simply have gossipd generate all updates?
    time.sleep(1)
    l3.stop()

    l1.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))
    l2.daemon.wait_for_log("Pruning channel {} from network view".format(scid2))

    assert scid2 not in [c['short_channel_id'] for c in l1.rpc.listchannels()['channels']]
    assert scid2 not in [c['short_channel_id'] for c in l2.rpc.listchannels()['channels']]
    assert l3.info['id'] not in [n['nodeid'] for n in l1.rpc.listnodes()['nodes']]
    assert l3.info['id'] not in [n['nodeid'] for n in l2.rpc.listnodes()['nodes']]
Example #15
def test_channel_persistence(node_factory, bitcoind, executor):
    # Start two nodes and open a channel (to remember). l2 will
    # mysteriously die while committing the first HTLC so we can
    # check that HTLCs reloaded from the DB work.
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Neither node should have a channel open, they are just connected
    for n in (l1, l2):
        assert (n.db_query('SELECT COUNT(id) as count FROM channels;')[0]
                ['count'] == 0)

    l1.fund_channel(l2, 100000)

    peers = l1.rpc.listpeers()['peers']
    assert (only_one(peers[0]['channels'])['state'] == 'CHANNELD_NORMAL')

    # Both nodes should now have exactly one channel in the database
    for n in (l1, l2):
        assert (n.db_query('SELECT COUNT(id) as count FROM channels;')[0]
                ['count'] == 1)

    # Fire off a sendpay request, it'll get interrupted by a restart
    executor.submit(l1.pay, l2, 10000)
    # Wait for it to be committed to, i.e., stored in the DB
    l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

    # Stop l2, l1 will reattempt to connect
    print("Killing l2 in mid HTLC")
    l2.daemon.kill()

    # Clear the disconnect and timer stop so we can proceed normally
    del l2.daemon.opts['dev-disconnect']

    # Wait for l1 to notice
    wait_for(lambda: 'connected' not in only_one(l1.rpc.listpeers()['peers'][0]
                                                 ['channels']))

    # Now restart l2 and it should reload peers/channels from the DB
    l2.start()
    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 1)

    # Wait for the restored HTLC to finish
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])[
        'msatoshi_to_us'] == 99990000,
             interval=1)

    wait_for(
        lambda: len([p for p in l1.rpc.listpeers()['peers']
                     if p['connected']]),
        interval=1)
    wait_for(
        lambda: len([p for p in l2.rpc.listpeers()['peers']
                     if p['connected']]),
        interval=1)

    # Now make sure this is really functional by sending a payment
    l1.pay(l2, 10000)

    # L1 doesn't actually update msatoshi_to_us until it receives
    # revoke_and_ack from L2, which can take a little bit.
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])[
        'msatoshi_to_us'] == 99980000)
    assert only_one(
        l2.rpc.listpeers()['peers'][0]['channels'])['msatoshi_to_us'] == 20000

    # Finally restart l1, and make sure it remembers
    l1.restart()
    assert only_one(l1.rpc.listpeers()['peers'][0]
                    ['channels'])['msatoshi_to_us'] == 99980000

    # Now make sure l1 is watching for unilateral closes
    l2.rpc.dev_fail(l1.info['id'])
    l2.daemon.wait_for_log('Failing due to dev-fail command')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)

    # L1 must notice.
    l1.daemon.wait_for_log(' to ONCHAIN')
Example #16
def test_penalty_outhtlc(node_factory, bitcoind, executor):
    """Test penalty transaction with an outgoing HTLC"""
    # First we need to get funds to l2, so suppress after second.
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # Move some across to l2.
    l1.pay(l2, 200000000)

    assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
    assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')

    # Now, this will get stuck due to l1 commit being disabled..
    t = executor.submit(l2.pay, l1, 100000000)

    # Make sure we get signature from them.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

    # They should both have commitments blocked now.
    l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
    l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')

    # Make sure both sides got revoke_and_ack for that commitment.
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Take our snapshot.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    # Let them continue
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])

    # Thread should complete.
    t.result(timeout=10)

    # Make sure both sides got revoke_and_ack for final.
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Now we really mess things up!
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log(' to ONCHAIN')
    # FIXME: l1 should try to stumble along!

    # l2 should spend all of the outputs (except to-us).
    # Could happen in any order, depending on commitment tx.
    l2.daemon.wait_for_logs([
        'Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US',
        'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM by OUR_PENALTY_TX .* after 0 blocks',
        'sendrawtx exit 0',
        'Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX .* after 0 blocks',
        'sendrawtx exit 0'
    ])

    # FIXME: test HTLC tx race!

    # 100 blocks later, all resolved.
    bitcoind.generate_block(100)

    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    outputs = l2.rpc.listfunds()['outputs']
    assert [o['status'] for o in outputs] == ['confirmed'] * 3
    # Allow some lossage for fees.
    assert sum(o['value'] for o in outputs) < 10**6
    assert sum(o['value'] for o in outputs) > 10**6 - 15000
Example #17
def test_sign_and_send_psbt(node_factory, bitcoind, chainparams):
    """
    Tests for the sign + send psbt RPCs
    """
    amount = 1000000
    total_outs = 12
    coin_mvt_plugin = os.path.join(os.getcwd(),
                                   'tests/plugins/coin_movements.py')
    l1 = node_factory.get_node(options={'plugin': coin_mvt_plugin},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node()
    addr = chainparams['example_addr']

    # Add a medley of funds to withdraw later, bech32 + p2sh-p2wpkh
    for i in range(total_outs // 2):
        bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8)
        bitcoind.rpc.sendtoaddress(
            l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'], amount / 10**8)
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == total_outs)

    # Make a PSBT out of our inputs
    reserved = l1.rpc.reserveinputs(
        outputs=[{
            addr: Millisatoshi(3 * amount * 1000)
        }])
    assert len([x for x in l1.rpc.listfunds()['outputs']
                if x['reserved']]) == 4
    psbt = bitcoind.rpc.decodepsbt(reserved['psbt'])
    saved_input = psbt['tx']['vin'][0]

    # Go ahead and unreserve the UTXOs, we'll use a smaller
    # set of them to create a second PSBT that we'll attempt to sign
    # and broadcast (to disastrous results)
    l1.rpc.unreserveinputs(reserved['psbt'])

    # Re-reserve one of the utxos we just unreserved
    utxos = []
    utxos.append(saved_input['txid'] + ":" + str(saved_input['vout']))
    second_reservation = l1.rpc.reserveinputs(
        [{
            addr: Millisatoshi(amount * 0.5 * 1000)
        }],
        feerate='253perkw',
        utxos=utxos)

    # We require the utxos be reserved before signing them
    with pytest.raises(
            RpcError, match=r"Aborting PSBT signing. UTXO .* is not reserved"):
        l1.rpc.signpsbt(reserved['psbt'])['signed_psbt']

    # Now we unreserve the singleton, so we can reserve it again
    l1.rpc.unreserveinputs(second_reservation['psbt'])

    # We re-reserve the first set...
    utxos = []
    for vin in psbt['tx']['vin']:
        utxos.append(vin['txid'] + ':' + str(vin['vout']))
    reserved = l1.rpc.reserveinputs(outputs=[{
        addr:
        Millisatoshi(3 * amount * 1000)
    }],
                                    utxos=utxos)
    # Sign + send the PSBT we've created
    signed_psbt = l1.rpc.signpsbt(reserved['psbt'])['signed_psbt']
    broadcast_tx = l1.rpc.sendpsbt(signed_psbt)

    # Check that it was broadcast successfully
    l1.daemon.wait_for_log(r'sendrawtx exit 0 .* sendrawtransaction {}'.format(
        broadcast_tx['tx']))
    bitcoind.generate_block(1)

    # We expect a change output to be added to the wallet
    expected_outs = total_outs - 4 + 1
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == expected_outs)

    # Let's try *sending* a PSBT that can't be finalized (it's unsigned)
    with pytest.raises(RpcError, match=r"PSBT not finalizeable"):
        l1.rpc.sendpsbt(second_reservation['psbt'])

    # Now we try signing a PSBT with an output that's already been spent
    with pytest.raises(
            RpcError,
            match=r"Aborting PSBT signing. UTXO {} is not reserved".format(
                utxos[0])):
        l1.rpc.signpsbt(second_reservation['psbt'])

    # Queue up another node, to make some PSBTs for us
    for i in range(total_outs // 2):
        bitcoind.rpc.sendtoaddress(l2.rpc.newaddr()['bech32'], amount / 10**8)
        bitcoind.rpc.sendtoaddress(
            l2.rpc.newaddr('p2sh-segwit')['p2sh-segwit'], amount / 10**8)
    # Create a PSBT using L2
    bitcoind.generate_block(1)
    wait_for(lambda: len(l2.rpc.listfunds()['outputs']) == total_outs)
    l2_reserved = l2.rpc.reserveinputs(
        outputs=[{
            addr: Millisatoshi(3 * amount * 1000)
        }])

    # Try to get L1 to sign it
    with pytest.raises(RpcError, match=r"No wallet inputs to sign"):
        l1.rpc.signpsbt(l2_reserved['psbt'])

    # Add some of our own PSBT inputs to it
    l1_reserved = l1.rpc.reserveinputs(
        outputs=[{
            addr: Millisatoshi(3 * amount * 1000)
        }])
    joint_psbt = bitcoind.rpc.joinpsbts(
        [l1_reserved['psbt'], l2_reserved['psbt']])

    half_signed_psbt = l1.rpc.signpsbt(joint_psbt)['signed_psbt']
    totally_signed = l2.rpc.signpsbt(half_signed_psbt)['signed_psbt']

    broadcast_tx = l1.rpc.sendpsbt(totally_signed)
    l1.daemon.wait_for_log(r'sendrawtx exit 0 .* sendrawtransaction {}'.format(
        broadcast_tx['tx']))

    # Send a PSBT that's not ours
    l2_reserved = l2.rpc.reserveinputs(
        outputs=[{
            addr: Millisatoshi(3 * amount * 1000)
        }])
    l2_signed_psbt = l2.rpc.signpsbt(l2_reserved['psbt'])['signed_psbt']
    l1.rpc.sendpsbt(l2_signed_psbt)

    # Re-try sending the same tx?
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])
    # Expect an error here
    with pytest.raises(JSONRPCError,
                       match=r"Transaction already in block chain"):
        bitcoind.rpc.sendrawtransaction(broadcast_tx['tx'])

    # Try an empty PSBT
    with pytest.raises(RpcError, match=r"should be a PSBT, not"):
        l1.rpc.signpsbt('')
    with pytest.raises(RpcError, match=r"should be a PSBT, not"):
        l1.rpc.sendpsbt('')

    # Try a modified (invalid) PSBT string
    modded_psbt = l2_reserved['psbt'][:-3] + 'A' + l2_reserved['psbt'][-3:]
    with pytest.raises(RpcError, match=r"should be a PSBT, not"):
        l1.rpc.signpsbt(modded_psbt)

    wallet_coin_mvts = [
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 1000000000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 0,
            'tag': 'spend_track'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 0,
            'tag': 'spend_track'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 0,
            'tag': 'spend_track'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 0,
            'tag': 'spend_track'
        },
        # Nicely splits out withdrawals and chain fees, because it's all our tx
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 988255000,
            'tag': 'withdrawal'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 3000000000,
            'tag': 'withdrawal'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 11745000,
            'tag': 'chain_fees'
        },
        {
            'type': 'chain_mvt',
            'credit': 988255000,
            'debit': 0,
            'tag': 'deposit'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 0,
            'tag': 'spend_track'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 0,
            'tag': 'spend_track'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 0,
            'tag': 'spend_track'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 0,
            'tag': 'spend_track'
        },
        # Note that this is technically wrong since we paid 11745sat in fees
        # but since it includes inputs / outputs from a second node, we can't
        # do proper accounting for it.
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 4000000000,
            'tag': 'withdrawal'
        },
        {
            'type': 'chain_mvt',
            'credit': 0,
            'debit': 0,
            'tag': 'chain_fees'
        },
        {
            'type': 'chain_mvt',
            'credit': 988255000,
            'debit': 0,
            'tag': 'deposit'
        },
    ]
    check_coin_moves(l1, 'wallet', wallet_coin_mvts, chainparams)
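
# Illustrative check (not part of the test above): the movements for the first
# broadcast balance in millisatoshi.  Four reserved inputs of 1_000_000 sat each
# are spent into the 3 * amount withdrawal, the change output (which reappears as
# the 988_255_000 'deposit' credit) and the recorded chain fee.
spent_inputs_msat = 4 * 1_000_000 * 1000              # 4_000_000_000
withdrawals_msat = 3_000_000_000 + 988_255_000        # payment + change leaving the wallet
chain_fees_msat = 11_745_000
assert spent_inputs_msat == withdrawals_msat + chain_fees_msat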
Example #18
def test_gossip_query_channel_range(node_factory, bitcoind):
    l1, l2, l3, l4 = node_factory.line_graph(4, opts={'log-level': 'io'},
                                             fundchannel=False)

    # Make public channels on consecutive blocks
    l1.fundwallet(10**6)
    l2.fundwallet(10**6)

    num_tx = len(bitcoind.rpc.getrawmempool())
    l1.rpc.fundchannel(l2.info['id'], 10**5)['tx']
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == num_tx + 1)
    bitcoind.generate_block(1)

    num_tx = len(bitcoind.rpc.getrawmempool())
    l2.rpc.fundchannel(l3.info['id'], 10**5)['tx']
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == num_tx + 1)
    bitcoind.generate_block(1)

    # Get them both to gossip depth.
    bitcoind.generate_block(5)

    # Make sure l2 has received all the gossip.
    l2.daemon.wait_for_logs(['Received node_announcement for node ' + l1.info['id'],
                             'Received node_announcement for node ' + l3.info['id']])

    scid12 = only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'][0]['short_channel_id']
    scid23 = only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['channels'][0]['short_channel_id']
    block12 = int(scid12.split('x')[0])
    block23 = int(scid23.split('x')[0])

    assert block23 == block12 + 1

    # l1 asks for all channels, gets both.
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=1000000)

    assert ret['final_first_block'] == 0
    assert ret['final_num_blocks'] == 1000000
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 2
    assert ret['short_channel_ids'][0] == scid12
    assert ret['short_channel_ids'][1] == scid23

    # Does not include scid12
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=block12)
    assert ret['final_first_block'] == 0
    assert ret['final_num_blocks'] == block12
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 0

    # Does include scid12
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=block12 + 1)
    assert ret['final_first_block'] == 0
    assert ret['final_num_blocks'] == block12 + 1
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 1
    assert ret['short_channel_ids'][0] == scid12

    # Doesn't include scid23
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=block23)
    assert ret['final_first_block'] == 0
    assert ret['final_num_blocks'] == block23
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 1
    assert ret['short_channel_ids'][0] == scid12

    # Does include scid23
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=block12,
                                         num=block23 - block12 + 1)
    assert ret['final_first_block'] == block12
    assert ret['final_num_blocks'] == block23 - block12 + 1
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 2
    assert ret['short_channel_ids'][0] == scid12
    assert ret['short_channel_ids'][1] == scid23

    # Only includes scid23
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=block23,
                                         num=1)
    assert ret['final_first_block'] == block23
    assert ret['final_num_blocks'] == 1
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 1
    assert ret['short_channel_ids'][0] == scid23

    # Past both
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=block23 + 1,
                                         num=1000000)
    assert ret['final_first_block'] == block23 + 1
    assert ret['final_num_blocks'] == 1000000
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 0

    # Turn on IO logging in l1 channeld.
    subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')])

    # Make l2 split reply into two (technically async)
    l2.rpc.dev_set_max_scids_encode_size(max=9)
    l2.daemon.wait_for_log('Set max_scids_encode_bytes to 9')
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=1000000)

    # Turns out it sends: 0+53, 53+26, 79+13, 92+7, 99+3, 102+2, 104+1, 105+999895
    l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 8)

    # It should definitely have split
    assert ret['final_first_block'] != 0 or ret['final_num_blocks'] != 1000000
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 2
    assert ret['short_channel_ids'][0] == scid12
    assert ret['short_channel_ids'][1] == scid23
    l2.daemon.wait_for_log('queue_channel_ranges full: splitting')

    # Test overflow case doesn't split forever; should still only get 8 for this
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=1,
                                         num=429496000)
    l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 8)

    # And no more!
    time.sleep(1)
    assert not l1.daemon.is_in_log(r'\[IN\] 0108', start=l1.daemon.logsearch_start)

    # This should actually be large enough for zlib to kick in!
    l3.fund_channel(l4, 10**5)
    bitcoind.generate_block(5)
    l2.daemon.wait_for_log('Received node_announcement for node ' + l4.info['id'])

    # Restore infinite encode size.
    l2.rpc.dev_set_max_scids_encode_size(max=(2**32 - 1))
    l2.daemon.wait_for_log('Set max_scids_encode_bytes to {}'
                           .format(2**32 - 1))

    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=65535)
    l1.daemon.wait_for_log(
        # WIRE_REPLY_CHANNEL_RANGE
        r'\[IN\] 0108'
        # chain_hash
        + '................................................................'
        # first_blocknum
        + '00000000'
        # number_of_blocks
        + '0000ffff'
        # complete
        + '01'
        # length
        + '....'
        # encoding
        + '01'
    )
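
# Illustrative sketch (not part of the test above): the regex just matched spells out
# the fixed prefix of a reply_channel_range message -- type 0x0108, 32-byte chain_hash,
# u32 first_blocknum, u32 number_of_blocks, a complete flag, a u16 length and then the
# encoding byte (0x01 meaning zlib here).  Under that assumed layout, decoding the
# prefix from a hex dump is plain struct unpacking; the helper name is ours.
import struct


def decode_reply_channel_range_prefix(msg_hex):
    msg = bytes.fromhex(msg_hex)
    msgtype, = struct.unpack_from('>H', msg, 0)
    assert msgtype == 0x0108                          # WIRE_REPLY_CHANNEL_RANGE
    chain_hash = msg[2:34]
    first_blocknum, number_of_blocks = struct.unpack_from('>II', msg, 34)
    complete = msg[42]
    scids_len, = struct.unpack_from('>H', msg, 43)
    encoding = msg[45]
    return chain_hash.hex(), first_blocknum, number_of_blocks, complete, scids_len, encoding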
Example #19
def test_deprecated_txprepare(node_factory, bitcoind):
    """Test the deprecated old-style:
       txprepare {destination} {satoshi} {feerate} {minconf}
    """
    amount = 10**4
    l1 = node_factory.get_node(options={'allow-deprecated-apis': True})
    addr = l1.rpc.newaddr()['bech32']

    for i in range(7):
        l1.fundwallet(10**8)

    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 7)

    # Array type
    with pytest.raises(
            RpcError,
            match=r'.* should be an amount in satoshis or all, not .*'):
        l1.rpc.call('txprepare', [addr, 'slow'])

    with pytest.raises(RpcError, match=r'Need set \'satoshi\' field.'):
        l1.rpc.call('txprepare', [addr])

    with pytest.raises(RpcError,
                       match=r'Could not parse destination address.*'):
        l1.rpc.call('txprepare', [Millisatoshi(amount * 100), 'slow', 1])

    l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'slow', 1])
    l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'normal'])
    l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), None, 1])
    l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100)])

    # Object type
    with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
        l1.rpc.call('txprepare', {'destination': addr, 'feerate': 'slow'})

    with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
        l1.rpc.call(
            'txprepare', {
                'satoshi': Millisatoshi(amount * 100),
                'feerate': '10perkw',
                'minconf': 2
            })

    l1.rpc.call(
        'txprepare', {
            'destination': addr,
            'satoshi': Millisatoshi(amount * 100),
            'feerate': '2000perkw',
            'minconf': 1
        })
    l1.rpc.call(
        'txprepare', {
            'destination': addr,
            'satoshi': Millisatoshi(amount * 100),
            'feerate': '2000perkw'
        })
    l1.rpc.call('txprepare', {
        'destination': addr,
        'satoshi': Millisatoshi(amount * 100)
    })
Example #20
def test_gossip_persistence(node_factory, bitcoind):
    """Gossip for a while, restart and it should remember.

    Also tests for funding outpoint spends, and they should be persisted
    too.
    """
    opts = {'dev-no-reconnect': None, 'may_reconnect': True}
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)

    scid12 = l1.fund_channel(l2, 10**6)
    scid23 = l2.fund_channel(l3, 10**6)

    # Make channels public, except for l3 -> l4, which is kept local-only for now
    bitcoind.generate_block(5)
    scid34 = l3.fund_channel(l4, 10**6)
    bitcoind.generate_block(1)

    def active(node):
        chans = node.rpc.listchannels()['channels']
        return sorted([c['short_channel_id'] for c in chans if c['active']])

    def non_public(node):
        chans = node.rpc.listchannels()['channels']
        return sorted([c['short_channel_id'] for c in chans if not c['public']])

    # Channels should be activated
    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])
    wait_for(lambda: active(l2) == [scid12, scid12, scid23, scid23])
    # This one sees its private channel
    wait_for(lambda: active(l3) == [scid12, scid12, scid23, scid23, scid34, scid34])

    # l1 restarts and doesn't connect, but loads from persisted store, all
    # local channels should be disabled, leaving only the two l2 <-> l3
    # directions
    l1.restart()
    wait_for(lambda: active(l1) == [scid23, scid23])

    # Now reconnect, they should re-enable the two l1 <-> l2 directions
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])

    # Now spend the funding tx, generate a block and see others deleting the
    # channel from their network view
    l1.rpc.dev_fail(l2.info['id'])
    time.sleep(1)
    bitcoind.generate_block(1)

    wait_for(lambda: active(l1) == [scid23, scid23])
    wait_for(lambda: active(l2) == [scid23, scid23])
    wait_for(lambda: active(l3) == [scid23, scid23, scid34, scid34])

    # The channel l3 -> l4 should be known only to them
    assert non_public(l1) == []
    assert non_public(l2) == []
    wait_for(lambda: non_public(l3) == [scid34, scid34])
    wait_for(lambda: non_public(l4) == [scid34, scid34])

    # Finally, it should also remember the deletion after a restart
    l3.restart()
    l4.restart()
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
    wait_for(lambda: active(l3) == [scid23, scid23, scid34, scid34])

    # Both l3 and l4 should remember their local-only channel
    wait_for(lambda: non_public(l3) == [scid34, scid34])
    wait_for(lambda: non_public(l4) == [scid34, scid34])
Example #21
def test_forward_event_notification(node_factory, bitcoind, executor):
    """ test 'forward_event' notifications
    """
    amount = 10**8
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']

    l1, l2, l3 = node_factory.line_graph(
        3,
        opts=[{}, {
            'plugin':
            os.path.join(os.getcwd(),
                         'tests/plugins/forward_payment_status.py')
        }, {}],
        wait_for_announce=True)
    l4 = node_factory.get_node()
    l5 = node_factory.get_node(disconnect=disconnects)
    l2.openchannel(l4, 10**6, wait_for_announce=False)
    l2.openchannel(l5, 10**6, wait_for_announce=True)

    bitcoind.generate_block(5)

    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 8)

    payment_hash13 = l3.rpc.invoice(amount, "first", "desc")['payment_hash']
    route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']

    # status: offered -> settled
    l1.rpc.sendpay(route, payment_hash13)
    l1.rpc.waitsendpay(payment_hash13)

    # status: offered -> failed
    route = l1.rpc.getroute(l4.info['id'], amount, 1)['route']
    payment_hash14 = "f" * 64
    with pytest.raises(RpcError):
        l1.rpc.sendpay(route, payment_hash14)
        l1.rpc.waitsendpay(payment_hash14)

    # status: offered -> local_failed
    payment_hash15 = l5.rpc.invoice(amount, 'onchain_timeout',
                                    'desc')['payment_hash']
    fee = amount * 10 // 1000000 + 1
    c12 = l1.get_channel_scid(l2)
    c25 = l2.get_channel_scid(l5)
    route = [{
        'msatoshi': amount + fee - 1,
        'id': l2.info['id'],
        'delay': 12,
        'channel': c12
    }, {
        'msatoshi': amount - 1,
        'id': l5.info['id'],
        'delay': 5,
        'channel': c25
    }]

    executor.submit(l1.rpc.sendpay, route, payment_hash15)

    l5.daemon.wait_for_log('permfail')
    l5.wait_for_channel_onchain(l2.info['id'])
    l2.bitcoin.generate_block(1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    l5.daemon.wait_for_log(' to ONCHAIN')

    l2.daemon.wait_for_log(
        'Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks'
    )
    bitcoind.generate_block(6)

    l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')

    bitcoind.generate_block(1)
    l2.daemon.wait_for_log(
        'Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US'
    )
    l5.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')

    bitcoind.generate_block(100)
    sync_blockheight(bitcoind, [l2])

    stats = l2.rpc.listforwards()['forwards']
    assert len(stats) == 3
    plugin_stats = l2.rpc.call('listforwards_plugin')['forwards']
    assert len(plugin_stats) == 6

    # use stats to build what we expect went to plugin.
    expect = stats[0].copy()
    # First event won't have conclusion.
    del expect['resolved_time']
    expect['status'] = 'offered'
    assert plugin_stats[0] == expect
    expect = stats[0].copy()
    assert plugin_stats[1] == expect

    expect = stats[1].copy()
    del expect['resolved_time']
    expect['status'] = 'offered'
    assert plugin_stats[2] == expect
    expect = stats[1].copy()
    assert plugin_stats[3] == expect

    expect = stats[2].copy()
    del expect['failcode']
    del expect['failreason']
    expect['status'] = 'offered'
    assert plugin_stats[4] == expect
    expect = stats[2].copy()
    assert plugin_stats[5] == expect
Example #22
def test_penalty_inhtlc(node_factory, bitcoind, executor):
    """Test penalty transaction with an incoming HTLC"""
    # We suppress each one after first commit; HTLC gets added not fulfilled.
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # Now, this will get stuck due to l1 commit being disabled..
    t = executor.submit(l1.pay, l2, 100000000)

    assert len(l1.getactivechannels()) == 2
    assert len(l2.getactivechannels()) == 2

    # They should both have commitments blocked now.
    l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
    l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')

    # Make sure l1 got l2's commitment to the HTLC, and sent to master.
    l1.daemon.wait_for_log('UPDATE WIRE_CHANNEL_GOT_COMMITSIG')

    # Take our snapshot.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    # Let them continue
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])

    # Should fulfill.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Payment should now complete.
    t.result(timeout=10)

    # Now we really mess things up!
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log(' to ONCHAIN')
    # FIXME: l1 should try to stumble along!
    wait_for(lambda: len(l2.getactivechannels()) == 0)

    # l2 should spend all of the outputs (except to-us).
    # Could happen in any order, depending on commitment tx.
    l2.daemon.wait_for_logs([
        'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM by OUR_PENALTY_TX .* after 0 blocks',
        'sendrawtx exit 0',
        'Propose handling THEIR_REVOKED_UNILATERAL/THEIR_HTLC by OUR_PENALTY_TX .* after 0 blocks',
        'sendrawtx exit 0'
    ])

    # FIXME: test HTLC tx race!

    # 100 blocks later, all resolved.
    bitcoind.generate_block(100)

    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    outputs = l2.rpc.listfunds()['outputs']
    assert [o['status'] for o in outputs] == ['confirmed'] * 2
    # Allow some lossage for fees.
    assert sum(o['value'] for o in outputs) < 10**6
    assert sum(o['value'] for o in outputs) > 10**6 - 15000
Example #23
def test_invoice_routeboost_private(node_factory, bitcoind):
    """Test routeboost 'r' hint in bolt11 invoice for private channels
    """
    l1, l2, l3 = node_factory.get_nodes(3)
    node_factory.join_nodes([l1, l2],
                            fundamount=16777215,
                            announce_channels=False)

    scid = l1.get_channel_scid(l2)

    # Attach public channel to l1 so it doesn't look like a dead-end.
    l0 = node_factory.get_node()
    l0.rpc.connect(l1.info['id'], 'localhost', l1.port)
    scid_dummy, _ = l0.fundchannel(l1, 2 * (10**5))
    mine_funding_to_announce(bitcoind, [l0, l1, l2, l3])

    # Make sure channel is totally public.
    wait_for(lambda: [
        c['public'] for c in l2.rpc.listchannels(scid_dummy)['channels']
    ] == [True, True])

    # Since there's only one route, it will reluctantly hint at it even
    # though it's private.
    inv = l2.rpc.invoice(msatoshi=123456, label="inv0", description="?")
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == l1.rpc.listchannels(
    )['channels'][0]['short_channel_id']
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6

    # If we explicitly say not to, it won't expose.
    inv = l2.rpc.invoice(msatoshi=123456,
                         label="inv1",
                         description="?",
                         exposeprivatechannels=False)
    assert 'warning_private_unused' in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv
    assert 'routes' not in l1.rpc.decodepay(inv['bolt11'])

    # If we ask for it, we get it.
    inv = l2.rpc.invoice(msatoshi=123456,
                         label="inv1a",
                         description="?",
                         exposeprivatechannels=scid)
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == l1.rpc.listchannels(
    )['channels'][0]['short_channel_id']
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6

    # Similarly if we ask for an array.
    inv = l2.rpc.invoice(msatoshi=123456,
                         label="inv1b",
                         description="?",
                         exposeprivatechannels=[scid])
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == l1.rpc.listchannels(
    )['channels'][0]['short_channel_id']
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6

    # The existence of a public channel, even without capacity, will suppress
    # the exposure of private channels.
    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid2, _ = l3.fundchannel(l2, (10**5))
    mine_funding_to_announce(bitcoind, [l0, l1, l2, l3])

    # Make sure channel is totally public.
    wait_for(
        lambda: [c['public'] for c in l2.rpc.listchannels(scid2)['channels']
                 ] == [True, True])

    inv = l2.rpc.invoice(msatoshi=10**7, label="inv2", description="?")
    print(inv)
    assert 'warning_deadends' in inv
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_mpp' not in inv

    # Unless we tell it to include it.
    inv = l2.rpc.invoice(msatoshi=10**7,
                         label="inv3",
                         description="?",
                         exposeprivatechannels=True)
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == l1.rpc.listchannels(
    )['channels'][0]['short_channel_id']
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6

    inv = l2.rpc.invoice(msatoshi=10**7,
                         label="inv4",
                         description="?",
                         exposeprivatechannels=scid)
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == scid
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6

    # Ask it explicitly to use a channel it can't (insufficient capacity)
    inv = l2.rpc.invoice(msatoshi=(10**5) * 1000 + 1,
                         label="inv5",
                         description="?",
                         exposeprivatechannels=scid2)
    assert 'warning_private_unused' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_capacity' in inv
    assert 'warning_offline' not in inv
    assert 'warning_mpp' not in inv

    # Give it two options and it will pick one with sufficient capacity.
    inv = l2.rpc.invoice(msatoshi=(10**5) * 1000 + 1,
                         label="inv6",
                         description="?",
                         exposeprivatechannels=[scid2, scid])
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == scid
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6

    # It will use an explicit exposeprivatechannels even if it thinks it's a dead-end
    l0.rpc.close(l1.info['id'])
    l0.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    wait_for(lambda: l2.rpc.listchannels(scid_dummy)['channels'] == [])

    inv = l2.rpc.invoice(msatoshi=123456,
                         label="inv7",
                         description="?",
                         exposeprivatechannels=scid)
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == l1.rpc.listchannels(
    )['channels'][0]['short_channel_id']
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6
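
# The route-hint checks above repeat a fixed pattern.  Below is a minimal
# sketch (an illustrative addition, not part of the suite) that factors it
# out.  It assumes the same fee_base_msat=1, fee_proportional_millionths=10
# and cltv_expiry_delta=6 values the tests assert, and uses the same
# only_one() utility the tests already rely on.
def assert_single_route_hint(node, bolt11, pubkey, scid=None):
    """Decode bolt11 on `node` and check it carries exactly one 'r' hint."""
    r = only_one(only_one(node.rpc.decodepay(bolt11)['routes']))
    assert r['pubkey'] == pubkey
    if scid is not None:
        assert r['short_channel_id'] == scid
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6
    return r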
Example #24
0
def test_invoice_routeboost(node_factory, bitcoind):
    """Test routeboost 'r' hint in bolt11 invoice.
    """
    l0, l1, l2 = node_factory.line_graph(3,
                                         fundamount=2 * (10**5),
                                         wait_for_announce=True)

    # Check routeboost.
    # Make invoice and pay it
    inv = l2.rpc.invoice(msatoshi=123456, label="inv1", description="?")
    # Check routeboost.
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv
    # Route array has single route with single element.
    r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
    assert r['pubkey'] == l1.info['id']
    assert r['short_channel_id'] == l2.rpc.listpeers(
        l1.info['id'])['peers'][0]['channels'][0]['short_channel_id']
    assert r['fee_base_msat'] == 1
    assert r['fee_proportional_millionths'] == 10
    assert r['cltv_expiry_delta'] == 6

    # Pay it (and make sure it's fully resolved before we take l1 offline!)
    l1.rpc.pay(inv['bolt11'])
    wait_channel_quiescent(l1, l2)

    # Due to reserve & fees, l1 doesn't have capacity to pay this.
    inv = l2.rpc.invoice(msatoshi=2 * (10**8) - 123456,
                         label="inv2",
                         description="?")
    # Check warning
    assert 'warning_capacity' in inv
    assert 'warning_private_unused' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_mpp' not in inv

    l1.rpc.disconnect(l2.info['id'], True)
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])[
        'connected'])

    inv = l2.rpc.invoice(123456, label="inv3", description="?")
    # Check warning.
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_deadends' not in inv
    assert 'warning_offline' in inv
    assert 'warning_mpp' not in inv

    # Close l0, l2 will not use l1 at all.
    l0.rpc.close(l1.info['id'])
    l0.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(100)

    # l2 has to notice channel is gone.
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2)
    inv = l2.rpc.invoice(123456, label="inv4", description="?")
    # Check warning.
    assert 'warning_deadends' in inv
    assert 'warning_private_unused' not in inv
    assert 'warning_capacity' not in inv
    assert 'warning_offline' not in inv
    assert 'warning_mpp' not in inv
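
# The invoice tests above check the same set of warning keys over and over.
# Here is a small illustrative helper (an assumption, not part of the suite)
# that collects those checks; the key names are exactly the ones asserted in
# the tests: warning_capacity, warning_offline, warning_deadends,
# warning_private_unused and warning_mpp.
INVOICE_WARNINGS = {'warning_capacity', 'warning_offline', 'warning_deadends',
                    'warning_private_unused', 'warning_mpp'}


def assert_invoice_warnings(inv, expected=()):
    """Assert that exactly the warnings in `expected` are set on `inv`."""
    assert (INVOICE_WARNINGS & set(inv)) == set(expected)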
Example #25
0
def test_txprepare(node_factory, bitcoind, chainparams):
    amount = 1000000
    l1 = node_factory.get_node(random_hsm=True)
    addr = chainparams['example_addr']

    # Add some funds to withdraw later: both bech32 and p2sh
    for i in range(5):
        bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8)
        bitcoind.rpc.sendtoaddress(
            l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'], amount / 10**8)

    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)

    prep = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 1000)}])
    decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
    assert decode['txid'] == prep['txid']
    # 4 inputs, 2 outputs (3 if we have a fee output).
    assert len(decode['vin']) == 4
    assert len(decode['vout']) == (2 if not chainparams['feeoutput'] else 3)

    # One output will be correct.
    outnum = [
        i for i, o in enumerate(decode['vout'])
        if o['value'] == Decimal(amount * 3) / 10**8
    ][0]

    for i, o in enumerate(decode['vout']):
        if i == outnum:
            assert o['scriptPubKey']['type'] == 'witness_v0_keyhash'
            assert o['scriptPubKey']['addresses'] == [addr]
        else:
            assert o['scriptPubKey']['type'] in ['witness_v0_keyhash', 'fee']

    # Now prepare one with no change.
    prep2 = l1.rpc.txprepare([{addr: 'all'}])
    decode = bitcoind.rpc.decoderawtransaction(prep2['unsigned_tx'])
    assert decode['txid'] == prep2['txid']
    # 6 inputs, 1 output.
    assert len(decode['vin']) == 6
    assert len(decode['vout']) == (1 if not chainparams['feeoutput'] else 2)

    # Some fees will be paid.
    assert decode['vout'][0]['value'] < Decimal(amount * 6) / 10**8
    assert decode['vout'][0]['value'] > Decimal(
        amount * 6) / 10**8 - Decimal(0.0002)
    assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
    assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]

    # If I cancel the first one, I can get those first 4 outputs.
    discard = l1.rpc.txdiscard(prep['txid'])
    assert discard['txid'] == prep['txid']
    assert discard['unsigned_tx'] == prep['unsigned_tx']

    prep3 = l1.rpc.txprepare([{addr: 'all'}])
    decode = bitcoind.rpc.decoderawtransaction(prep3['unsigned_tx'])
    assert decode['txid'] == prep3['txid']
    # 4 inputs, 1 output.
    assert len(decode['vin']) == 4
    assert len(decode['vout']) == (1 if not chainparams['feeoutput'] else 2)

    # Some fees will be taken
    assert decode['vout'][0]['value'] < Decimal(amount * 4) / 10**8
    assert decode['vout'][0]['value'] > Decimal(
        amount * 4) / 10**8 - Decimal(0.0002)
    assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
    assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]

    # Cannot discard twice.
    with pytest.raises(RpcError, match=r'not an unreleased txid'):
        l1.rpc.txdiscard(prep['txid'])

    # Discard everything, we should now spend all inputs.
    l1.rpc.txdiscard(prep2['txid'])
    l1.rpc.txdiscard(prep3['txid'])
    prep4 = l1.rpc.txprepare([{addr: 'all'}])
    decode = bitcoind.rpc.decoderawtransaction(prep4['unsigned_tx'])
    assert decode['txid'] == prep4['txid']
    # 10 inputs, 1 output.
    assert len(decode['vin']) == 10
    assert len(decode['vout']) == (1 if not chainparams['feeoutput'] else 2)

    # Some fees will be taken
    assert decode['vout'][0]['value'] < Decimal(amount * 10) / 10**8
    assert decode['vout'][0]['value'] > Decimal(
        amount * 10) / 10**8 - Decimal(0.0003)
    assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
    assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
    l1.rpc.txdiscard(prep4['txid'])

    # Try passing in a utxo set
    utxos = [
        utxo["txid"] + ":" + str(utxo["output"])
        for utxo in l1.rpc.listfunds()["outputs"]
    ][:4]

    prep5 = l1.rpc.txprepare([{
        addr: Millisatoshi(amount * 3.5 * 1000)
    }],
                             utxos=utxos)

    decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
    assert decode['txid'] == prep5['txid']

    # Check that correct utxos are included
    assert len(decode['vin']) == 4
    vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
    for utxo in utxos:
        assert utxo in vins

    # We should have a change output, so this is exact
    assert len(decode['vout']) == (3 if chainparams['feeoutput'] else 2)
    assert decode['vout'][1]['value'] == Decimal(amount * 3.5) / 10**8
    assert decode['vout'][1]['scriptPubKey']['type'] == 'witness_v0_keyhash'
    assert decode['vout'][1]['scriptPubKey']['addresses'] == [addr]

    # Discard prep5 so its inputs are available again.
    l1.rpc.txdiscard(prep5['txid'])
    with pytest.raises(
            RpcError,
            match=
            r'this destination wants all satoshi. The count of outputs can\'t be more than 1'
    ):
        prep5 = l1.rpc.txprepare([{
            addr: Millisatoshi(amount * 3 * 1000)
        }, {
            addr: 'all'
        }])
    prep5 = l1.rpc.txprepare([{
        addr: Millisatoshi(amount * 3 * 500 + 100000)
    }, {
        addr: Millisatoshi(amount * 3 * 500 - 100000)
    }])
    decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
    assert decode['txid'] == prep5['txid']
    # 4 inputs, 3 outputs (including change).
    assert len(decode['vin']) == 4
    assert len(decode['vout']) == (4 if chainparams['feeoutput'] else 3)

    # Find the two payment outputs and the change output.
    for i, o in enumerate(decode['vout']):
        if o['value'] == Decimal('0.01500100'):
            outnum1 = i
        elif o['value'] == Decimal('0.01499900'):
            outnum2 = i
        else:
            changenum = i

    assert decode['vout'][outnum1]['scriptPubKey'][
        'type'] == 'witness_v0_keyhash'
    assert decode['vout'][outnum1]['scriptPubKey']['addresses'] == [addr]

    assert decode['vout'][outnum2]['scriptPubKey'][
        'type'] == 'witness_v0_keyhash'
    assert decode['vout'][outnum2]['scriptPubKey']['addresses'] == [addr]

    assert decode['vout'][changenum]['scriptPubKey'][
        'type'] == 'witness_v0_keyhash'
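
# Both test_txprepare above and test_withdraw below build a "txid:output"
# utxo list from listfunds().  A minimal sketch of that selection as a
# helper (the name and the `limit` parameter are illustrative assumptions):
def pick_utxos(node, limit):
    """Return the first `limit` wallet outputs as 'txid:output' strings."""
    return [
        "{}:{}".format(utxo['txid'], utxo['output'])
        for utxo in node.rpc.listfunds()['outputs']
    ][:limit]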
Example #26
0
def test_withdraw(node_factory, bitcoind):
    amount = 1000000
    # Don't get any funds from previous runs.
    l1 = node_factory.get_node(random_hsm=True)
    l2 = node_factory.get_node(random_hsm=True)
    addr = l1.rpc.newaddr()['bech32']

    # Add some funds to withdraw later
    for i in range(10):
        l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)

    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)

    # Reach around into the db to check that outputs were added
    assert l1.db_query(
        'SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 10

    waddr = l1.bitcoin.rpc.getnewaddress()
    # Now attempt to withdraw some (making sure we collect multiple inputs)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('not an address', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw(waddr, 'not an amount')
    with pytest.raises(RpcError):
        l1.rpc.withdraw(waddr, -amount)
    with pytest.raises(RpcError, match=r'Cannot afford transaction'):
        l1.rpc.withdraw(waddr, amount * 100)

    out = l1.rpc.withdraw(waddr, 2 * amount)

    # Make sure bitcoind received the withdrawal
    unspent = l1.bitcoin.rpc.listunspent(0)
    withdrawal = [u for u in unspent if u['txid'] == out['txid']]

    assert withdrawal[0]['amount'] == Decimal('0.02')

    l1.bitcoin.generate_block(1)
    sync_blockheight(l1.bitcoin, [l1])

    # Check that there are no unconfirmed outputs (change should be confirmed)
    for o in l1.rpc.listfunds()['outputs']:
        assert o['status'] == 'confirmed'

    # Now make sure two of them were marked as spent
    assert l1.db_query(
        'SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 2

    # Now send some money to l2.
    # Ask l2 for a native segwit (bech32) address.
    waddr = l2.rpc.newaddr('bech32')['bech32']
    l1.rpc.withdraw(waddr, 2 * amount)
    bitcoind.generate_block(1)

    # Make sure l2 received the withdrawal.
    wait_for(lambda: len(l2.rpc.listfunds()['outputs']) == 1)
    outputs = l2.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == 2 * amount

    # Now make sure an additional two of them were marked as spent
    assert l1.db_query(
        'SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 4

    # Simple test for withdrawal to P2WPKH
    # Address from: https://bc-2.jp/tools/bech32demo/index.html
    waddr = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
    with pytest.raises(RpcError):
        l1.rpc.withdraw('xx1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx',
                        2 * amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kdl9fad',
                        2 * amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxxxxxx',
                        2 * amount)
    l1.rpc.withdraw(waddr, 2 * amount)
    bitcoind.generate_block(1)
    # Now make sure additional two of them were marked as spent
    assert l1.db_query(
        'SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 6

    # Simple test for withdrawal to P2WSH
    # Address from: https://bc-2.jp/tools/bech32demo/index.html
    waddr = 'bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry'
    with pytest.raises(RpcError):
        l1.rpc.withdraw(
            'xx1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7',
            2 * amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw(
            'tb1prp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qsm03tq',
            2 * amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw(
            'tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qxxxxxx',
            2 * amount)
    l1.rpc.withdraw(waddr, 2 * amount)
    bitcoind.generate_block(1)
    # Now make sure additional two of them were marked as spent
    assert l1.db_query(
        'SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 8

    # failure testing for invalid SegWit addresses, from BIP173
    # HRP character out of range
    with pytest.raises(RpcError):
        l1.rpc.withdraw(' 1nwldj5', 2 * amount)
    # overall max length exceeded
    with pytest.raises(RpcError):
        l1.rpc.withdraw(
            'an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx',
            2 * amount)
    # No separator character
    with pytest.raises(RpcError):
        l1.rpc.withdraw('pzry9x0s0muk', 2 * amount)
    # Empty HRP
    with pytest.raises(RpcError):
        l1.rpc.withdraw('1pzry9x0s0muk', 2 * amount)
    # Invalid witness version
    with pytest.raises(RpcError):
        l1.rpc.withdraw('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2',
                        2 * amount)
    # Invalid program length for witness version 0 (per BIP141)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P', 2 * amount)
    # Mixed case
    with pytest.raises(RpcError):
        l1.rpc.withdraw(
            'tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7',
            2 * amount)
    # Non-zero padding in 8-to-5 conversion
    with pytest.raises(RpcError):
        l1.rpc.withdraw(
            'tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv',
            2 * amount)

    # Should have 6 outputs available.
    assert l1.db_query(
        'SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 6

    # Test withdrawal to self.
    l1.rpc.withdraw(l1.rpc.newaddr('bech32')['bech32'], 'all', minconf=0)
    bitcoind.generate_block(1)
    assert l1.db_query(
        'SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 1

    l1.rpc.withdraw(waddr, 'all', minconf=0)
    assert l1.db_query(
        'SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 0

    # This should fail, can't even afford fee.
    with pytest.raises(RpcError, match=r'Cannot afford transaction'):
        l1.rpc.withdraw(waddr, 'all')

    # Add some funds to withdraw
    for i in range(10):
        l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)

    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)

    # Try passing in a utxo set
    utxos = [
        utxo["txid"] + ":" + str(utxo["output"])
        for utxo in l1.rpc.listfunds()["outputs"]
    ][:4]

    withdrawal = l1.rpc.withdraw(waddr, 2 * amount, utxos=utxos)
    decode = bitcoind.rpc.decoderawtransaction(withdrawal['tx'])
    assert decode['txid'] == withdrawal['txid']

    # Check that correct utxos are included
    assert len(decode['vin']) == 4
    vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
    for utxo in utxos:
        assert utxo in vins
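
# test_withdraw keeps reaching into the db to count outputs by status:
# status=0 is an available (unspent) wallet output and status=2 a spent one,
# as used in the queries above.  A minimal sketch of that query as a helper
# (the helper itself is an illustrative assumption):
def count_outputs_with_status(node, status):
    """Count wallet outputs in the given db status (0=available, 2=spent)."""
    query = 'SELECT COUNT(*) as c FROM outputs WHERE status={}'.format(status)
    return node.db_query(query)[0]['c']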
Example #27
0
def test_block_backfill(node_factory, bitcoind):
    """Test whether we backfill data from the blockchain correctly.

    For normal operation we will process any block after the initial start
    height, or rescan height, but for gossip we actually also need to backfill
    the blocks we skipped initially. We do so on-demand, whenever we see a
    channel_announcement referencing a blockheight we haven't processed yet,
    we fetch the entire block, extract P2WSH outputs and ask `bitcoin
    gettxout` for each of them. We then store the block header in the `blocks`
    table and the unspent outputs in the `utxoset` table.

    The test consist of two nodes opening a channel at height X, and an
    unrelated P2WSH transaction being sent at the same height (will be used to
    check for completeness of the backfill). Then a second node starts at
    height X+100 and connect to one of the nodes. It should not have the block
    in its DB before connecting. After connecting it should sync the gossip,
    triggering a backfill of block X, and all associated P2WSH outputs.

    """
    # Need to manually open the channels later since otherwise we can't have a
    # tx in the same block (`line_graph` with `fundchannel=True` generates
    # blocks).
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    # Get some funds to l1
    addr = l1.rpc.newaddr()['bech32']
    bitcoind.rpc.sendtoaddress(addr, 1)
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)

    # Now send the needle we will go looking for later:
    bitcoind.rpc.sendtoaddress(
        'bcrt1qtwxd8wg5eanumk86vfeujvp48hfkgannf77evggzct048wggsrxsum2pmm',
        0.00031337)
    l1.rpc.fundchannel(l2.info['id'], 10**6, announce=True)
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == 2)

    # Confirm and get some distance between the funding and the l3 wallet birth date
    bitcoind.generate_block(100)
    wait_for(lambda: len(l1.rpc.listnodes()['nodes']) == 2)

    # Start the tester node, and connect it to l1. l3 should sync the gossip
    # and call out to `bitcoind` to backfill the block.
    l3 = node_factory.get_node()
    heights = [r['height'] for r in l3.db_query("SELECT height FROM blocks")]
    assert (103 not in heights)

    l3.rpc.connect(l1.info['id'], 'localhost', l1.port)

    # Make sure we have backfilled the block
    wait_for(lambda: len(l3.rpc.listnodes()['nodes']) == 2)
    heights = [r['height'] for r in l3.db_query("SELECT height FROM blocks")]
    assert (103 in heights)

    # Make sure we also have the needle we added to the haystack above
    assert (31337 in [
        r['satoshis'] for r in l3.db_query("SELECT satoshis FROM utxoset")
    ])

    # Now close the channel and make sure `l3` cleans up correctly:
    txid = l1.rpc.close(l2.info['id'])['txid']
    bitcoind.generate_block(1, wait_for_mempool=txid)
    wait_for(lambda: len(l3.rpc.listchannels()['channels']) == 0)
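
# The docstring above describes on-demand backfill: a skipped block is only
# fetched (and recorded in the `blocks` table) once gossip references it.
# A minimal sketch of the corresponding check, mirroring the db_query calls
# in the test (the helper name is an illustrative assumption):
def has_backfilled_block(node, height):
    """True once `height` appears in the node's `blocks` table."""
    heights = [r['height'] for r in node.db_query("SELECT height FROM blocks")]
    return height in heights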
Example #28
0
def test_gossip_timestamp_filter(node_factory, bitcoind):
    # Need full IO logging so we can see gossip (from gossipd and channeld)
    l1, l2, l3 = node_factory.line_graph(3, opts={'log-level': 'io'}, fundchannel=False)

    # Full IO logging for connectds
    subprocess.run(['kill', '-USR1', l1.subd_pid('connectd')])
    subprocess.run(['kill', '-USR1', l2.subd_pid('connectd')])

    before_anything = int(time.time() - 1.0)

    # Make a public channel.
    chan12 = l1.fund_channel(l2, 10**5)
    bitcoind.generate_block(5)

    l3.wait_for_channel_updates([chan12])
    after_12 = int(time.time())
    # Full IO logging for l1's channeld
    subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')])

    # Make another one, different timestamp.
    chan23 = l2.fund_channel(l3, 10**5)
    bitcoind.generate_block(5)

    l1.wait_for_channel_updates([chan23])
    after_23 = int(time.time())

    # Make sure l1 has received all the gossip.
    wait_for(lambda: ['alias' in node for node in l1.rpc.listnodes()['nodes']] == [True, True, True])

    # l1 sets broad timestamp, will receive info about both channels again.
    l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                     first=0,
                                     range=0xFFFFFFFF)
    before_sendfilter = l1.daemon.logsearch_start

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # 0x0101 = node_announcement
    # The order of node_announcements relative to others is undefined.
    l1.daemon.wait_for_logs([r'\[IN\] 0102',
                             r'\[IN\] 0102',
                             r'\[IN\] 0100',
                             r'\[IN\] 0100',
                             r'\[IN\] 0102',
                             r'\[IN\] 0102',
                             r'\[IN\] 0101',
                             r'\[IN\] 0101',
                             r'\[IN\] 0101'])

    # Now timestamp which doesn't overlap (gives nothing).
    before_sendfilter = l1.daemon.logsearch_start
    l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                     first=0,
                                     range=before_anything)
    time.sleep(1)
    assert not l1.daemon.is_in_log(r'\[IN\] 0100', before_sendfilter)

    # Now choose range which will only give first update.
    l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                     first=before_anything,
                                     range=after_12 - before_anything + 1)
    # 0x0100 = channel_announcement
    l1.daemon.wait_for_log(r'\[IN\] 0100')
    # 0x0102 = channel_update
    # (Node announcement may have any timestamp)
    l1.daemon.wait_for_log(r'\[IN\] 0102')
    l1.daemon.wait_for_log(r'\[IN\] 0102')

    # Now choose range which will only give second update.
    l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                     first=after_12,
                                     range=after_23 - after_12 + 1)
    # 0x0100 = channel_announcement
    l1.daemon.wait_for_log(r'\[IN\] 0100')
    # 0x0102 = channel_update
    # (Node announcement may have any timestamp)
    l1.daemon.wait_for_log(r'\[IN\] 0102')
    l1.daemon.wait_for_log(r'\[IN\] 0102')
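
# For readability, the gossip wire types the log assertions above match on,
# taken directly from the comments in the test (the constant itself is an
# illustrative addition, not part of the suite):
GOSSIP_WIRE_TYPES = {
    '0100': 'channel_announcement',
    '0101': 'node_announcement',
    '0102': 'channel_update',
}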
Example #29
0
def test_onchain_different_fees(node_factory, bitcoind, executor):
    """Onchain handling when we've had a range of fees"""
    l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
                                     opts={'may_reconnect': True})

    l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
    p1 = executor.submit(l1.pay, l2, 1000000000)
    l1.daemon.wait_for_log('htlc 0: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    l1.set_feerates((16000, 7500, 3750))
    p2 = executor.submit(l1.pay, l2, 900000000)
    l1.daemon.wait_for_log('htlc 1: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    # Restart with different feerate for second HTLC.
    l1.set_feerates((5000, 5000, 3750))
    l1.restart()
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')

    p3 = executor.submit(l1.pay, l2, 800000000)
    l1.daemon.wait_for_log('htlc 2: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    # Drop to chain
    l1.rpc.dev_fail(l2.info['id'])
    l1.daemon.wait_for_log('sendrawtx exit 0')

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Both sides should have correct feerate
    assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
        'min_possible_feerate': 5000,
        'max_possible_feerate': 16000
    }]
    assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
        'min_possible_feerate': 5000,
        'max_possible_feerate': 16000
    }]

    bitcoind.generate_block(5)
    # Three HTLCs, and one for the to-us output.
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)

    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)

    with pytest.raises(Exception):
        p1.result(10)
    with pytest.raises(Exception):
        p2.result(10)
    with pytest.raises(Exception):
        p3.result(10)

    # Two more for HTLC timeout tx to be spent.
    bitcoind.generate_block(2)
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)

    # Now, after 100 blocks, it should be done.
    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
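
# Both nodes above run the same db check on the recorded feerate range.  A
# minimal sketch of it as a helper; the column names come from the query in
# the test, the helper name is an assumption.
def assert_feerate_bounds(node, min_feerate, max_feerate):
    """Check the recorded min/max possible feerate for the channel."""
    assert node.db_query(
        'SELECT min_possible_feerate, max_possible_feerate FROM channels;'
    ) == [{
        'min_possible_feerate': min_feerate,
        'max_possible_feerate': max_feerate,
    }]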
Example #30
0
def test_gossip_jsonrpc(node_factory):
    l1, l2 = node_factory.line_graph(2, fundchannel=True, wait_for_announce=False)

    # Shouldn't send announce signatures until 6 deep.
    assert not l1.daemon.is_in_log('peer_out WIRE_ANNOUNCEMENT_SIGNATURES')

    # Channels should be activated locally
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2)

    # Make sure we can route through the channel, will raise on failure
    l1.rpc.getroute(l2.info['id'], 100, 1)

    # Outgoing should be active, but not public.
    channels1 = l1.rpc.listchannels()['channels']
    channels2 = l2.rpc.listchannels()['channels']

    assert [c['active'] for c in channels1] == [True, True]
    assert [c['active'] for c in channels2] == [True, True]
    # The incoming direction will be considered public, hence check for the
    # outgoing direction only.
    assert len([c for c in channels1 if not c['public']]) == 2
    assert len([c for c in channels2 if not c['public']]) == 2

    # Test listchannels-by-source
    channels1 = l1.rpc.listchannels(source=l1.info['id'])['channels']
    channels2 = l2.rpc.listchannels(source=l1.info['id'])['channels']
    assert only_one(channels1)['source'] == l1.info['id']
    assert only_one(channels1)['destination'] == l2.info['id']
    assert channels1 == channels2

    l2.rpc.listchannels()['channels']

    # Now proceed to funding-depth and do a full gossip round
    l1.bitcoin.generate_block(5)
    # Could happen in either order.
    l1.daemon.wait_for_logs(['peer_out WIRE_ANNOUNCEMENT_SIGNATURES',
                             'peer_in WIRE_ANNOUNCEMENT_SIGNATURES'])

    # Just wait for the update to kick off and then check the effect
    needle = "Received node_announcement for node"
    l1.daemon.wait_for_log(needle)
    l2.daemon.wait_for_log(needle)
    # Need to increase timeout, intervals cannot be shortened with DEVELOPER=0
    wait_for(lambda: len(l1.getactivechannels()) == 2, timeout=60)
    wait_for(lambda: len(l2.getactivechannels()) == 2, timeout=60)

    nodes = l1.rpc.listnodes()['nodes']
    assert set([n['nodeid'] for n in nodes]) == set([l1.info['id'], l2.info['id']])

    # Test listnodes with an arg, while we're here.
    n1 = l1.rpc.listnodes(l1.info['id'])['nodes'][0]
    n2 = l1.rpc.listnodes(l2.info['id'])['nodes'][0]
    assert n1['nodeid'] == l1.info['id']
    assert n2['nodeid'] == l2.info['id']

    # Might not have seen other node-announce yet.
    assert n1['alias'].startswith('JUNIORBEAM')
    assert n1['color'] == '0266e4'
    if 'alias' not in n2:
        assert 'color' not in n2
        assert 'addresses' not in n2
    else:
        assert n2['alias'].startswith('SILENTARTIST')
        assert n2['color'] == '022d22'

    assert [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['public'] for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['active'] for c in l2.rpc.listchannels()['channels']] == [True, True]
    assert [c['public'] for c in l2.rpc.listchannels()['channels']] == [True, True]
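
# Before reaching announcement depth the channel above is active in both
# directions but not yet public.  A small illustrative helper (an
# assumption) capturing that check via the same listchannels() fields the
# test uses:
def local_only_channels(node):
    """Return channels the node sees as active but not yet public."""
    channels = node.rpc.listchannels()['channels']
    return [c for c in channels if c['active'] and not c['public']]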
Example #31
0
def test_permfail(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2)

    # The funding change should be confirmed and our only output
    assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
    l1.pay(l2, 200000000)

    # Make sure l2 has received sig with 0 htlcs!
    l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
    l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')

    # Make sure l1 has final revocation.
    l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
    l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # We fail l2, so l1 will reconnect to it.
    l2.rpc.dev_fail(l1.info['id'])
    l2.daemon.wait_for_log('Failing due to dev-fail command')
    l2.daemon.wait_for_log('sendrawtx exit 0')

    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1

    # Now grab the close transaction
    closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))

    # l2 will send out tx (l1 considers it a transient error)
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')

    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] ==
             ['ONCHAIN:Tracking their unilateral close',
              'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])

    def check_billboard():
        billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
        return (
            len(billboard) == 2 and
            billboard[0] == 'ONCHAIN:Tracking our own unilateral close' and
            re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:0\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
        )
    wait_for(check_billboard)

    # Now, mine 4 blocks so it sends out the spending tx.
    bitcoind.generate_block(4)

    # onchaind notes to-local payment immediately.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    # Restart, should still be confirmed (fails: unwinding blocks erases
    # the confirmation, and we don't re-make it).
    l1.restart()
    wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))

    # It should send the to-wallet tx.
    l2.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
    l2.daemon.wait_for_log('sendrawtx exit 0')

    # 100 blocks after l1 sees the tx, it should be done.
    bitcoind.generate_block(95)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])

    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
        'ONCHAIN:Tracking our own unilateral close',
        'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
    ])

    # Now, after 100 blocks, l2 should be done.
    bitcoind.generate_block(5)
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])

    # Only l1 has a direct output since all of l2's outputs are respent (it
    # failed). Also the output should now be listed as confirmed since we
    # generated some more blocks.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    addr = l1.bitcoin.rpc.getnewaddress()
    l1.rpc.withdraw(addr, "all")
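
# The onchain tests above repeatedly read the per-channel status "billboard"
# via listpeers().  A minimal sketch of that lookup as a helper; it uses the
# same only_one() utility the tests rely on, and the helper name is an
# illustrative assumption.
def channel_billboard(node, peer_id):
    """Return the status lines of the single channel with `peer_id`."""
    peer = node.rpc.listpeers(peer_id)['peers'][0]
    return only_one(peer['channels'])['status']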
Example #32
0
def test_closing_different_fees(node_factory, bitcoind, executor):
    l1 = node_factory.get_node()

    # Default feerate = 15000/7500/1000
    # It will start at the second number, accepting anything above the first.
    feerates = [[20000, 15000, 7400], [8000, 1001, 100]]
    amounts = [0, 545999, 546000]
    num_peers = len(feerates) * len(amounts)

    addr = l1.rpc.newaddr()['address']
    bitcoind.rpc.sendtoaddress(addr, 1)
    numfunds = len(l1.rpc.listfunds()['outputs'])
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)

    # Create them in a batch, for speed!
    peers = []
    for feerate in feerates:
        for amount in amounts:
            p = node_factory.get_node(feerates=feerate)
            p.feerate = feerate
            p.amount = amount
            l1.rpc.connect(p.info['id'], 'localhost', p.port)
            peers.append(p)

    for p in peers:
        p.channel = l1.rpc.fundchannel(p.info['id'], 10**6)['channel_id']
        # Technically, this is async to fundchannel returning.
        l1.daemon.wait_for_log('sendrawtx exit 0')

    bitcoind.generate_block(6)

    # Now wait for them all to hit normal state, do payments
    l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers +
                            ['to CHANNELD_NORMAL'] * num_peers)
    for p in peers:
        if p.amount != 0:
            l1.pay(p, 100000000)

    # Now close all channels
    # All closes occur in parallel, and on Travis,
    # ALL those lightningd are running on a single core,
    # so increase the timeout so that this test will pass
    # when valgrind is enabled.
    # (close timeout defaults to 30 as of this writing)
    closes = [executor.submit(l1.rpc.close, p.channel, False, 90) for p in peers]

    for c in closes:
        c.result(90)

    # close does *not* wait for the sendrawtransaction, so do that!
    # Note that since they disagree on the ideal fee, they may conflict
    # (first one in will win), so we cannot look at logs, we need to
    # wait for mempool.
    wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)

    bitcoind.generate_block(1)
    for p in peers:
        p.daemon.wait_for_log(' to ONCHAIN')
        wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])

    l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
Example #33
0
def test_gossip_timestamp_filter(node_factory, bitcoind):
    # Need full IO logging so we can see gossip (from gossipd and channeld)
    l1, l2, l3 = node_factory.line_graph(3,
                                         opts={'log-level': 'io'},
                                         fundchannel=False)

    # Full IO logging for connectds
    subprocess.run(['kill', '-USR1', l1.subd_pid('connectd')])
    subprocess.run(['kill', '-USR1', l2.subd_pid('connectd')])

    before_anything = int(time.time() - 1.0)

    # Make a public channel.
    chan12 = l1.fund_channel(l2, 10**5)
    bitcoind.generate_block(5)

    l3.wait_for_routes([chan12])
    after_12 = int(time.time())
    # Full IO logging for l1's channeld
    subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')])

    # Make another one, different timestamp.
    chan23 = l2.fund_channel(l3, 10**5)
    bitcoind.generate_block(5)

    l1.wait_for_routes([chan23])
    after_23 = int(time.time())

    # Make sure l1 has received all the gossip.
    wait_for(lambda: ['alias' in node for node in l1.rpc.listnodes()['nodes']]
             == [True, True, True])

    # l1 sets broad timestamp, will receive info about both channels again.
    l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                     first=0,
                                     range=0xFFFFFFFF)
    before_sendfilter = l1.daemon.logsearch_start

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # 0x0101 = node_announcement
    # The order of node_announcements relative to others is undefined.
    l1.daemon.wait_for_logs([
        r'\[IN\] 0102', r'\[IN\] 0102', r'\[IN\] 0100', r'\[IN\] 0100',
        r'\[IN\] 0102', r'\[IN\] 0102', r'\[IN\] 0101', r'\[IN\] 0101',
        r'\[IN\] 0101'
    ])

    # Now timestamp which doesn't overlap (gives nothing).
    before_sendfilter = l1.daemon.logsearch_start
    l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                     first=0,
                                     range=before_anything)
    time.sleep(1)
    assert not l1.daemon.is_in_log(r'\[IN\] 0100', before_sendfilter)

    # Now choose range which will only give first update.
    l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                     first=before_anything,
                                     range=after_12 - before_anything + 1)
    # 0x0100 = channel_announcement
    l1.daemon.wait_for_log(r'\[IN\] 0100')
    # 0x0102 = channel_update
    # (Node announcement may have any timestamp)
    l1.daemon.wait_for_log(r'\[IN\] 0102')
    l1.daemon.wait_for_log(r'\[IN\] 0102')

    # Now choose range which will only give second update.
    l1.rpc.dev_send_timestamp_filter(id=l2.info['id'],
                                     first=after_12,
                                     range=after_23 - after_12 + 1)
    # 0x0100 = channel_announcement
    l1.daemon.wait_for_log(r'\[IN\] 0100')
    # 0x0102 = channel_update
    # (Node announcement may have any timestamp)
    l1.daemon.wait_for_log(r'\[IN\] 0102')
    l1.daemon.wait_for_log(r'\[IN\] 0102')
Example #34
0
def test_closing(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2)
    chan = l1.get_channel_scid(l2)

    l1.pay(l2, 200000000)

    assert bitcoind.rpc.getmempoolinfo()['size'] == 0

    billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
    billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']

    bitcoind.rpc.generate(5)

    # Only wait for the channels to activate with DEVELOPER=1,
    # otherwise it's going to take too long because of the missing
    # --dev-broadcast-interval
    if DEVELOPER:
        wait_for(lambda: len(l1.getactivechannels()) == 2)
        wait_for(lambda: len(l2.getactivechannels()) == 2)
        billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
        # This may either be from a local_update or an announce, so just
        # check for the substring
        assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]

    # This should return with an error, then close.
    with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
        l1.rpc.close(chan, False, 0)

    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')

    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')

    # And should put closing into mempool.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l2.daemon.wait_for_log('sendrawtx exit 0')

    # Both nodes should have disabled the channel in their view
    wait_for(lambda: len(l1.getactivechannels()) == 0)
    wait_for(lambda: len(l2.getactivechannels()) == 0)

    assert bitcoind.rpc.getmempoolinfo()['size'] == 1

    # Now grab the close transaction
    closetxid = only_one(bitcoind.rpc.getrawmempool(False))

    billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi']
    bitcoind.rpc.generate(1)

    l1.daemon.wait_for_log(r'Owning output .* txid %s' % closetxid)
    l2.daemon.wait_for_log(r'Owning output .* txid %s' % closetxid)

    # Make sure both nodes have grabbed their close tx funds
    assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
    assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])

    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi',
        'ONCHAIN:Tracking mutual close transaction',
        'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
    ])

    bitcoind.rpc.generate(9)
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi',
        'ONCHAIN:Tracking mutual close transaction',
        'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
    ])

    # Make sure both have forgotten about it
    bitcoind.rpc.generate(90)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
Example #35
0
def test_gossip_pruning(node_factory, bitcoind):
    """ Create channel and see it being updated in time before pruning
    """
    opts = {'dev-channel-update-interval': 5}
    l1, l2, l3 = node_factory.get_nodes(3, opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)

    scid1 = l1.fund_channel(l2, 10**6)
    scid2 = l2.fund_channel(l3, 10**6)

    bitcoind.rpc.generate(6)

    # Channels should be activated locally
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']]
             == [True] * 4)
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']]
             == [True] * 4)
    wait_for(lambda: [c['active'] for c in l3.rpc.listchannels()['channels']]
             == [True] * 4)

    # All of them should send a keepalive message
    l1.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
    ])
    l2.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
        'Sending keepalive channel_update for {}'.format(scid2),
    ])
    l3.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid2),
    ])

    # Now kill l3, so that l2 and l1 can prune it from their view after 10 seconds

    # FIXME: This sleep() masks a real bug: that channeld sends a
    # channel_update message (to disable the channel) with same
    # timestamp as the last keepalive, and thus is ignored.  The minimal
    # fix is to backdate the keepalives 1 second, but maybe we should
    # simply have gossipd generate all updates?
    time.sleep(1)
    l3.stop()

    l1.daemon.wait_for_log(
        "Pruning channel {} from network view".format(scid2))
    l2.daemon.wait_for_log(
        "Pruning channel {} from network view".format(scid2))

    assert scid2 not in [
        c['short_channel_id'] for c in l1.rpc.listchannels()['channels']
    ]
    assert scid2 not in [
        c['short_channel_id'] for c in l2.rpc.listchannels()['channels']
    ]
    assert l3.info['id'] not in [
        n['nodeid'] for n in l1.rpc.listnodes()['nodes']
    ]
    assert l3.info['id'] not in [
        n['nodeid'] for n in l2.rpc.listnodes()['nodes']
    ]
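
# A minimal sketch of the pruning check above as a single predicate suitable
# for wait_for(); it is an illustrative assumption built on the same
# listchannels()/listnodes() fields the test inspects.
def pruned_from_view(node, scid, nodeid):
    """True once `scid` and `nodeid` are gone from the node's network view."""
    chans = node.rpc.listchannels()['channels']
    scids = [c['short_channel_id'] for c in chans]
    nodeids = [n['nodeid'] for n in node.rpc.listnodes()['nodes']]
    return scid not in scids and nodeid not in nodeids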
Example #36
0
def test_gossip_jsonrpc(node_factory):
    l1, l2 = node_factory.line_graph(2, fundchannel=True, announce=False)

    # Shouldn't send announce signatures until 6 deep.
    assert not l1.daemon.is_in_log('peer_out WIRE_ANNOUNCEMENT_SIGNATURES')

    # Channels should be activated locally
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2)

    # Make sure we can route through the channel, will raise on failure
    l1.rpc.getroute(l2.info['id'], 100, 1)

    # Outgoing should be active, but not public.
    channels1 = l1.rpc.listchannels()['channels']
    channels2 = l2.rpc.listchannels()['channels']

    assert [c['active'] for c in channels1] == [True, True]
    assert [c['active'] for c in channels2] == [True, True]
    # The incoming direction will be considered public, hence check for the
    # outgoing direction only.
    assert len([c for c in channels1 if not c['public']]) == 2
    assert len([c for c in channels2 if not c['public']]) == 2

    # Now proceed to funding-depth and do a full gossip round
    l1.bitcoin.generate_block(5)
    # Could happen in either order.
    l1.daemon.wait_for_logs([
        'peer_out WIRE_ANNOUNCEMENT_SIGNATURES',
        'peer_in WIRE_ANNOUNCEMENT_SIGNATURES'
    ])

    # Just wait for the update to kick off and then check the effect
    needle = "Received node_announcement for node"
    l1.daemon.wait_for_log(needle)
    l2.daemon.wait_for_log(needle)
    # Need to increase timeout, intervals cannot be shortened with DEVELOPER=0
    wait_for(lambda: len(l1.getactivechannels()) == 2, timeout=60)
    wait_for(lambda: len(l2.getactivechannels()) == 2, timeout=60)

    nodes = l1.rpc.listnodes()['nodes']
    assert set([n['nodeid']
                for n in nodes]) == set([l1.info['id'], l2.info['id']])

    # Test listnodes with an arg, while we're here.
    n1 = l1.rpc.listnodes(l1.info['id'])['nodes'][0]
    n2 = l1.rpc.listnodes(l2.info['id'])['nodes'][0]
    assert n1['nodeid'] == l1.info['id']
    assert n2['nodeid'] == l2.info['id']

    # Might not have seen other node-announce yet.
    assert n1['alias'].startswith('JUNIORBEAM')
    assert n1['color'] == '0266e4'
    if 'alias' not in n2:
        assert 'color' not in n2
        assert 'addresses' not in n2
    else:
        assert n2['alias'].startswith('SILENTARTIST')
        assert n2['color'] == '022d22'

    assert [c['active']
            for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['public']
            for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['active']
            for c in l2.rpc.listchannels()['channels']] == [True, True]
    assert [c['public']
            for c in l2.rpc.listchannels()['channels']] == [True, True]
Example #37
0
def test_gossip_persistence(node_factory, bitcoind):
    """Gossip for a while, restart and it should remember.

    Also tests for funding outpoint spends, and they should be persisted
    too.
    """
    opts = {'dev-no-reconnect': None, 'may_reconnect': True}
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)

    l1.fund_channel(l2, 10**6)
    l2.fund_channel(l3, 10**6)

    # Make channels public, except for l3 -> l4, which is kept local-only for now
    bitcoind.rpc.generate(5)
    l3.fund_channel(l4, 10**6)
    l1.bitcoin.rpc.generate(1)

    def count_active(node):
        chans = node.rpc.listchannels()['channels']
        active = [c for c in chans if c['active']]
        return len(active)

    # Channels should be activated
    wait_for(lambda: count_active(l1) == 4)
    wait_for(lambda: count_active(l2) == 4)
    wait_for(lambda: count_active(l3) == 6)  # 4 public + 2 local

    # l1 restarts and doesn't connect, but loads from persisted store, all
    # local channels should be disabled, leaving only the two l2 <-> l3
    # directions
    l1.restart()
    wait_for(lambda: count_active(l1) == 2)

    # Now reconnect, they should re-enable the two l1 <-> l2 directions
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: count_active(l1) == 4)

    # Now spend the funding tx, generate a block and see others deleting the
    # channel from their network view
    l1.rpc.dev_fail(l2.info['id'])
    time.sleep(1)
    l1.bitcoin.rpc.generate(1)

    wait_for(lambda: count_active(l1) == 2)
    wait_for(lambda: count_active(l2) == 2)
    wait_for(lambda: count_active(l3) == 4)  # 2 public + 2 local

    # We should have one local-only channel
    def count_non_public(node):
        chans = node.rpc.listchannels()['channels']
        nonpublic = [c for c in chans if not c['public']]
        return len(nonpublic)

    # The channel l3 -> l4 should be known only to them
    assert count_non_public(l1) == 0
    assert count_non_public(l2) == 0
    wait_for(lambda: count_non_public(l3) == 2)
    wait_for(lambda: count_non_public(l4) == 2)

    # Finally, it should also remember the deletion after a restart
    l3.restart()
    l4.restart()
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
    wait_for(lambda: count_active(l3) == 4)  # 2 public + 2 local

    # Both l3 and l4 should remember their local-only channel
    wait_for(lambda: count_non_public(l3) == 2)
    wait_for(lambda: count_non_public(l4) == 2)
Example #38
0
def test_gossip_persistence(node_factory, bitcoind):
    """Gossip for a while, restart and it should remember.

    Also tests for funding outpoint spends, and they should be persisted
    too.
    """
    opts = {'dev-no-reconnect': None, 'may_reconnect': True}
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)

    scid12 = l1.fund_channel(l2, 10**6)
    scid23 = l2.fund_channel(l3, 10**6)

    # Make channels public, except for l3 -> l4, which is kept local-only for now
    bitcoind.generate_block(5)
    scid34 = l3.fund_channel(l4, 10**6)
    bitcoind.generate_block(1)

    def active(node):
        chans = node.rpc.listchannels()['channels']
        return sorted([c['short_channel_id'] for c in chans if c['active']])

    def non_public(node):
        chans = node.rpc.listchannels()['channels']
        return sorted(
            [c['short_channel_id'] for c in chans if not c['public']])

    # Channels should be activated
    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])
    wait_for(lambda: active(l2) == [scid12, scid12, scid23, scid23])
    # This one sees its private channel
    wait_for(
        lambda: active(l3) == [scid12, scid12, scid23, scid23, scid34, scid34])

    # l1 restarts and doesn't connect, but loads from persisted store, all
    # local channels should be disabled, leaving only the two l2 <-> l3
    # directions
    l1.restart()
    wait_for(lambda: active(l1) == [scid23, scid23])

    # Now reconnect, they should re-enable the two l1 <-> l2 directions
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])

    # Now spend the funding tx, generate a block and see others deleting the
    # channel from their network view
    l1.rpc.dev_fail(l2.info['id'])
    time.sleep(1)
    bitcoind.generate_block(1)

    wait_for(lambda: active(l1) == [scid23, scid23])
    wait_for(lambda: active(l2) == [scid23, scid23])
    wait_for(lambda: active(l3) == [scid23, scid23, scid34, scid34])

    # The channel l3 -> l4 should be known only to them
    assert non_public(l1) == []
    assert non_public(l2) == []
    wait_for(lambda: non_public(l3) == [scid34, scid34])
    wait_for(lambda: non_public(l4) == [scid34, scid34])

    # Finally, it should also remember the deletion after a restart
    l3.restart()
    l4.restart()
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
    wait_for(lambda: active(l3) == [scid23, scid23, scid34, scid34])

    # Both l3 and l4 should remember their local-only channel
    wait_for(lambda: non_public(l3) == [scid34, scid34])
    wait_for(lambda: non_public(l4) == [scid34, scid34])
Example #39
0
def test_reserveinputs(node_factory, bitcoind, chainparams):
    """
    Reserve inputs is basically the same as txprepare, with the
    slight exception that 'reserveinputs' doesn't keep the
    unsent transaction around
    """
    amount = 1000000
    total_outs = 12
    l1 = node_factory.get_node(feerates=(7500, 7500, 7500, 7500))
    addr = chainparams['example_addr']

    # Add a medley of funds to withdraw later, bech32 + p2sh-p2wpkh
    for i in range(total_outs // 2):
        bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8)
        bitcoind.rpc.sendtoaddress(
            l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'], amount / 10**8)

    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == total_outs)

    utxo_count = 8
    sent = Decimal('0.01') * (utxo_count - 1)
    reserved = l1.rpc.reserveinputs(
        outputs=[{
            addr: Millisatoshi(amount * (utxo_count - 1) * 1000)
        }])
    assert reserved['feerate_per_kw'] == 7500
    psbt = bitcoind.rpc.decodepsbt(reserved['psbt'])
    out_found = False

    assert len(psbt['inputs']) == utxo_count
    outputs = l1.rpc.listfunds()['outputs']
    assert len([x for x in outputs
                if not x['reserved']]) == total_outs - utxo_count
    assert len([x for x in outputs if x['reserved']]) == utxo_count
    total_outs -= utxo_count
    saved_input = psbt['tx']['vin'][0]

    # We should have two outputs
    for vout in psbt['tx']['vout']:
        if vout['scriptPubKey']['addresses'][0] == addr:
            assert vout['value'] == sent
            out_found = True
    assert out_found

    # Do it again, but ask for more than we can afford
    utxo_count = 12 - utxo_count + 1
    sent = Decimal('0.01') * (utxo_count - 1)
    with pytest.raises(RpcError, match=r"Cannot afford transaction"):
        reserved = l1.rpc.reserveinputs(
            outputs=[{
                addr: Millisatoshi(amount * (utxo_count - 1) * 1000)
            }])

    utxo_count -= 1
    sent = Decimal('0.01') * (utxo_count - 1)
    reserved = l1.rpc.reserveinputs(
        outputs=[{addr: Millisatoshi(amount * (utxo_count - 1) * 1000)}],
        feerate='10000perkw')

    assert reserved['feerate_per_kw'] == 10000
    psbt = bitcoind.rpc.decodepsbt(reserved['psbt'])

    assert len(psbt['inputs']) == utxo_count
    outputs = l1.rpc.listfunds()['outputs']
    assert len([x for x in outputs
                if not x['reserved']]) == total_outs - utxo_count == 0
    assert len([x for x in outputs if x['reserved']]) == 12

    # No more available
    with pytest.raises(RpcError, match=r"Cannot afford transaction"):
        reserved = l1.rpc.reserveinputs(
            outputs=[{addr: Millisatoshi(amount * 1)}],
            feerate='253perkw')

    # Unreserve three, from different psbts
    unreserve_utxos = [{
        'txid': saved_input['txid'],
        'vout': saved_input['vout'],
        'sequence': saved_input['sequence']
    }, {
        'txid': psbt['tx']['vin'][0]['txid'],
        'vout': psbt['tx']['vin'][0]['vout'],
        'sequence': psbt['tx']['vin'][0]['sequence']
    }, {
        'txid': psbt['tx']['vin'][1]['txid'],
        'vout': psbt['tx']['vin'][1]['vout'],
        'sequence': psbt['tx']['vin'][1]['sequence']
    }]
    unreserve_psbt = bitcoind.rpc.createpsbt(unreserve_utxos, [])

    unreserved = l1.rpc.unreserveinputs(unreserve_psbt)
    assert all([x['unreserved'] for x in unreserved['outputs']])
    outputs = l1.rpc.listfunds()['outputs']
    assert len([x for x in outputs
                if not x['reserved']]) == len(unreserved['outputs'])
    for un, u_utxo in zip(unreserved['outputs'], unreserve_utxos):
        assert un['txid'] == u_utxo['txid']
        assert un['vout'] == u_utxo['vout']
        assert un['unreserved']

    # Try unreserving the same utxos again, plus one that's not included
    # We expect this to be a no-op.
    unreserve_utxos.append({'txid': 'b' * 64, 'vout': 0, 'sequence': 0})
    unreserve_psbt = bitcoind.rpc.createpsbt(unreserve_utxos, [])
    unreserved = l1.rpc.unreserveinputs(unreserve_psbt)
    assert not any([x['unreserved'] for x in unreserved['outputs']])
    for un in unreserved['outputs']:
        assert not un['unreserved']
    assert len([x for x in l1.rpc.listfunds()['outputs']
                if not x['reserved']]) == 3

    # passing in an empty string should fail
    with pytest.raises(RpcError, match=r"should be a PSBT, not "):
        l1.rpc.unreserveinputs('')

    # reserve one of the utxos that we just unreserved
    utxos = []
    utxos.append(saved_input['txid'] + ":" + str(saved_input['vout']))
    reserved = l1.rpc.reserveinputs(
        [{addr: Millisatoshi(amount * 0.5 * 1000)}],
        feerate='253perkw',
        utxos=utxos)
    assert len([x for x in l1.rpc.listfunds()['outputs']
                if not x['reserved']]) == 2
    psbt = bitcoind.rpc.decodepsbt(reserved['psbt'])
    assert len(psbt['inputs']) == 1
    vin = psbt['tx']['vin'][0]
    assert vin['txid'] == saved_input['txid']
    assert vin['vout'] == saved_input['vout']

    # reserve them all!
    reserved = l1.rpc.reserveinputs([{addr: 'all'}])
    outputs = l1.rpc.listfunds()['outputs']
    assert len([x for x in outputs if not x['reserved']]) == 0
    assert len([x for x in outputs if x['reserved']]) == 12

    # FIXME: restart the node, nothing will remain reserved
    l1.restart()
    assert len(l1.rpc.listfunds()['outputs']) == 12
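
# A hedged sketch of the reserve/unreserve round trip outside the test
# harness, driving lightningd through pyln-client just as the test drives
# l1.rpc above.  The socket path and destination address below are
# placeholders, not values from the test.
from pyln.client import LightningRpc, Millisatoshi

rpc = LightningRpc('/tmp/l1/lightning-rpc')   # hypothetical RPC socket path
dest = 'bcrt1q-placeholder-address'           # placeholder regtest address

# Reserve enough inputs to fund the output; the unsent PSBT comes back.
res = rpc.reserveinputs(outputs=[{dest: Millisatoshi(500000 * 1000)}])

# Handing the same PSBT back releases the reserved inputs again.
rpc.unreserveinputs(res['psbt'])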
Example #40
def test_gossip_query_channel_range(node_factory, bitcoind):
    l1, l2, l3, l4 = node_factory.line_graph(4,
                                             opts={'log-level': 'io'},
                                             fundchannel=False)

    # Make public channels on consecutive blocks
    l1.fundwallet(10**6)
    l2.fundwallet(10**6)

    num_tx = len(bitcoind.rpc.getrawmempool())
    l1.rpc.fundchannel(l2.info['id'], 10**5)['tx']
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == num_tx + 1)
    bitcoind.generate_block(1)

    num_tx = len(bitcoind.rpc.getrawmempool())
    l2.rpc.fundchannel(l3.info['id'], 10**5)['tx']
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == num_tx + 1)
    bitcoind.generate_block(1)

    # Get them both to gossip depth.
    bitcoind.generate_block(5)

    # Make sure l2 has received all the gossip.
    l2.daemon.wait_for_logs([
        'Received node_announcement for node ' + l1.info['id'],
        'Received node_announcement for node ' + l3.info['id']
    ])

    scid12 = only_one(l1.rpc.listpeers(
        l2.info['id'])['peers'])['channels'][0]['short_channel_id']
    scid23 = only_one(l3.rpc.listpeers(
        l2.info['id'])['peers'])['channels'][0]['short_channel_id']
    block12 = int(scid12.split('x')[0])
    block23 = int(scid23.split('x')[0])

    assert block23 == block12 + 1

    # l1 asks for all channels, gets both.
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=1000000)

    assert ret['final_first_block'] == 0
    assert ret['final_num_blocks'] == 1000000
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 2
    assert ret['short_channel_ids'][0] == scid12
    assert ret['short_channel_ids'][1] == scid23

    # Does not include scid12
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=block12)
    assert ret['final_first_block'] == 0
    assert ret['final_num_blocks'] == block12
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 0

    # Does include scid12
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=block12 + 1)
    assert ret['final_first_block'] == 0
    assert ret['final_num_blocks'] == block12 + 1
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 1
    assert ret['short_channel_ids'][0] == scid12

    # Doesn't include scid23
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=block23)
    assert ret['final_first_block'] == 0
    assert ret['final_num_blocks'] == block23
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 1
    assert ret['short_channel_ids'][0] == scid12

    # Does include scid23
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=block12,
                                         num=block23 - block12 + 1)
    assert ret['final_first_block'] == block12
    assert ret['final_num_blocks'] == block23 - block12 + 1
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 2
    assert ret['short_channel_ids'][0] == scid12
    assert ret['short_channel_ids'][1] == scid23

    # Only includes scid23
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=block23,
                                         num=1)
    assert ret['final_first_block'] == block23
    assert ret['final_num_blocks'] == 1
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 1
    assert ret['short_channel_ids'][0] == scid23

    # Past both
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=block23 + 1,
                                         num=1000000)
    assert ret['final_first_block'] == block23 + 1
    assert ret['final_num_blocks'] == 1000000
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 0

    # Turn on IO logging in l1 channeld.
    subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')])

    # Make l2 split reply into two (technically async)
    l2.rpc.dev_set_max_scids_encode_size(max=9)
    l2.daemon.wait_for_log('Set max_scids_encode_bytes to 9')
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=0,
                                         num=1000000)

    # Turns out it sends: 0+53, 53+26, 79+13, 92+7, 99+3, 102+2, 104+1, 105+999895
    l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 8)

    # It should definitely have split
    assert ret['final_first_block'] != 0 or ret['final_num_blocks'] != 1000000
    assert ret['final_complete']
    assert len(ret['short_channel_ids']) == 2
    assert ret['short_channel_ids'][0] == scid12
    assert ret['short_channel_ids'][1] == scid23
    l2.daemon.wait_for_log('queue_channel_ranges full: splitting')

    # Test overflow case doesn't split forever; should still only get 8 for this
    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'],
                                         first=1,
                                         num=429496000)
    l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 8)

    # And no more!
    time.sleep(1)
    assert not l1.daemon.is_in_log(r'\[IN\] 0108',
                                   start=l1.daemon.logsearch_start)

    # This should actually be large enough for zlib to kick in!
    l3.fund_channel(l4, 10**5)
    bitcoind.generate_block(5)
    l2.daemon.wait_for_log('Received node_announcement for node ' +
                           l4.info['id'])

    # Restore infinite encode size.
    l2.rpc.dev_set_max_scids_encode_size(max=(2**32 - 1))
    l2.daemon.wait_for_log(
        'Set max_scids_encode_bytes to {}'.format(2**32 - 1))

    ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=0, num=65535)
    l1.daemon.wait_for_log(
        # WIRE_REPLY_CHANNEL_RANGE
        r'\[IN\] 0108'
        # chain_hash
        + '................................................................'
        # first_blocknum
        + '00000000'
        # number_of_blocks
        + '0000ffff'
        # complete
        + '01'
        # length
        + '....'
        # encoding
        + '01')
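
# The block-range arithmetic above relies on the short_channel_id text form
# '<block>x<txindex>x<output>'.  A small sketch of splitting it into its
# parts; scid_parts() is our own illustrative helper, not part of the test
# framework.
def scid_parts(scid):
    block, txindex, outnum = (int(x) for x in scid.split('x'))
    return block, txindex, outnum


assert scid_parts('103x1x0') == (103, 1, 0)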
Example #41
#        r = gevent.wait([ch2])
#        m = r[0].read()
#        r[0].reset()
#        print('bar', m)
#        pump.write('')
#
#def run():
#    gevent.spawn(foo)
#    gevent.spawn(bar)
#
#gevent.spawn(run)
#
#ch1.write('hello')
#gevent.wait([pump])
#pump.reset()
#ch2.write('world')
#gevent.wait([pump])
#pump.reset()

ch1 = GenChannel()

ch12 = wrapwrite(ch1, 'A2P')
ch13 = wrapwrite(ch1, 'A2F')

ch12.write(('commit', 2, 3))
m = wait_for(ch1)
print('a2p', m)
ch13.write(('send', 'asa'))
m = wait_for(ch1)
print('a2f', m)
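
# The fragment above assumes a GenChannel with write()/read() plus a
# wrapwrite() helper that tags each message with the given label ('A2P',
# 'A2F') before forwarding it, so a single reader on ch1 can tell the
# traffic apart.  A minimal sketch of such a wrapper, under those
# assumptions; the real implementation may differ.
class _TaggedWriter:
    def __init__(self, ch, tag):
        self.ch = ch
        self.tag = tag

    def write(self, msg):
        # Forward the message to the wrapped channel, prefixed with the tag.
        self.ch.write((self.tag, msg))


def wrapwrite(ch, tag):
    return _TaggedWriter(ch, tag)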