def test_node_reannounce(node_factory, bitcoind): "Test that we reannounce a node when parameters change" l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True, 'log_all_io': True}) bitcoind.generate_block(5) # Wait for node_announcement for l1. l2.daemon.wait_for_log(r'\[IN\] 0101.*{}'.format(l1.info['id'])) # Wait for it to process it. wait_for(lambda: l2.rpc.listnodes(l1.info['id'])['nodes'] != []) wait_for(lambda: 'alias' in only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])) assert only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['alias'].startswith('JUNIORBEAM') l1.stop() l1.daemon.opts['alias'] = 'SENIORBEAM' l1.start() # Wait for l1 to send us its own node_announcement. nannouncement = l2.daemon.wait_for_log(r'{}.*\[IN\] 0101.*{}'.format(l1.info['id'], l1.info['id'])).split('[IN] ')[1] wait_for(lambda: only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['alias'] == 'SENIORBEAM') # Restart should re-xmit exact same update on reconnect. l1.restart() # l1 should retransmit it exactly the same (no timestamp change!) l2.daemon.wait_for_log(r'{}.*\[IN\] {}'.format(l1.info['id'], nannouncement))
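# The assertions above (and throughout these tests) lean on two small helpers
# that the test suite imports from its shared utilities: `only_one`, which
# asserts a JSON-RPC result array has exactly one entry and returns it, and
# `wait_for`, which polls a predicate until it becomes true or a timeout
# expires.  The definitions below are a minimal illustrative sketch of that
# behaviour, not the canonical implementations (the real helpers' timeout
# default and back-off policy may differ); they assume the module-level
# `import time` already used by other tests in this file.


def only_one(arr):
    """Sketch: expect a single entry in a JSON-RPC result array and return it."""
    assert len(arr) == 1
    return arr[0]


def wait_for(success, timeout=60):
    """Sketch: poll `success()` every 0.25s until true, or raise after `timeout` seconds."""
    start = time.time()
    while not success():
        if time.time() - start > timeout:
            raise ValueError('Timed out waiting for {}'.format(success))
        time.sleep(0.25)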
def test_invoice_routeboost_private(node_factory, bitcoind): """Test routeboost 'r' hint in bolt11 invoice for private channels """ l1, l2 = node_factory.line_graph(2, fundamount=10**6, announce_channels=False) # Attach public channel to l1 so it doesn't look like a dead-end. l0 = node_factory.get_node() l0.rpc.connect(l1.info['id'], 'localhost', l1.port) scid = l0.fund_channel(l1, 2 * (10**4)) bitcoind.generate_block(5) # Make sure channel is totally public. wait_for(lambda: [c['public'] for c in l2.rpc.listchannels(scid)['channels']] == [True, True]) # Since there's only one route, it will reluctantly hint that even # though it's private inv = l2.rpc.invoice(msatoshi=123456, label="inv0", description="?") assert 'warning_capacity' not in inv assert 'warning_offline' not in inv # Route array has single route with single element. r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes'])) assert r['pubkey'] == l1.info['id'] assert r['short_channel_id'] == l1.rpc.listchannels()['channels'][0]['short_channel_id'] assert r['fee_base_msat'] == 1 assert r['fee_proportional_millionths'] == 10 assert r['cltv_expiry_delta'] == 6 # If we explicitly say not to, it won't expose. inv = l2.rpc.invoice(msatoshi=123456, label="inv1", description="?", exposeprivatechannels=False) assert 'warning_capacity' in inv assert 'routes' not in l1.rpc.decodepay(inv['bolt11']) # The existence of a public channel, even without capacity, will suppress # the exposure of private channels. l3 = node_factory.get_node() l3.rpc.connect(l2.info['id'], 'localhost', l2.port) scid = l3.fund_channel(l2, (10**4)) bitcoind.generate_block(5) # Make sure channel is totally public. wait_for(lambda: [c['public'] for c in l3.rpc.listchannels(scid)['channels']] == [True, True]) inv = l2.rpc.invoice(msatoshi=10**7, label="inv2", description="?") assert 'warning_capacity' in inv # Unless we tell it to include it. inv = l2.rpc.invoice(msatoshi=10**7, label="inv3", description="?", exposeprivatechannels=True) assert 'warning_capacity' not in inv assert 'warning_offline' not in inv # Route array has single route with single element. r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes'])) assert r['pubkey'] == l1.info['id'] assert r['short_channel_id'] == l1.rpc.listchannels()['channels'][0]['short_channel_id'] assert r['fee_base_msat'] == 1 assert r['fee_proportional_millionths'] == 10 assert r['cltv_expiry_delta'] == 6
def test_invoice_routeboost(node_factory, bitcoind): """Test routeboost 'r' hint in bolt11 invoice. """ l0, l1, l2 = node_factory.line_graph(3, fundamount=2 * (10**4), wait_for_announce=True) # Check routeboost. # Make invoice and pay it inv = l2.rpc.invoice(msatoshi=123456, label="inv1", description="?") # Check routeboost. assert 'warning_capacity' not in inv assert 'warning_offline' not in inv # Route array has single route with single element. r = only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes'])) assert r['pubkey'] == l1.info['id'] assert r['short_channel_id'] == l2.rpc.listpeers( l1.info['id'])['peers'][0]['channels'][0]['short_channel_id'] assert r['fee_base_msat'] == 1 assert r['fee_proportional_millionths'] == 10 assert r['cltv_expiry_delta'] == 6 # Pay it (and make sure it's fully resolved before we take l1 offline!) l1.rpc.pay(inv['bolt11']) wait_channel_quiescent(l1, l2) # Due to reserve & fees, l1 doesn't have capacity to pay this. inv = l2.rpc.invoice(msatoshi=2 * (10**7) - 123456, label="inv2", description="?") # Check warning assert 'warning_capacity' in inv assert 'warning_offline' not in inv l1.rpc.disconnect(l2.info['id'], True) wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])[ 'connected']) inv = l2.rpc.invoice(123456, label="inv3", description="?") # Check warning. assert 'warning_capacity' not in inv assert 'warning_offline' in inv # Close l0, l2 will not use l1 at all. l0.rpc.close(l1.info['id']) l0.wait_for_channel_onchain(l1.info['id']) bitcoind.generate_block(100) # l2 has to notice channel is gone. wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2) inv = l2.rpc.invoice(123456, label="inv4", description="?") # Check warning. assert 'warning_capacity' in inv assert 'warning_offline' not in inv
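# `wait_channel_quiescent(l1, l2)` above is another shared test helper: it
# blocks until neither side of the l1<->l2 channel has HTLCs in flight, so a
# just-completed payment is fully resolved before the nodes are disconnected
# or restarted.  A minimal sketch of what it checks, assuming the `wait_for`
# and `only_one` helpers sketched earlier:


def wait_channel_quiescent(n1, n2):
    wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
    wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])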
def test_funding_all_too_much(node_factory):
    """Add more than max possible funds, fund a channel using all funds we can.
    """
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    l1.fundwallet(2**24 + 10000)
    l1.rpc.fundchannel(l2.info['id'], "all")

    assert only_one(l1.rpc.listfunds()['outputs'])['status'] == 'unconfirmed'
    assert only_one(l1.rpc.listfunds()['channels'])['channel_total_sat'] == 2**24 - 1
def test_htlc_send_timeout(node_factory, bitcoind): """Test that we don't commit an HTLC to an unreachable node.""" l1 = node_factory.get_node(options={'log-level': 'io'}) # Blackhole it after it sends HTLC_ADD to l3. l2 = node_factory.get_node(disconnect=['0WIRE_UPDATE_ADD_HTLC'], options={'log-level': 'io'}) l3 = node_factory.get_node() l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l2.rpc.connect(l3.info['id'], 'localhost', l3.port) l1.fund_channel(l2, 10**6) chanid2 = l2.fund_channel(l3, 10**6) subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')]) subprocess.run(['kill', '-USR1', l2.subd_pid('channeld')]) # Make sure channels get announced. bitcoind.generate_block(5) # Make sure we have 30 seconds without any incoming traffic from l3 to l2 # so it tries to ping before sending WIRE_COMMITMENT_SIGNED. timedout = False while not timedout: try: l2.daemon.wait_for_log( 'channeld-{} chan #[0-9]*:\[IN\] 0101'.format(l3.info['id']), timeout=30) except TimeoutError: timedout = True inv = l3.rpc.invoice(123000, 'test_htlc_send_timeout', 'description') with pytest.raises(RpcError) as excinfo: l1.rpc.pay(inv['bolt11']) err = excinfo.value # Complaints it couldn't find route. assert err.error['code'] == 205 # Temporary channel failure assert only_one(err.error['data']['failures'])['failcode'] == 0x1007 assert only_one( err.error['data']['failures'])['erring_node'] == l2.info['id'] assert only_one(err.error['data']['failures'])['erring_channel'] == chanid2 # L2 should send ping, but never receive pong so never send commitment. l2.daemon.wait_for_log('channeld.*:\[OUT\] 0012') assert not l2.daemon.is_in_log('channeld.*:\[IN\] 0013') assert not l2.daemon.is_in_log('channeld.*:\[OUT\] 0084') # L2 killed the channel with l3 because it was too slow. l2.daemon.wait_for_log( 'channeld-{}.*Adding HTLC too slow: killing channel'.format( l3.info['id']))
def test_pay_plugin(node_factory):
    l1, l2 = node_factory.line_graph(2)
    inv = l2.rpc.invoice(123000, 'label', 'description', 3700)

    res = l1.rpc.pay(bolt11=inv['bolt11'])
    assert res['status'] == 'complete'

    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('pay')

    # Make sure usage messages are present.
    assert only_one(l1.rpc.help('pay')['help'])['command'] == 'pay bolt11 [msatoshi] [label] [riskfactor] [maxfeepercent] [retry_for] [maxdelay] [exemptfee]'
    assert only_one(l1.rpc.help('paystatus')['help'])['command'] == 'paystatus [bolt11]'
def test_invoice_deschash(node_factory, chainparams): l1, l2 = node_factory.line_graph(2) # BOLT #11: # * `h`: tagged field: hash of description # * `p5`: `data_length` (`p` = 1, `5` = 20; 1 * 32 + 20 == 52) # * `8yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqs`: SHA256 of 'One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon' inv = l2.rpc.invoice( 42, 'label', 'One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon', deschashonly=True) assert '8yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqs' in inv[ 'bolt11'] b11 = l2.rpc.decodepay(inv['bolt11']) assert 'description' not in b11 assert b11[ 'description_hash'] == '3925b6f67e2c340036ed12093dd44e0368df1b6ea26c53dbe4811f58fd5db8c1' listinv = only_one(l2.rpc.listinvoices()['invoices']) assert listinv[ 'description'] == 'One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon' # To pay it we need to provide the (correct!) description. with pytest.raises(RpcError, match=r'you did not provide description parameter'): l1.rpc.pay(inv['bolt11']) with pytest.raises(RpcError, match=r'does not match description'): l1.rpc.pay(inv['bolt11'], description=listinv['description'][:-1]) l1.rpc.pay(inv['bolt11'], description=listinv['description']) # Description will be in some. found = False for p in l1.rpc.listsendpays()['payments']: if 'description' in p: found = True assert p['description'] == listinv['description'] assert found assert only_one(l1.rpc.listpays( inv['bolt11'])['pays'])['description'] == listinv['description'] # Try removing description. l2.rpc.delinvoice('label', "paid", desconly=True) assert 'description' not in only_one(l2.rpc.listinvoices()['invoices']) with pytest.raises(RpcError, match=r'description already removed'): l2.rpc.delinvoice('label', "paid", desconly=True)
def test_io_logging(node_factory, executor): l1 = node_factory.get_node(options={'log-level': 'io'}) l2 = node_factory.get_node() l1.rpc.connect(l2.info['id'], 'localhost', l2.port) # Fundchannel manually so we get channeld pid. l1.fundwallet(10**6 + 1000000) l1.rpc.fundchannel(l2.info['id'], 10**6)['tx'] pid1 = l1.subd_pid('channeld') l1.daemon.wait_for_log('sendrawtx exit 0') l1.bitcoin.generate_block(1) l1.daemon.wait_for_log(' to CHANNELD_NORMAL') pid2 = l2.subd_pid('channeld') l2.daemon.wait_for_log(' to CHANNELD_NORMAL') # Send it sigusr1: should turn on logging. subprocess.run(['kill', '-USR1', pid1]) fut = executor.submit(l1.pay, l2, 200000000) # WIRE_UPDATE_ADD_HTLC = 128 = 0x0080 l1.daemon.wait_for_log(r'channeld.*:\[OUT\] 0080') # WIRE_UPDATE_FULFILL_HTLC = 130 = 0x0082 l1.daemon.wait_for_log(r'channeld.*:\[IN\] 0082') fut.result(10) # Send it sigusr1: should turn off logging. subprocess.run(['kill', '-USR1', pid1]) l1.pay(l2, 200000000) assert not l1.daemon.is_in_log(r'channeld.*:\[OUT\] 0080', start=l1.daemon.logsearch_start) assert not l1.daemon.is_in_log(r'channeld.*:\[IN\] 0082', start=l1.daemon.logsearch_start) # IO logs should not appear in peer logs. peerlog = only_one(l2.rpc.listpeers(l1.info['id'], "io")['peers'])['log'] assert not any(l['type'] == 'IO_OUT' or l['type'] == 'IO_IN' for l in peerlog) # Turn on in l2 channel logging. subprocess.run(['kill', '-USR1', pid2]) l1.pay(l2, 200000000) # Now it should find it. peerlog = only_one(l2.rpc.listpeers(l1.info['id'], "io")['peers'])['log'] assert any(l['type'] == 'IO_OUT' for l in peerlog) assert any(l['type'] == 'IO_IN' for l in peerlog)
def test_multiwithdraw_simple(node_factory, bitcoind):
    """
    Test simple multiwithdraw usage.
    """
    l1, l2, l3 = node_factory.get_nodes(3)
    l1.fundwallet(10**8)

    addr2 = l2.rpc.newaddr()['bech32']
    amount2 = Millisatoshi(2222 * 1000)
    addr3 = l3.rpc.newaddr()['bech32']
    amount3 = Millisatoshi(3333 * 1000)

    # Multiwithdraw!
    txid = l1.rpc.multiwithdraw([{addr2: amount2}, {addr3: amount3}])["txid"]
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1, l2, l3])

    # l2 shoulda gotten money.
    funds2 = l2.rpc.listfunds()['outputs']
    assert only_one(funds2)["txid"] == txid
    assert only_one(funds2)["address"] == addr2
    assert only_one(funds2)["status"] == "confirmed"
    assert only_one(funds2)["amount_msat"] == amount2

    # l3 shoulda gotten money.
    funds3 = l3.rpc.listfunds()['outputs']
    assert only_one(funds3)["txid"] == txid
    assert only_one(funds3)["address"] == addr3
    assert only_one(funds3)["status"] == "confirmed"
    assert only_one(funds3)["amount_msat"] == amount3
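# `sync_blockheight(bitcoind, nodes)` above is a shared test helper that waits
# until every listed node has processed blocks up to bitcoind's current
# height, so the wallet checks that follow are not racing the block
# notifications.  A minimal sketch, reusing the `wait_for` helper sketched
# earlier:


def sync_blockheight(bitcoind, nodes):
    height = bitcoind.rpc.getblockcount()
    for n in nodes:
        wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)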
def test_peerinfo(node_factory, bitcoind): l1, l2 = node_factory.line_graph(2, fundchannel=False, opts={'may_reconnect': True}) # Gossiping but no node announcement yet assert l1.rpc.getpeer(l2.info['id'])['connected'] assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0 assert l1.rpc.getpeer(l2.info['id'])['local_features'] == '8a' assert l1.rpc.getpeer(l2.info['id'])['global_features'] == '' # Fund a channel to force a node announcement chan = l1.fund_channel(l2, 10**6) # Now proceed to funding-depth and do a full gossip round bitcoind.generate_block(5) l1.daemon.wait_for_logs( ['Received node_announcement for node ' + l2.info['id']]) l2.daemon.wait_for_logs( ['Received node_announcement for node ' + l1.info['id']]) # Should have announced the same global features as told to peer. nodes1 = l1.rpc.listnodes(l2.info['id'])['nodes'] nodes2 = l2.rpc.listnodes(l2.info['id'])['nodes'] peer1 = l1.rpc.getpeer(l2.info['id']) peer2 = l2.rpc.getpeer(l1.info['id']) assert only_one(nodes1)['global_features'] == peer1['global_features'] assert only_one(nodes2)['global_features'] == peer2['global_features'] assert l1.rpc.getpeer(l2.info['id'])['local_features'] == '8a' assert l2.rpc.getpeer(l1.info['id'])['local_features'] == '8a' # If it reconnects after db load, it should know features. l1.restart() wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected']) wait_for(lambda: l2.rpc.getpeer(l1.info['id'])['connected']) assert l1.rpc.getpeer(l2.info['id'])['local_features'] == '8a' assert l2.rpc.getpeer(l1.info['id'])['local_features'] == '8a' # Close the channel to forget the peer with pytest.raises(RpcError, match=r'Channel close negotiation not finished'): l1.rpc.close(chan, False, 0) l1.daemon.wait_for_log('Forgetting peer') bitcoind.generate_block(100) l1.daemon.wait_for_log('WIRE_ONCHAIN_ALL_IRREVOCABLY_RESOLVED') l2.daemon.wait_for_log('WIRE_ONCHAIN_ALL_IRREVOCABLY_RESOLVED') # The only channel was closed, everybody should have forgotten the nodes assert l1.rpc.listnodes()['nodes'] == [] assert l2.rpc.listnodes()['nodes'] == []
def test_last_tx_inflight_psbt_upgrade(node_factory, bitcoind): bitcoind.generate_block(12) prior_txs = [ '02000000019CCCA2E59D863B00B5BD835BF7BA93CC257932D2C7CDBE51EFE2EE4A9D29DFCB01000000009DB0E280024A01000000000000220020BE7935A77CA9AB70A4B8B1906825637767FED3C00824AA90C988983587D68488F0820100000000002200209F4684DDB28ACDC73959BC194D1A25DF906F61ED030F52D163E6F1E247D32CBB9A3ED620', '020000000122F9EBE38F54208545B681AD7F73A7AE3504A09C8201F502673D34E28424687C01000000009DB0E280024A01000000000000220020BE7935A77CA9AB70A4B8B1906825637767FED3C00824AA90C988983587D68488F0820100000000002200209F4684DDB28ACDC73959BC194D1A25DF906F61ED030F52D163E6F1E247D32CBB9A3ED620' ] l1 = node_factory.get_node(dbfile='upgrade_inflight.sqlite3.xz') b64_last_txs = [ base64.b64encode(x['last_tx']).decode('utf-8') for x in l1.db_query( 'SELECT last_tx FROM channel_funding_inflights ORDER BY channel_id, funding_feerate;' ) ] for i in range(len(b64_last_txs)): bpsbt = b64_last_txs[i] psbt = bitcoind.rpc.decodepsbt(bpsbt) tx = prior_txs[i] assert psbt['tx']['txid'] == bitcoind.rpc.decoderawtransaction( tx)['txid'] funding_input = only_one(psbt['inputs']) assert funding_input['witness_utxo']['amount'] == Decimal('0.001') assert funding_input['witness_utxo']['scriptPubKey'][ 'type'] == 'witness_v0_scripthash' assert funding_input['witness_script']['type'] == 'multisig'
def test_reconnect_sender_add1(node_factory): # Fail after add is OK, will cause payment failure though. disconnects = ['-WIRE_UPDATE_ADD_HTLC-nocommit', '+WIRE_UPDATE_ADD_HTLC-nocommit', '@WIRE_UPDATE_ADD_HTLC-nocommit'] # Feerates identical so we don't get gratuitous commit to update them l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True, feerates=(7500, 7500, 7500)) l2 = node_factory.get_node(may_reconnect=True) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) amt = 200000000 rhash = l2.rpc.invoice(amt, 'test_reconnect_sender_add1', 'desc')['payment_hash'] assert only_one(l2.rpc.listinvoices('test_reconnect_sender_add1')['invoices'])['status'] == 'unpaid' route = [{'msatoshi': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1:1:1'}] for i in range(0, len(disconnects)): l1.rpc.sendpay(route, rhash) with pytest.raises(RpcError): l1.rpc.waitsendpay(rhash) # Wait for reconnection. l1.daemon.wait_for_log('Already have funding locked in') # This will send commit, so will reconnect as required. l1.rpc.sendpay(route, rhash)
def check_billboard(): billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] return ( len(billboard) == 2 and billboard[0] == 'ONCHAIN:Tracking our own unilateral close' and re.fullmatch('ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:0\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1]) )
def test_reconnect_sender_add(node_factory): disconnects = [ '-WIRE_COMMITMENT_SIGNED', '@WIRE_COMMITMENT_SIGNED', '+WIRE_COMMITMENT_SIGNED', '-WIRE_REVOKE_AND_ACK', '@WIRE_REVOKE_AND_ACK', '+WIRE_REVOKE_AND_ACK' ] # Feerates identical so we don't get gratuitous commit to update them l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True, feerates=(7500, 7500, 7500)) l2 = node_factory.get_node(may_reconnect=True) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) amt = 200000000 rhash = l2.rpc.invoice(amt, 'testpayment', 'desc')['payment_hash'] assert only_one( l2.rpc.listinvoices('testpayment')['invoices'])['status'] == 'unpaid' route = [{ 'msatoshi': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1:1:1' }] # This will send commit, so will reconnect as required. l1.rpc.sendpay(route, rhash) # Should have printed this for every reconnect. for i in range(0, len(disconnects)): l1.daemon.wait_for_log('Already have funding locked in')
def test_rpc_passthrough(node_factory): """Starting with a plugin exposes its RPC methods. First check that the RPC method appears in the help output and then try to call it. """ plugin_path = 'contrib/plugins/helloworld.py' n = node_factory.get_node(options={'plugin': plugin_path, 'greeting': 'Ciao'}) # Make sure that the 'hello' command that the helloworld.py plugin # has registered is available. cmd = [hlp for hlp in n.rpc.help()['help'] if 'hello' in hlp['command']] assert(len(cmd) == 1) # Make sure usage message is present. assert only_one(n.rpc.help('hello')['help'])['command'] == 'hello [name]' # While we're at it, let's check that helloworld.py is logging # correctly via the notifications plugin->lightningd assert n.daemon.is_in_log('Plugin helloworld.py initialized') # Now try to call it and see what it returns: greet = n.rpc.hello(name='World') assert(greet == "Ciao World") with pytest.raises(RpcError): n.rpc.fail()
def test_last_tx_psbt_upgrade(node_factory, bitcoind): bitcoind.generate_block(12) prior_txs = ['02000000018DD699861B00061E50937A233DB584BF8ED4C0BF50B44C0411F71B031A06455000000000000EF7A9800350C300000000000022002073356CFF7E1588F14935EF138E142ABEFB5F7E3D51DE942758DCD5A179449B6250A90600000000002200202DF545EA882889846C52FC5E111AC07CE07E0C09418AC15743A6F6284C2A4FA720A1070000000000160014E89954FAC8F7A2DCE51E095D7BEB5271C3F7DA56EF81DC20', '02000000018A0AE4C63BCDF9D78B07EB4501BB23404FDDBC73973C592793F047BE1495074B010000000074D99980010A2D0F00000000002200203B8CB644781CBECA96BE8B2BF1827AFD908B3CFB5569AC74DAB9395E8DDA39E4C9555420', '020000000135DAB2996E57762E3EC158C0D57D39F43CA657E882D93FC24F5FEBAA8F36ED9A0100000000566D1D800350C30000000000002200205679A7D06E1BD276AA25F56E9E4DF7E07D9837EFB0C5F63604F10CD9F766A03ED4DD0600000000001600147E5B5C8F4FC1A9484E259F92CA4CBB7FA2814EA49A6C070000000000220020AB6226DEBFFEFF4A741C01367FA3C875172483CFB3E327D0F8C7AA4C51EDECAA27AA4720'] l1 = node_factory.get_node(dbfile='last_tx_upgrade.sqlite3.xz') b64_last_txs = [base64.b64encode(x['last_tx']).decode('utf-8') for x in l1.db_query('SELECT last_tx FROM channels ORDER BY id;')] for i in range(len(b64_last_txs)): bpsbt = b64_last_txs[i] psbt = bitcoind.rpc.decodepsbt(bpsbt) tx = prior_txs[i] assert psbt['tx']['txid'] == bitcoind.rpc.decoderawtransaction(tx)['txid'] funding_input = only_one(psbt['inputs']) # Every opened channel was funded with the same amount: 1M sats assert funding_input['witness_utxo']['amount'] == Decimal('0.01') assert funding_input['witness_utxo']['scriptPubKey']['type'] == 'witness_v0_scripthash' assert funding_input['witness_script']['type'] == 'multisig' l1.stop() # Test again, but this time with a database with a closed channel + forgotten peer # We need to get to block #232 from block #113 bitcoind.generate_block(232 - 113) # We need to give it a chance to update time.sleep(2) l2 = node_factory.get_node(dbfile='last_tx_closed.sqlite3.xz') last_txs = [x['last_tx'] for x in l2.db_query('SELECT last_tx FROM channels ORDER BY id;')] # The first tx should be psbt, the second should still be hex bitcoind.rpc.decodepsbt(base64.b64encode(last_txs[0]).decode('utf-8')) bitcoind.rpc.decoderawtransaction(last_txs[1].hex())
def test_bech32_funding(node_factory): # Don't get any funds from previous runs. l1 = node_factory.get_node(random_hsm=True) l2 = node_factory.get_node(random_hsm=True) # connect l1.rpc.connect(l2.info['id'], 'localhost', l2.port) # fund a bech32 address and then open a channel with it res = l1.openchannel(l2, 20000, 'bech32') address = res['address'] assert address[0:4] == "bcrt" # probably overly paranoid checking wallettxid = res['wallettxid'] wallettx = l1.bitcoin.rpc.getrawtransaction(wallettxid, True) fundingtx = l1.bitcoin.rpc.decoderawtransaction(res['fundingtx']['tx']) def is_p2wpkh(output): return output['type'] == 'witness_v0_keyhash' and \ address == only_one(output['addresses']) assert any(is_p2wpkh(output['scriptPubKey']) for output in wallettx['vout']) assert only_one(fundingtx['vin'])['txid'] == res['wallettxid']
def test_reserve_enforcement(node_factory, executor): """Channeld should disallow you spending into your reserve""" l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True}) # Pay 1000 satoshi to l2. l1.pay(l2, 1000000) l2.stop() # They should both aim for 1%. reserves = l2.db_query('SELECT channel_reserve_satoshis FROM channel_configs') assert reserves == [{'channel_reserve_satoshis': 10**6 // 100}] * 2 # Edit db to reduce reserve to 0 so it will try to violate it. l2.db_query('UPDATE channel_configs SET channel_reserve_satoshis=0', use_copy=False) l2.start() wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected']) # This should be impossible to pay entire thing back: l1 should # kill us for trying to violate reserve. executor.submit(l2.pay, l1, 1000000) l1.daemon.wait_for_log( 'Peer permanent failure in CHANNELD_NORMAL: lightning_channeld: sent ' 'ERROR Bad peer_add_htlc: CHANNEL_ERR_CHANNEL_CAPACITY_EXCEEDED' )
def test_rbf_reconnect_tx_construct(node_factory, bitcoind, chainparams): disconnects = ['=WIRE_TX_ADD_INPUT', # Initial funding succeeds '-WIRE_TX_ADD_INPUT', '+WIRE_TX_ADD_INPUT', '-WIRE_TX_ADD_OUTPUT', '+WIRE_TX_ADD_OUTPUT', '-WIRE_TX_COMPLETE', '+WIRE_TX_COMPLETE'] l1, l2 = node_factory.get_nodes(2, opts=[{'disconnect': disconnects, 'may_reconnect': True}, {'may_reconnect': True}]) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) amount = 2**24 chan_amount = 100000 bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8 + 0.01) bitcoind.generate_block(1) # Wait for it to arrive. wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0) res = l1.rpc.fundchannel(l2.info['id'], chan_amount) chan_id = res['channel_id'] vins = bitcoind.rpc.decoderawtransaction(res['tx'])['vin'] assert(only_one(vins)) prev_utxos = ["{}:{}".format(vins[0]['txid'], vins[0]['vout'])] # Check that we're waiting for lockin l1.daemon.wait_for_log(' to DUALOPEND_AWAITING_LOCKIN') next_feerate = find_next_feerate(l1, l2) # Initiate an RBF startweight = 42 + 172 # base weight, funding output initpsbt = l1.rpc.utxopsbt(chan_amount, next_feerate, startweight, prev_utxos, reservedok=True, min_witness_weight=110, excess_as_change=True) # Run through TX_ADD wires for d in disconnects[1:-2]: l1.rpc.connect(l2.info['id'], 'localhost', l2.port) with pytest.raises(RpcError): l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt']) assert l1.rpc.getpeer(l2.info['id']) is not None # Now we finish off the completes failure check for d in disconnects[-2:]: l1.rpc.connect(l2.info['id'], 'localhost', l2.port) bump = l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt']) with pytest.raises(RpcError): update = l1.rpc.openchannel_update(chan_id, bump['psbt']) # Now we succeed l1.rpc.connect(l2.info['id'], 'localhost', l2.port) bump = l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt']) update = l1.rpc.openchannel_update(chan_id, bump['psbt']) assert update['commitments_secured']
def test_htlcs_cltv_only_difference(node_factory, bitcoind): # l1 -> l2 -> l3 -> l4 # l4 ignores htlcs, so they stay. # l3 will see a reconnect from l4 when l4 restarts. l1, l2, l3, l4 = node_factory.line_graph(4, announce=True, opts=[{}] * 2 + [{'dev-no-reconnect': None, 'may_reconnect': True}] * 2) h = l4.rpc.invoice(msatoshi=10**8, label='x', description='desc')['payment_hash'] l4.rpc.dev_ignore_htlcs(id=l3.info['id'], ignore=True) # L2 tries to pay r = l2.rpc.getroute(l4.info['id'], 10**8, 1)["route"] l2.rpc.sendpay(r, h) # Now increment CLTV bitcoind.generate_block(1) sync_blockheight(bitcoind, [l1, l2, l3, l4]) # L1 tries to pay r = l1.rpc.getroute(l4.info['id'], 10**8, 1)["route"] l1.rpc.sendpay(r, h) # Now increment CLTV bitcoind.generate_block(1) sync_blockheight(bitcoind, [l1, l2, l3, l4]) # L3 tries to pay r = l3.rpc.getroute(l4.info['id'], 10**8, 1)["route"] l3.rpc.sendpay(r, h) # Give them time to go through. time.sleep(5) # Will all be connected OK. assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'] assert only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['connected'] assert only_one(l3.rpc.listpeers(l4.info['id'])['peers'])['connected'] # Restarting tail node will stop it ignoring HTLCs (it will actually # fail them immediately). l4.restart() l3.rpc.connect(l4.info['id'], 'localhost', l4.port) wait_for(lambda: only_one(l1.rpc.listpayments()['payments'])['status'] == 'failed') wait_for(lambda: only_one(l2.rpc.listpayments()['payments'])['status'] == 'failed') wait_for(lambda: only_one(l3.rpc.listpayments()['payments'])['status'] == 'failed') # Should all still be connected. assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'] assert only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['connected'] assert only_one(l3.rpc.listpeers(l4.info['id'])['peers'])['connected']
def test_htlc_out_timeout(node_factory, bitcoind, executor): """Test that we drop onchain if the peer doesn't time out HTLC""" # HTLC 1->2, 1 fails after it's irrevocably committed, can't reconnect disconnects = ['@WIRE_REVOKE_AND_ACK'] # Feerates identical so we don't get gratuitous commit to update them l1 = node_factory.get_node(disconnect=disconnects, options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500)) l2 = node_factory.get_node() l1.rpc.connect(l2.info['id'], 'localhost', l2.port) chanid = l1.fund_channel(l2, 10**6) # Wait for route propagation. l1.wait_channel_active(chanid) amt = 200000000 inv = l2.rpc.invoice(amt, 'test_htlc_out_timeout', 'desc')['bolt11'] assert only_one(l2.rpc.listinvoices('test_htlc_out_timeout')['invoices'])['status'] == 'unpaid' executor.submit(l1.rpc.pay, inv) # l1 will disconnect, and not reconnect. l1.daemon.wait_for_log('dev_disconnect: @WIRE_REVOKE_AND_ACK') # Takes 6 blocks to timeout (cltv-final + 1), but we also give grace period of 1 block. bitcoind.generate_block(5 + 1) assert not l1.daemon.is_in_log('hit deadline') bitcoind.generate_block(1) l1.daemon.wait_for_log('Offered HTLC 0 SENT_ADD_ACK_REVOCATION cltv .* hit deadline') l1.daemon.wait_for_log('sendrawtx exit 0') l1.bitcoin.generate_block(1) l1.daemon.wait_for_log(' to ONCHAIN') l2.daemon.wait_for_log(' to ONCHAIN') # L1 will timeout HTLC immediately l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 0 blocks', 'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks']) l1.daemon.wait_for_log('sendrawtx exit 0') bitcoind.generate_block(1) l1.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks') bitcoind.generate_block(4) # It should now claim both the to-local and htlc-timeout-tx outputs. l1.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET', 'Broadcasting OUR_DELAYED_RETURN_TO_WALLET', 'sendrawtx exit 0', 'sendrawtx exit 0']) # Now, 100 blocks it should be done. bitcoind.generate_block(100) l1.daemon.wait_for_log('onchaind complete, forgetting peer') l2.daemon.wait_for_log('onchaind complete, forgetting peer')
def test_htlc_in_timeout(node_factory, bitcoind, executor): """Test that we drop onchain if the peer doesn't accept fulfilled HTLC""" # HTLC 1->2, 1 fails after 2 has sent committed the fulfill disconnects = ['-WIRE_REVOKE_AND_ACK*2'] # Feerates identical so we don't get gratuitous commit to update them l1 = node_factory.get_node(disconnect=disconnects, options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500)) l2 = node_factory.get_node() l1.rpc.connect(l2.info['id'], 'localhost', l2.port) chanid = l1.fund_channel(l2, 10**6) l1.wait_channel_active(chanid) sync_blockheight(bitcoind, [l1, l2]) amt = 200000000 inv = l2.rpc.invoice(amt, 'test_htlc_in_timeout', 'desc')['bolt11'] assert only_one(l2.rpc.listinvoices('test_htlc_in_timeout') ['invoices'])['status'] == 'unpaid' executor.submit(l1.rpc.pay, inv) # l1 will disconnect and not reconnect. l1.daemon.wait_for_log('dev_disconnect: -WIRE_REVOKE_AND_ACK') # Deadline HTLC expiry minus 1/2 cltv-expiry delta (rounded up) (== cltv - 3). cltv is 5+1. bitcoind.generate_block(2) assert not l2.daemon.is_in_log('hit deadline') bitcoind.generate_block(1) l2.daemon.wait_for_log( 'Fulfilled HTLC 0 SENT_REMOVE_COMMIT cltv .* hit deadline') l2.daemon.wait_for_log('sendrawtx exit 0') l2.bitcoin.generate_block(1) l2.daemon.wait_for_log(' to ONCHAIN') l1.daemon.wait_for_log(' to ONCHAIN') # L2 will collect HTLC l2.daemon.wait_for_log( 'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks' ) l2.daemon.wait_for_log('sendrawtx exit 0') bitcoind.generate_block(1) l2.daemon.wait_for_log( 'Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks' ) bitcoind.generate_block(4) l2.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET') l2.daemon.wait_for_log('sendrawtx exit 0') # Now, 100 blocks it should be both done. bitcoind.generate_block(100) l1.daemon.wait_for_log('onchaind complete, forgetting peer') l2.daemon.wait_for_log('onchaind complete, forgetting peer')
def test_openchannel_hook(node_factory, bitcoind): """ l2 uses the reject_odd_funding_amounts plugin to reject some openings. """ opts = [{}, { 'plugin': os.path.join(os.getcwd(), 'tests/plugins/reject_odd_funding_amounts.py') }] l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts) # Get some funds. addr = l1.rpc.newaddr()['bech32'] txid = bitcoind.rpc.sendtoaddress(addr, 10) numfunds = len(l1.rpc.listfunds()['outputs']) bitcoind.generate_block(1, txid) wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds) # Even amount: works. l1.rpc.fundchannel(l2.info['id'], 100000) # Make sure plugin got all the vars we expect l2.daemon.wait_for_log('reject_odd_funding_amounts.py: 11 VARS') l2.daemon.wait_for_log('reject_odd_funding_amounts.py: channel_flags=1') l2.daemon.wait_for_log( 'reject_odd_funding_amounts.py: channel_reserve_satoshis=1000000msat') l2.daemon.wait_for_log( 'reject_odd_funding_amounts.py: dust_limit_satoshis=546000msat') l2.daemon.wait_for_log( 'reject_odd_funding_amounts.py: feerate_per_kw=7500') l2.daemon.wait_for_log( 'reject_odd_funding_amounts.py: funding_satoshis=100000000msat') l2.daemon.wait_for_log( 'reject_odd_funding_amounts.py: htlc_minimum_msat=0msat') l2.daemon.wait_for_log('reject_odd_funding_amounts.py: id={}'.format( l1.info['id'])) l2.daemon.wait_for_log( 'reject_odd_funding_amounts.py: max_accepted_htlcs=483') l2.daemon.wait_for_log( 'reject_odd_funding_amounts.py: max_htlc_value_in_flight_msat=18446744073709551615msat' ) l2.daemon.wait_for_log('reject_odd_funding_amounts.py: push_msat=0msat') l2.daemon.wait_for_log('reject_odd_funding_amounts.py: to_self_delay=5') # Close it. txid = l1.rpc.close(l2.info['id'])['txid'] bitcoind.generate_block(1, txid) wait_for(lambda: [ c['state'] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'] ] == ['ONCHAIN']) # Odd amount: fails l1.connect(l2) with pytest.raises(RpcError, match=r"I don't like odd amounts"): l1.rpc.fundchannel(l2.info['id'], 100001)
def test_invoice_weirdstring(node_factory): l1 = node_factory.get_node() weird_label = 'label \\ " \t \n' weird_desc = 'description \\ " \t \n' l1.rpc.invoice(123000, weird_label, weird_desc) # FIXME: invoice RPC should return label! # Can find by this label. inv = only_one(l1.rpc.listinvoices(weird_label)['invoices']) assert inv['label'] == weird_label # Can find this in list. inv = only_one(l1.rpc.listinvoices()['invoices']) assert inv['label'] == weird_label b11 = l1.rpc.decodepay(inv['bolt11']) assert b11['description'] == weird_desc # Can delete by weird label. l1.rpc.delinvoice(weird_label, "unpaid") # We can also use numbers as labels. weird_label = 25 weird_desc = '"' l1.rpc.invoice(123000, weird_label, weird_desc) # FIXME: invoice RPC should return label! # Can find by this label. inv = only_one(l1.rpc.listinvoices(weird_label)['invoices']) assert inv['label'] == str(weird_label) # Can find this in list. inv = only_one(l1.rpc.listinvoices()['invoices']) assert inv['label'] == str(weird_label) b11 = l1.rpc.decodepay(inv['bolt11']) assert b11['description'] == weird_desc # Can delete by weird label. l1.rpc.delinvoice(weird_label, "unpaid")
def test_funding_fail(node_factory, bitcoind): """Add some funds, fund a channel without enough funds""" # Previous runs with same bitcoind can leave funds! max_locktime = 5 * 6 * 24 l1 = node_factory.get_node(random_hsm=True, options={'max-locktime-blocks': max_locktime}) l2 = node_factory.get_node(options={'watchtime-blocks': max_locktime + 1}) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) funds = 1000000 addr = l1.rpc.newaddr()['address'] l1.bitcoin.rpc.sendtoaddress(addr, funds / 10**8) bitcoind.generate_block(1) # Wait for it to arrive. wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0) # Fail because l1 dislikes l2's huge locktime. with pytest.raises(RpcError, match=r'to_self_delay \d+ larger than \d+'): l1.rpc.fundchannel(l2.info['id'], int(funds / 10)) assert only_one(l1.rpc.listpeers()['peers'])['connected'] assert only_one(l2.rpc.listpeers()['peers'])['connected'] # Restart l2 without ridiculous locktime. del l2.daemon.opts['watchtime-blocks'] l2.restart() l1.rpc.connect(l2.info['id'], 'localhost', l2.port) # We don't have enough left to cover fees if we try to spend it all. with pytest.raises(RpcError, match=r'Cannot afford transaction'): l1.rpc.fundchannel(l2.info['id'], funds) # Should still be connected. assert only_one(l1.rpc.listpeers()['peers'])['connected'] l2.daemon.wait_for_log('lightning_openingd-.*: Handed peer, entering loop') assert only_one(l2.rpc.listpeers()['peers'])['connected'] # This works. l1.rpc.fundchannel(l2.info['id'], int(funds / 10))
def test_rbf_reconnect_ack(node_factory, bitcoind, chainparams): disconnects = ['-WIRE_ACK_RBF', '@WIRE_ACK_RBF', '+WIRE_ACK_RBF'] l1, l2 = node_factory.get_nodes(2, opts=[{ 'dev-force-features': '+223', 'may_reconnect': True }, { 'dev-force-features': '+223', 'disconnect': disconnects, 'may_reconnect': True }]) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) amount = 2**24 chan_amount = 100000 bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8 + 0.01) bitcoind.generate_block(1) # Wait for it to arrive. wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0) res = l1.rpc.fundchannel(l2.info['id'], chan_amount) chan_id = res['channel_id'] vins = bitcoind.rpc.decoderawtransaction(res['tx'])['vin'] assert (only_one(vins)) prev_utxos = ["{}:{}".format(vins[0]['txid'], vins[0]['vout'])] # Check that we're waiting for lockin l1.daemon.wait_for_log(' to DUALOPEND_AWAITING_LOCKIN') next_feerate = find_next_feerate(l1, l2) # Initiate an RBF startweight = 42 + 172 # base weight, funding output initpsbt = l1.rpc.utxopsbt(chan_amount, next_feerate, startweight, prev_utxos, reservedok=True, min_witness_weight=110, excess_as_change=True) # Do the bump!? for d in disconnects: l1.rpc.connect(l2.info['id'], 'localhost', l2.port) with pytest.raises(RpcError): l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt']) assert l1.rpc.getpeer(l2.info['id']) is not None # This should succeed l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt'])
def test_autocleaninvoice(node_factory): l1 = node_factory.get_node() l1.rpc.invoice(msatoshi=12300, label='inv1', description='description1', expiry=4) l1.rpc.invoice(msatoshi=12300, label='inv2', description='description2', expiry=12) l1.rpc.autocleaninvoice(cycle_seconds=8, expired_by=2) start_time = time.time() # time 0 # Both should still be there. assert len(l1.rpc.listinvoices('inv1')['invoices']) == 1 assert len(l1.rpc.listinvoices('inv2')['invoices']) == 1 assert l1.rpc.listinvoices('inv1')['invoices'][0]['description'] == 'description1' time.sleep(start_time - time.time() + 6) # total 6 # Both should still be there - auto clean cycle not started. # inv1 should be expired assert len(l1.rpc.listinvoices('inv1')['invoices']) == 1 assert only_one(l1.rpc.listinvoices('inv1')['invoices'])['status'] == 'expired' assert len(l1.rpc.listinvoices('inv2')['invoices']) == 1 assert only_one(l1.rpc.listinvoices('inv2')['invoices'])['status'] != 'expired' time.sleep(start_time - time.time() + 10) # total 10 # inv1 should have deleted, inv2 still there and unexpired. assert len(l1.rpc.listinvoices('inv1')['invoices']) == 0 assert len(l1.rpc.listinvoices('inv2')['invoices']) == 1 assert only_one(l1.rpc.listinvoices('inv2')['invoices'])['status'] != 'expired' time.sleep(start_time - time.time() + 14) # total 14 # inv2 should still be there, but expired assert len(l1.rpc.listinvoices('inv1')['invoices']) == 0 assert len(l1.rpc.listinvoices('inv2')['invoices']) == 1 assert only_one(l1.rpc.listinvoices('inv2')['invoices'])['status'] == 'expired' time.sleep(start_time - time.time() + 18) # total 18 # Everything deleted assert len(l1.rpc.listinvoices('inv1')['invoices']) == 0 assert len(l1.rpc.listinvoices('inv2')['invoices']) == 0
def test_reconnect_channel_peers(node_factory, executor): l1 = node_factory.get_node(may_reconnect=True) l2 = node_factory.get_node(may_reconnect=True) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) l2.restart() # Should reconnect. wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])[ 'connected']) wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])[ 'connected']) # Connect command should succeed. l1.rpc.connect(l2.info['id'], 'localhost', l2.port) # Stop l2 and wait for l1 to notice. l2.stop() wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])[ 'connected']) # Now should fail. with pytest.raises(RpcError, match=r'Connection refused'): l1.rpc.connect(l2.info['id'], 'localhost', l2.port) # Wait for exponential backoff to give us a 2 second window. l1.daemon.wait_for_log('Will try reconnect in 2 seconds') # It should now succeed when it restarts. l2.start() # Multiples should be fine! fut1 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port) fut2 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port) fut3 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port) fut1.result(10) fut2.result(10) fut3.result(10)
def test_reconnect_receiver_add(node_factory): disconnects = ['-WIRE_COMMITMENT_SIGNED', '@WIRE_COMMITMENT_SIGNED', '+WIRE_COMMITMENT_SIGNED', '-WIRE_REVOKE_AND_ACK', '@WIRE_REVOKE_AND_ACK', '+WIRE_REVOKE_AND_ACK'] l1 = node_factory.get_node(may_reconnect=True) l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) amt = 200000000 rhash = l2.rpc.invoice(amt, 'testpayment2', 'desc')['payment_hash'] assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'unpaid' route = [{'msatoshi': amt, 'id': l2.info['id'], 'delay': 5, 'channel': '1:1:1'}] l1.rpc.sendpay(route, rhash) for i in range(len(disconnects)): l1.daemon.wait_for_log('Already have funding locked in') assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'paid'
def test_gossip_notices_close(node_factory, bitcoind): # We want IO logging so we can replay a channel_announce to l1. l1 = node_factory.get_node(options={'log-level': 'io'}) l2, l3 = node_factory.line_graph(2) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) # FIXME: sending SIGUSR1 immediately may kill it before handler installed. l1.daemon.wait_for_log('Handed peer, entering loop') subprocess.run(['kill', '-USR1', l1.subd_pid('openingd')]) bitcoind.generate_block(5) # Make sure l1 learns about channel. wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2) wait_for(lambda: len(l1.rpc.listnodes()['nodes']) == 2) l1.rpc.disconnect(l2.info['id']) # Grab channel_announcement from io logs (ends in ') channel_announcement = l1.daemon.is_in_log(r'\[IN\] 0100').split(' ')[-1][:-1] channel_update = l1.daemon.is_in_log(r'\[IN\] 0102').split(' ')[-1][:-1] node_announcement = l1.daemon.is_in_log(r'\[IN\] 0101').split(' ')[-1][:-1] l2.rpc.close(l3.info['id']) wait_for(lambda: only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE') bitcoind.generate_block(1) wait_for(lambda: l1.rpc.listchannels()['channels'] == []) wait_for(lambda: l1.rpc.listnodes()['nodes'] == []) # FIXME: This is a hack: we should have a framework for canned conversations # This doesn't naturally terminate, so we give it 5 seconds. try: subprocess.run(['devtools/gossipwith', '{}@localhost:{}'.format(l1.info['id'], l1.port), channel_announcement, channel_update, node_announcement], timeout=5, stdout=subprocess.PIPE) except subprocess.TimeoutExpired: pass # l1 should reject it. assert(l1.rpc.listchannels()['channels'] == []) assert(l1.rpc.listnodes()['nodes'] == []) l1.stop() l1.start() assert(l1.rpc.listchannels()['channels'] == []) assert(l1.rpc.listnodes()['nodes'] == [])
def test_funding_all(node_factory, bitcoind):
    """Add some funds, fund a channel using all funds, make sure no funds remain
    """
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    l1.fundwallet(0.1 * 10**8)
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == 10000000

    l1.rpc.fundchannel(l2.info['id'], "all")

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert len(outputs) == 0
def test_invoice_expiry(node_factory, executor): l1, l2 = node_factory.line_graph(2, fundchannel=True) inv = l2.rpc.invoice(msatoshi=123000, label='test_pay', description='description', expiry=1)['bolt11'] time.sleep(2) with pytest.raises(RpcError): l1.rpc.pay(inv) invoices = l2.rpc.listinvoices('test_pay')['invoices'] assert len(invoices) == 1 assert invoices[0]['status'] == 'expired' and invoices[0]['expires_at'] < time.time() # Try deleting it. with pytest.raises(RpcError, match=r'Invoice status is expired not unpaid'): l2.rpc.delinvoice('test_pay', 'unpaid') with pytest.raises(RpcError, match=r'Invoice status is expired not paid'): l2.rpc.delinvoice('test_pay', 'paid') l2.rpc.delinvoice('test_pay', 'expired') with pytest.raises(RpcError, match=r'Unknown invoice'): l2.rpc.delinvoice('test_pay', 'expired') # Test expiration waiting. # The second invoice created expires first. l2.rpc.invoice('any', 'inv1', 'description', 10) l2.rpc.invoice('any', 'inv2', 'description', 4) l2.rpc.invoice('any', 'inv3', 'description', 16) creation = int(time.time()) # Check waitinvoice correctly waits w1 = executor.submit(l2.rpc.waitinvoice, 'inv1') w2 = executor.submit(l2.rpc.waitinvoice, 'inv2') w3 = executor.submit(l2.rpc.waitinvoice, 'inv3') time.sleep(2) # total 2 assert not w1.done() assert not w2.done() assert not w3.done() time.sleep(4) # total 6 assert not w1.done() with pytest.raises(RpcError): w2.result() assert not w3.done() time.sleep(6) # total 12 with pytest.raises(RpcError): w1.result() assert not w3.done() time.sleep(8) # total 20 with pytest.raises(RpcError): w3.result() # Test delexpiredinvoice l2.rpc.delexpiredinvoice(maxexpirytime=creation + 8) # only inv2 should have been deleted assert len(l2.rpc.listinvoices()['invoices']) == 2 assert len(l2.rpc.listinvoices('inv2')['invoices']) == 0 # Test delexpiredinvoice all l2.rpc.delexpiredinvoice() # all invoices are expired and should be deleted assert len(l2.rpc.listinvoices()['invoices']) == 0 # Test expiry suffixes. start = int(time.time()) inv = l2.rpc.invoice(msatoshi=123000, label='inv_s', description='description', expiry='1s')['bolt11'] end = int(time.time()) expiry = only_one(l2.rpc.listinvoices('inv_s')['invoices'])['expires_at'] assert expiry >= start + 1 and expiry <= end + 1 start = int(time.time()) inv = l2.rpc.invoice(msatoshi=123000, label='inv_m', description='description', expiry='1m')['bolt11'] end = int(time.time()) expiry = only_one(l2.rpc.listinvoices('inv_m')['invoices'])['expires_at'] assert expiry >= start + 60 and expiry <= end + 60 start = int(time.time()) inv = l2.rpc.invoice(msatoshi=123000, label='inv_h', description='description', expiry='1h')['bolt11'] end = int(time.time()) expiry = only_one(l2.rpc.listinvoices('inv_h')['invoices'])['expires_at'] assert expiry >= start + 3600 and expiry <= end + 3600 start = int(time.time()) inv = l2.rpc.invoice(msatoshi=123000, label='inv_d', description='description', expiry='1d')['bolt11'] end = int(time.time()) expiry = only_one(l2.rpc.listinvoices('inv_d')['invoices'])['expires_at'] assert expiry >= start + 24 * 3600 and expiry <= end + 24 * 3600 start = int(time.time()) inv = l2.rpc.invoice(msatoshi=123000, label='inv_w', description='description', expiry='1w')['bolt11'] end = int(time.time()) expiry = only_one(l2.rpc.listinvoices('inv_w')['invoices'])['expires_at'] assert expiry >= start + 7 * 24 * 3600 and expiry <= end + 7 * 24 * 3600
def test_gossip_query_channel_range(node_factory, bitcoind): l1, l2, l3, l4 = node_factory.line_graph(4, opts={'log-level': 'io'}, fundchannel=False) # Make public channels on consecutive blocks l1.fundwallet(10**6) l2.fundwallet(10**6) num_tx = len(bitcoind.rpc.getrawmempool()) l1.rpc.fundchannel(l2.info['id'], 10**5)['tx'] wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == num_tx + 1) bitcoind.generate_block(1) num_tx = len(bitcoind.rpc.getrawmempool()) l2.rpc.fundchannel(l3.info['id'], 10**5)['tx'] wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == num_tx + 1) bitcoind.generate_block(1) # Get them both to gossip depth. bitcoind.generate_block(5) # Make sure l2 has received all the gossip. l2.daemon.wait_for_logs(['Received node_announcement for node ' + l1.info['id'], 'Received node_announcement for node ' + l3.info['id']]) scid12 = only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'][0]['short_channel_id'] scid23 = only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['channels'][0]['short_channel_id'] block12 = int(scid12.split('x')[0]) block23 = int(scid23.split('x')[0]) assert block23 == block12 + 1 # l1 asks for all channels, gets both. ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=0, num=1000000) assert ret['final_first_block'] == 0 assert ret['final_num_blocks'] == 1000000 assert ret['final_complete'] assert len(ret['short_channel_ids']) == 2 assert ret['short_channel_ids'][0] == scid12 assert ret['short_channel_ids'][1] == scid23 # Does not include scid12 ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=0, num=block12) assert ret['final_first_block'] == 0 assert ret['final_num_blocks'] == block12 assert ret['final_complete'] assert len(ret['short_channel_ids']) == 0 # Does include scid12 ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=0, num=block12 + 1) assert ret['final_first_block'] == 0 assert ret['final_num_blocks'] == block12 + 1 assert ret['final_complete'] assert len(ret['short_channel_ids']) == 1 assert ret['short_channel_ids'][0] == scid12 # Doesn't include scid23 ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=0, num=block23) assert ret['final_first_block'] == 0 assert ret['final_num_blocks'] == block23 assert ret['final_complete'] assert len(ret['short_channel_ids']) == 1 assert ret['short_channel_ids'][0] == scid12 # Does include scid23 ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=block12, num=block23 - block12 + 1) assert ret['final_first_block'] == block12 assert ret['final_num_blocks'] == block23 - block12 + 1 assert ret['final_complete'] assert len(ret['short_channel_ids']) == 2 assert ret['short_channel_ids'][0] == scid12 assert ret['short_channel_ids'][1] == scid23 # Only includes scid23 ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=block23, num=1) assert ret['final_first_block'] == block23 assert ret['final_num_blocks'] == 1 assert ret['final_complete'] assert len(ret['short_channel_ids']) == 1 assert ret['short_channel_ids'][0] == scid23 # Past both ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=block23 + 1, num=1000000) assert ret['final_first_block'] == block23 + 1 assert ret['final_num_blocks'] == 1000000 assert ret['final_complete'] assert len(ret['short_channel_ids']) == 0 # Turn on IO logging in l1 channeld. 
subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')]) # Make l2 split reply into two (technically async) l2.rpc.dev_set_max_scids_encode_size(max=9) l2.daemon.wait_for_log('Set max_scids_encode_bytes to 9') ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=0, num=1000000) # Turns out it sends: 0+53, 53+26, 79+13, 92+7, 99+3, 102+2, 104+1, 105+999895 l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 8) # It should definitely have split assert ret['final_first_block'] != 0 or ret['final_num_blocks'] != 1000000 assert ret['final_complete'] assert len(ret['short_channel_ids']) == 2 assert ret['short_channel_ids'][0] == scid12 assert ret['short_channel_ids'][1] == scid23 l2.daemon.wait_for_log('queue_channel_ranges full: splitting') # Test overflow case doesn't split forever; should still only get 8 for this ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=1, num=429496000) l1.daemon.wait_for_logs([r'\[IN\] 0108'] * 8) # And no more! time.sleep(1) assert not l1.daemon.is_in_log(r'\[IN\] 0108', start=l1.daemon.logsearch_start) # This should actually be large enough for zlib to kick in! l3.fund_channel(l4, 10**5) bitcoind.generate_block(5) l2.daemon.wait_for_log('Received node_announcement for node ' + l4.info['id']) # Restore infinite encode size. l2.rpc.dev_set_max_scids_encode_size(max=(2**32 - 1)) l2.daemon.wait_for_log('Set max_scids_encode_bytes to {}' .format(2**32 - 1)) ret = l1.rpc.dev_query_channel_range(id=l2.info['id'], first=0, num=65535) l1.daemon.wait_for_log( # WIRE_REPLY_CHANNEL_RANGE r'\[IN\] 0108' # chain_hash + '................................................................' # first_blocknum + '00000000' # number_of_blocks + '0000ffff' # complete + '01' # length + '....' # encoding + '01' )
def test_gossip_jsonrpc(node_factory):
    l1, l2 = node_factory.line_graph(2, fundchannel=True, wait_for_announce=False)

    # Shouldn't send announce signatures until 6 deep.
    assert not l1.daemon.is_in_log('peer_out WIRE_ANNOUNCEMENT_SIGNATURES')

    # Channels should be activated locally
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2)

    # Make sure we can route through the channel, will raise on failure
    l1.rpc.getroute(l2.info['id'], 100, 1)

    # Outgoing should be active, but not public.
    channels1 = l1.rpc.listchannels()['channels']
    channels2 = l2.rpc.listchannels()['channels']

    assert [c['active'] for c in channels1] == [True, True]
    assert [c['active'] for c in channels2] == [True, True]

    # The incoming direction will be considered public, hence check for
    # outgoing only.
    assert len([c for c in channels1 if not c['public']]) == 2
    assert len([c for c in channels2 if not c['public']]) == 2

    # Test listchannels-by-source
    channels1 = l1.rpc.listchannels(source=l1.info['id'])['channels']
    channels2 = l2.rpc.listchannels(source=l1.info['id'])['channels']
    assert only_one(channels1)['source'] == l1.info['id']
    assert only_one(channels1)['destination'] == l2.info['id']
    assert channels1 == channels2

    # Now proceed to funding-depth and do a full gossip round
    l1.bitcoin.generate_block(5)
    # Could happen in either order.
    l1.daemon.wait_for_logs(['peer_out WIRE_ANNOUNCEMENT_SIGNATURES',
                             'peer_in WIRE_ANNOUNCEMENT_SIGNATURES'])

    # Just wait for the update to kick off and then check the effect
    needle = "Received node_announcement for node"
    l1.daemon.wait_for_log(needle)
    l2.daemon.wait_for_log(needle)
    # Need to increase timeout, intervals cannot be shortened with DEVELOPER=0
    wait_for(lambda: len(l1.getactivechannels()) == 2, timeout=60)
    wait_for(lambda: len(l2.getactivechannels()) == 2, timeout=60)

    nodes = l1.rpc.listnodes()['nodes']
    assert set([n['nodeid'] for n in nodes]) == set([l1.info['id'], l2.info['id']])

    # Test listnodes with an arg, while we're here.
    n1 = l1.rpc.listnodes(l1.info['id'])['nodes'][0]
    n2 = l1.rpc.listnodes(l2.info['id'])['nodes'][0]
    assert n1['nodeid'] == l1.info['id']
    assert n2['nodeid'] == l2.info['id']

    # Might not have seen other node-announce yet.
    assert n1['alias'].startswith('JUNIORBEAM')
    assert n1['color'] == '0266e4'
    if 'alias' not in n2:
        assert 'color' not in n2
        assert 'addresses' not in n2
    else:
        assert n2['alias'].startswith('SILENTARTIST')
        assert n2['color'] == '022d22'

    assert [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['public'] for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['active'] for c in l2.rpc.listchannels()['channels']] == [True, True]
    assert [c['public'] for c in l2.rpc.listchannels()['channels']] == [True, True]