Code example #1
def sync_gossip(nodes, scids):
    node = nodes[0]
    nodes = nodes[1:]
    for scid in scids:
        for n in nodes:
            wait_for(lambda: node.rpc.listchannels(scid) == n.rpc.listchannels(
                scid))
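Every snippet on this page polls some condition through wait_for. The helper itself never appears in the extracted code; as a rough sketch (the real pyln-testing utility may differ in its default timeout and back-off details), it amounts to something like:

import time


def wait_for(success, timeout=180):
    """Poll the success() predicate until it returns True, or give up."""
    start_time = time.time()
    interval = 0.25
    while not success():
        if time.time() > start_time + timeout:
            # Code example #7 below catches a ValueError to detect the timeout.
            raise ValueError("Timeout while waiting for {}".format(success))
        time.sleep(interval)
        interval = min(interval * 2, 5)  # back off so long waits do not busy-poll

The predicate is re-evaluated on every iteration, which is why all of the examples pass a lambda rather than a pre-computed boolean.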
Code example #2
    def start(self) -> None:
        self.proc = subprocess.Popen([
            '{}/lightningd/lightningd'.format(
                LIGHTNING_SRC), '--lightning-dir={}'.format(
                    self.lightning_dir), '--funding-confirms=3',
            '--dev-force-privkey=0000000000000000000000000000000000000000000000000000000000000001',
            '--dev-force-bip32-seed=0000000000000000000000000000000000000000000000000000000000000001',
            '--dev-force-channel-secrets=0000000000000000000000000000000000000000000000000000000000000010/0000000000000000000000000000000000000000000000000000000000000011/0000000000000000000000000000000000000000000000000000000000000012/0000000000000000000000000000000000000000000000000000000000000013/0000000000000000000000000000000000000000000000000000000000000014/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF',
            '--dev-bitcoind-poll=1', '--dev-fast-gossip',
            '--dev-no-htlc-timeout', '--bind-addr=127.0.0.1:{}'.format(
                self.lightning_port), '--network=regtest',
            '--bitcoin-rpcuser=rpcuser', '--bitcoin-rpcpassword=rpcpass',
            '--bitcoin-rpcport={}'.format(
                self.bitcoind.port), '--log-level=debug', '--log-file=log'
        ] + self.startup_flags)
        self.rpc = pyln.client.LightningRpc(
            os.path.join(self.lightning_dir, "regtest", "lightning-rpc"))

        def node_ready(rpc: pyln.client.LightningRpc) -> bool:
            try:
                rpc.getinfo()
                return True
            except Exception:
                return False

        wait_for(lambda: node_ready(self.rpc))

        # Make sure that we see any funds that come to our wallet
        for i in range(5):
            self.rpc.newaddr()
Code example #3
File: test_datastore.py  Project: svewa/plugins
def test_upgrade(node_factory):
    l1 = node_factory.get_node()

    datastore = shelve.open(os.path.join(l1.daemon.lightning_dir, 'regtest', 'datastore.dat'), 'c')
    datastore['foo'] = b'foodata'
    datastore['bar'] = b'bardata'
    datastore.close()

    # This "fails" because it unloads itself.
    try:
        l1.rpc.plugin_start(plugin_path)
    except RpcError:
        pass

    # There's no upgrade if there's a real datastore.
    if l1.daemon.is_in_log('there is a real datastore command'):
        return

    l1.daemon.wait_for_log('Upgrading store to have generation numbers')
    wait_for(lambda: not os.path.exists(os.path.join(l1.daemon.lightning_dir,
                                                     'regtest',
                                                     'datastore.dat')))

    vals = l1.rpc.listdatastore()['datastore']
    assert vals == [{'key': ['bar'],
                     'generation': 0,
                     'hex': b'bardata'.hex(),
                     'string': 'bardata'},
                    {'key': ['foo'],
                     'generation': 0,
                     'hex': b'foodata'.hex(),
                     'string': 'foodata'}]
Code example #4
    def add_blocks(self, event: Event, txs: List[str], n: int) -> None:
        for tx in txs:
            self.bitcoind.rpc.sendrawtransaction(tx)
        self.bitcoind.rpc.generatetoaddress(n,
                                            self.bitcoind.rpc.getnewaddress())

        wait_for(
            lambda: self.rpc.getinfo()['blockheight'] == self.getblockheight())
Code example #5
def test_grpc_no_auto_start(node_factory):
    """Ensure that we do not start cln-grpc unless a port is configured.
    """
    l1 = node_factory.get_node()

    wait_for(lambda: [
        p for p in l1.rpc.plugin('list')['plugins'] if 'cln-grpc' in p['name']
    ] == [])
    assert l1.daemon.is_in_log(
        r'plugin-cln-grpc: Killing plugin: disabled itself at init')
Code example #6
File: test_rebalance.py  Project: gallizoltan/plugins
def test_rebalance_all(node_factory, bitcoind):
    l1, l2, l3 = node_factory.line_graph(3, opts=plugin_opt)
    nodes = [l1, l2, l3]

    # check we get an error if there's just one channel
    result = l1.rpc.rebalanceall()
    assert result[
        'message'] == 'Error: Not enough open channels to rebalance anything'

    # now we add another 100% outgoing liquidity to l1 which does not help
    l4 = node_factory.get_node()
    l1.connect(l4)
    l1.fundchannel(l4)

    # test this is still not possible
    result = l1.rpc.rebalanceall()
    assert result[
        'message'] == 'Error: Not enough liquidity to rebalance anything'

    # remove l4 so it does not distort further testing
    l1.rpc.close(l1.get_channel_scid(l4))

    # now we form a circle so we can actually do rebalanceall
    l3.connect(l1)
    l3.fundchannel(l1)

    # get scids
    scid12 = l1.get_channel_scid(l2)
    scid23 = l2.get_channel_scid(l3)
    scid31 = l3.get_channel_scid(l1)
    scids = [scid12, scid23, scid31]

    # wait for each other's gossip
    bitcoind.generate_block(6)
    wait_for_all_active(nodes, scids)

    # check the rebalanceall starts
    result = l1.rpc.rebalanceall(feeratio=5.0)  # we need high fees to work
    assert result['message'].startswith('Rebalance started')
    l1.daemon.wait_for_log(f"Try to rebalance: {scid12} -> {scid31}")
    wait_for(lambda: not l1.rpc.rebalanceall()['message'].startswith(
        "Rebalance is already running"))
    result = l1.rpc.rebalancestop()
    assert result['message'].startswith("Automatic rebalance finished")

    # wait until listpeers is up to date
    wait_for_all_htlcs(nodes)

    # check that channels are now balanced
    c12 = l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'][0]
    c13 = l1.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]
    assert abs(0.5 - (Millisatoshi(c12['to_us_msat']) /
                      Millisatoshi(c12['total_msat']))) < 0.01
    assert abs(0.5 - (Millisatoshi(c13['to_us_msat']) /
                      Millisatoshi(c13['total_msat']))) < 0.01
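Code example #6 also relies on two helpers that the extract does not include, wait_for_all_active and wait_for_all_htlcs. Built on wait_for and on RPC fields that do appear elsewhere on this page, plausible sketches (assumptions, not the plugin repository's exact code) look like:

def wait_for_all_active(nodes, scids):
    """Wait until every node sees both directions of every channel as active in gossip."""
    for n in nodes:
        for scid in scids:
            wait_for(lambda: [c['active']
                              for c in n.rpc.listchannels(scid)['channels']] == [True, True])


def wait_for_all_htlcs(nodes):
    """Wait until no node has HTLCs pending on any of its channels."""
    for n in nodes:
        wait_for(lambda: all(ch['htlcs'] == []
                             for p in n.rpc.listpeers()['peers']
                             for ch in p['channels']))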
Code example #7
    def expect_tx(self, event: Event, txid: str) -> None:
        # Ah bitcoin endianness...
        revtxid = bitcoin.core.lx(txid).hex()

        # This txid should appear in the mempool.
        try:
            wait_for(lambda: revtxid in self.bitcoind.rpc.getrawmempool())
        except ValueError:
            raise EventError(
                event, "Did not broadcast the txid {}, just {}".format(
                    revtxid, [(txid, self.bitcoind.rpc.getrawtransaction(txid))
                              for txid in self.bitcoind.rpc.getrawmempool()]))
Code example #8
def test_sendmsg_retry(node_factory, executor):
    """Fail a sendmsg using a cheap route, and check that it retries.

    ```dot
    digraph {
      l1 -> l2;
      l2 -> l3;
      l3 -> l4 [label = "fee-base=100'000"];
      l2 -> l5;
      l5 -> l4 [label = "fee-base=normal"];
    }
    ```

    By having a huge fee on the l3 -> l4 edge we force the initial attempt to
    go through l1 -> l2 -> l5 -> l4, which should fail since l5 is offline (l1
    should still be unaware of this).

    """
    opts = [{'plugin': plugin}, {}, {'fee-base': 10000}, {'plugin': plugin}]
    l1, l2, l3, l4 = node_factory.line_graph(4, opts=opts)
    l5 = node_factory.get_node()

    l2.openchannel(l5, 10**6)
    l5.openchannel(l4, 10**6)

    def gossip_synced(nodes):
        for a, b in zip(nodes[:-1], nodes[1:]):
            if a.rpc.listchannels() != b.rpc.listchannels():
                return False
        return True

    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']]
             == [True] * 10)

    # Now stop l5 so the first attempt will fail.
    l5.stop()

    executor.submit(l4.rpc.recvmsg)
    send = executor.submit(l1.rpc.sendmsg, l4.info['id'], "Hello world!")

    # Just making sure our view didn't change since we initiated the attempt
    assert ([c['active']
             for c in l1.rpc.listchannels()['channels']] == [True] * 10)
    pprint(l1.rpc.listchannels())

    l1.daemon.wait_for_log(r'Retrying delivery')

    sres = send.result(10)
    assert (sres['attempt'] == 2)
    pprint(sres)

    l4.rpc.recvmsg(last_id=-1)
Code example #9
def test_grpc_no_auto_start(node_factory):
    """Ensure that we do not start cln-grpc unless a port is configured.
    """
    bin_path = Path.cwd() / "target" / "debug" / "cln-grpc"
    l1, = node_factory.get_nodes(1, opts={
        "plugin": str(bin_path),
    })

    wait_for(lambda: [
        p for p in l1.rpc.plugin('list')['plugins'] if 'cln-grpc' in p['name']
    ] == [])
    assert l1.daemon.is_in_log(
        r'plugin-cln-grpc: Killing plugin: exited during normal operation')
Code example #10
def test_summary_peer_thread(node_factory):
    # Set a low PeerThread interval so we can test quickly.
    opts = {'summary-availability-interval': 0.5}
    opts.update(pluginopt)
    l1, l2 = node_factory.line_graph(2, opts=opts)
    l2id = l2.info['id']

    # when
    s1 = l1.rpc.summary()
    l2.stop()  # we stop l2 and wait for l1 to see that
    wait_for(lambda: l1.rpc.listpeers(l2id)['peers'][0]['connected'] is False)
    l1.daemon.logsearch_start = len(l1.daemon.logs)
    l1.daemon.wait_for_log(r".*availability persisted and synced.*")
    s2 = l1.rpc.summary()

    # then
    avail1 = int(re.search(' ([0-9]*)% ', s1['channels'][2]).group(1))
    avail2 = int(re.search(' ([0-9]*)% ', s2['channels'][2]).group(1))
    assert (avail1 == 100)
    assert (avail2 > 0 and avail2 < avail1)
Code example #11
File: test_chat.py  Project: laanwj/plugins
def test_msg_and_keysend(node_factory, executor):
    opts = [{'plugin': plugin}, {}, {'plugin': plugin}]
    l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True, opts=opts)
    amt = 10000

    # Check that l3 does not have funds initially
    assert(l3.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == 0)

    l1.rpc.sendmsg(l3.info['id'], "Hello world!", amt)
    m = l3.rpc.recvmsg(last_id=-1)

    assert(m['sender'] == l1.info['id'])
    assert(m['verified'] is True)
    p = m['payment']
    assert(p is not None)
    assert(p['payment_key'] is not None)
    assert(p['amount'] == '10000msat')

    # Check that l3 actually got the funds I sent it
    wait_for(lambda: l3.rpc.listpeers()['peers'][0]['channels'][0]['msatoshi_to_us'] == amt)
Code example #12
def test_feeadjuster_adjusts(node_factory):
    """
    A rather simple network:

            A                   B
    l1  <========>   l2   <=========>  l3

    l2 will adjust its configuration-set base and proportional fees for
    channels A and B as l1 and l3 exchange payments.
    """
    base_fee = 5000
    ppm_fee = 300
    l2_opts = {
        "fee-base": base_fee,
        "fee-per-satoshi": ppm_fee,
        "plugin": plugin_path,
        "feeadjuster-deactivate-fuzz": None,
    }
    l1, l2, l3 = node_factory.line_graph(3,
                                         opts=[{}, l2_opts, {}],
                                         wait_for_announce=True)

    chan_A = l2.rpc.listpeers(l1.info["id"])["peers"][0]["channels"][0]
    chan_B = l2.rpc.listpeers(l3.info["id"])["peers"][0]["channels"][0]
    scid_A = chan_A["short_channel_id"]
    scid_B = chan_B["short_channel_id"]
    nodes = [l1, l2, l3]
    scids = [scid_A, scid_B]

    # Fees don't get updated until there is a forwarding event!
    assert all(
        [get_chan_fees(l2, scid) == (base_fee, ppm_fee) for scid in scids])

    chan_total = int(chan_A["total_msat"])
    assert chan_total == int(chan_B["total_msat"])

    # The first payment will trigger fee adjustment, no matter its value
    amount = int(chan_total * 0.04)
    pay(l1, l3, amount)
    wait_for(lambda: all(
        [get_chan_fees(l2, scid) != (base_fee, ppm_fee) for scid in scids]))

    # Send most of the balance to the other side..
    amount = int(chan_total * 0.8)
    pay(l1, l3, amount)
    l2.daemon.wait_for_logs([
        f'Adjusted fees of {scid_A} with a ratio of 0.2',
        f'Adjusted fees of {scid_B} with a ratio of 3.'
    ])

    # ..And back
    sync_gossip(nodes, scids)
    pay(l3, l1, amount)
    l2.daemon.wait_for_logs([
        f'Adjusted fees of {scid_A} with a ratio of 6.',
        f'Adjusted fees of {scid_B} with a ratio of 0.1'
    ])

    # Sending a payment worth 3% of the channel balance should not trigger
    # fee adjustment
    sync_gossip(nodes, scids)
    fees_before = [get_chan_fees(l2, scid) for scid in [scid_A, scid_B]]
    amount = int(chan_total * 0.03)
    pay(l1, l3, amount)
    sync_gossip(nodes, scids)
    assert fees_before == [get_chan_fees(l2, scid) for scid in scids]

    # But sending another 3%-worth payment does trigger adjustment (total sent
    # since last adjustment is >5%)
    pay(l1, l3, amount)
    l2.daemon.wait_for_logs([
        f'Adjusted fees of {scid_A} with a ratio of 4.',
        f'Adjusted fees of {scid_B} with a ratio of 0.2'
    ])
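Code example #12 above and #13 below both call helpers that sit outside the extract: pay and get_chan_fees. Assuming get_chan_fees reads the node's own advertised fees from listchannels and pay simply settles an invoice between two nodes, minimal sketches could be:

import time


def get_chan_fees(node, scid):
    """Return (base_fee_msat, fee_ppm) that `node` advertises for its side of scid."""
    for ch in node.rpc.listchannels(scid)['channels']:
        if ch['source'] == node.info['id']:
            return (ch['base_fee_millisatoshi'], ch['fee_per_millionth'])
    return None  # our side of the channel is not in gossip yet


def pay(src, dst, amount_msat):
    """Hypothetical helper: dst issues an invoice for amount_msat and src pays it."""
    label = "pay-{}".format(time.time())  # invoice labels must be unique per node
    inv = dst.rpc.invoice(amount_msat, label, "test payment")
    src.rpc.pay(inv['bolt11'])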
Code example #13
def wait_for_not_fees(l, scids, fees):
    for scid in scids:
        wait_for(lambda: not get_chan_fees(l, scid) == fees)
Code example #14
def test_rebalance_failure(node_factory):
    """Same setup as the first test :

    l1 ---- l2 ---- l3 ----- l4
              |    /
              |   /
              |  /
               l5

    We now test failures (l5 rejects HTLCs, l3 takes too long to resolve it).
    """
    # First, the "no route left" case.
    opts = [{}, {
        'plugin': plugin,
        'jitrebalance-try-timeout': 3
    }, {}, {}, {
        'plugin': reject_plugin
    }]
    l1, l2, l3, l4, l5 = node_factory.get_nodes(5, opts=opts)
    amt = 10**7

    # Open the channels
    channels = [(l1, l2), (l3, l2), (l3, l4), (l2, l5), (l5, l3)]
    for src, dst in channels:
        src.openchannel(dst, capacity=10**6)

    # Drain (l2, l3) so that a larger payment fails later on
    chan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]

    # Send 9 million millisatoshis + reserve + a tiny fee allowance from l3 to
    # l2 for the actual payment
    inv = l2.rpc.invoice(
        chan['our_channel_reserve_satoshis'] * 1000 + 9000000 + 100,
        "imbalance", "imbalance")
    time.sleep(1)
    l3.rpc.pay(inv['bolt11'])

    def no_pending_htlcs():
        peer = l2.rpc.listpeers(l3.info['id'])['peers'][0]
        return peer['channels'][0]['htlcs'] == []

    wait_for(no_pending_htlcs)

    chan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]
    assert (chan['spendable_msatoshi'] < amt)

    # Get (l2, l5) so we can exclude it when routing from l1 to l4
    peer = l2.rpc.listpeers(l5.info['id'])['peers'][0]
    scid = peer['channels'][0]['short_channel_id']

    # The actual invoice that l1 will attempt to pay to l4, and that will be
    # larger than the current capacity of (l2, l3) so it triggers a
    # rebalancing.
    inv = l4.rpc.invoice(amt, "test", "test")

    # Now wait for gossip to settle and l1 to learn the topology so it can
    # then route the payment. We do this now since we already did what we
    # could without this info
    wait_for(
        lambda: len(l1.rpc.listchannels()['channels']) == 2 * len(channels))

    route = l1.rpc.getroute(node_id=l4.info['id'],
                            msatoshi=amt,
                            riskfactor=1,
                            exclude=[scid + '/0', scid + '/1'])['route']

    # This will exclude [l5, l3] and fail as there is no route left
    l1.rpc.sendpay(route, inv['payment_hash'])
    with pytest.raises(RpcError, match='WIRE_TEMPORARY_CHANNEL_FAILURE'):
        l1.rpc.waitsendpay(inv['payment_hash'])
    assert l2.daemon.is_in_log('Could not get a route, no remaining one?')
    l5.rpc.plugin_stop(reject_plugin)

    # Now test the timeout on number of attempts
    l3.rpc.plugin_start(hold_plugin)
    l1.rpc.sendpay(route, inv['payment_hash'])
    # l3 will hold on to the HTLC, and by the time it rejects it, l2 won't try
    # other routes as it has exceeded its timeout
    with pytest.raises(RpcError, match='WIRE_TEMPORARY_CHANNEL_FAILURE'):
        l1.rpc.waitsendpay(inv['payment_hash'])
    assert l2.daemon.is_in_log('Timed out while trying to rebalance')
Code example #15
def test_simple_rebalance(node_factory):
    """Simple rebalance that routes along a cycle to enable the original payment

    l1 ---- l2 ---- l3 ----- l4
              |    /
              |   /
              |  /
               l5

    We are going to drain the channel (l2, l3) of most of its funds and then
    ask l1 to route through [l1, l2, l3, l4]. Under normal circumstances
    that'd fail since (l2, l3) doesn't have sufficient funds. l2 however will
    attempt to rebalance (l2,l3) using a circular route (l2, l5, l3, l2) to
    get the required funds back.

    """
    print(plugin)
    opts = [{}, {'plugin': plugin}, {}, {}, {}]
    l1, l2, l3, l4, l5 = node_factory.get_nodes(5, opts=opts)
    amt = 10**7

    # Open the channels
    channels = [(l1, l2), (l3, l2), (l3, l4), (l2, l5), (l5, l3)]
    for src, dst in channels:
        src.openchannel(dst, capacity=10**6)

    # Drain (l2, l3) so that a larger payment fails later on
    chan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]

    # Send 9 million millisatoshis + reserve + a tiny fee allowance from l3 to
    # l2 for the actual payment
    inv = l2.rpc.invoice(
        chan['our_channel_reserve_satoshis'] * 1000 + 9000000 + 100,
        "imbalance", "imbalance")
    time.sleep(1)
    l3.rpc.pay(inv['bolt11'])

    def no_pending_htlcs():
        peer = l2.rpc.listpeers(l3.info['id'])['peers'][0]
        return peer['channels'][0]['htlcs'] == []

    wait_for(no_pending_htlcs)

    chan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]
    assert (chan['spendable_msatoshi'] < amt)

    # Get (l2, l5) so we can exclude it when routing from l1 to l4
    peer = l2.rpc.listpeers(l5.info['id'])['peers'][0]
    scid = peer['channels'][0]['short_channel_id']

    # The actual invoice that l1 will attempt to pay to l4, and that will be
    # larger than the current capacity of (l2, l3) so it triggers a
    # rebalancing.
    inv = l4.rpc.invoice(amt, "test", "test")

    # Now wait for gossip to settle and l1 to learn the topology so it can
    # then route the payment. We do this now since we already did what we
    # could without this info
    wait_for(
        lambda: len(l1.rpc.listchannels()['channels']) == 2 * len(channels))

    route = l1.rpc.getroute(node_id=l4.info['id'],
                            msatoshi=amt,
                            riskfactor=1,
                            exclude=[scid + '/0', scid + '/1'])['route']

    # This will succeed with l2 doing a just-in-time rebalancing!
    l1.rpc.sendpay(route, inv['payment_hash'])
    assert l1.rpc.waitsendpay(inv['payment_hash'])['status'] == 'complete'
    assert l2.daemon.is_in_log('Succesfully re-filled outgoing capacity')
Code example #16
def wait_for_grpc_start(node):
    """This can happen before "public key" which start() swallows"""
    wait_for(lambda: node.daemon.is_in_log(r'serving grpc on 0.0.0.0:'))