Code example #1
class LightningNode(object):
    def __init__(self,
                 node_id,
                 lightning_dir,
                 bitcoind,
                 executor,
                 may_fail=False,
                 may_reconnect=False,
                 allow_broken_log=False,
                 allow_bad_gossip=False,
                 db=None,
                 port=None,
                 disconnect=None,
                 random_hsm=None,
                 options=None,
                 **kwargs):
        self.bitcoin = bitcoind
        self.executor = executor
        self.may_fail = may_fail
        self.may_reconnect = may_reconnect
        self.allow_broken_log = allow_broken_log
        self.allow_bad_gossip = allow_bad_gossip
        self.db = db

        # Assume successful exit
        self.rc = 0

        socket_path = os.path.join(lightning_dir, TEST_NETWORK,
                                   "lightning-rpc").format(node_id)
        self.rpc = LightningRpc(socket_path, self.executor)

        self.daemon = LightningD(lightning_dir,
                                 bitcoindproxy=bitcoind.get_proxy(),
                                 port=port,
                                 random_hsm=random_hsm,
                                 node_id=node_id)
        # If we have a disconnect string, dump it to a file for daemon.
        if disconnect:
            self.daemon.disconnect_file = os.path.join(lightning_dir,
                                                       TEST_NETWORK,
                                                       "dev_disconnect")
            with open(self.daemon.disconnect_file, "w") as f:
                f.write("\n".join(disconnect))
            self.daemon.opts["dev-disconnect"] = "dev_disconnect"
        if DEVELOPER:
            self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
            self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
            if os.getenv("DEBUG_SUBD"):
                self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
            if VALGRIND:
                self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
            if not may_reconnect:
                self.daemon.opts["dev-no-reconnect"] = None

        if options is not None:
            self.daemon.opts.update(options)
        dsn = db.get_dsn()
        if dsn is not None:
            self.daemon.opts['wallet'] = dsn
        if VALGRIND:
            self.daemon.cmd_prefix = [
                'valgrind', '-q', '--trace-children=yes',
                '--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
                '--error-exitcode=7',
                '--log-file={}/valgrind-errors.%p'.format(
                    self.daemon.lightning_dir)
            ]

    def connect(self, remote_node):
        self.rpc.connect(remote_node.info['id'], '127.0.0.1',
                         remote_node.daemon.port)

    def is_connected(self, remote_node):
        return remote_node.info['id'] in [
            p['id'] for p in self.rpc.listpeers()['peers']
        ]

    def openchannel(self,
                    remote_node,
                    capacity,
                    addrtype="p2sh-segwit",
                    confirm=True,
                    wait_for_announce=True,
                    connect=True):
        addr, wallettxid = self.fundwallet(10 * capacity, addrtype)

        if connect and not self.is_connected(remote_node):
            self.connect(remote_node)

        fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)

        # Wait for the funding transaction to be in bitcoind's mempool
        wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())

        if confirm or wait_for_announce:
            self.bitcoin.generate_block(1)

        if wait_for_announce:
            self.bitcoin.generate_block(5)

        if confirm or wait_for_announce:
            self.daemon.wait_for_log(r'Funding tx {} depth'.format(
                fundingtx['txid']))
        return {
            'address': addr,
            'wallettxid': wallettxid,
            'fundingtx': fundingtx
        }

    def fundwallet(self, sats, addrtype="p2sh-segwit"):
        addr = self.rpc.newaddr(addrtype)[addrtype]
        txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
        self.bitcoin.generate_block(1)
        self.daemon.wait_for_log(
            'Owning output .* txid {} CONFIRMED'.format(txid))
        return addr, txid

    def getactivechannels(self):
        return [c for c in self.rpc.listchannels()['channels'] if c['active']]

    def db_query(self, query):
        return self.db.query(query)

    # Assumes node is stopped!
    def db_manip(self, query):
        db = sqlite3.connect(
            os.path.join(self.daemon.lightning_dir, TEST_NETWORK,
                         "lightningd.sqlite3"))
        db.row_factory = sqlite3.Row
        c = db.cursor()
        c.execute(query)
        db.commit()
        c.close()
        db.close()

    def is_synced_with_bitcoin(self, info=None):
        if info is None:
            info = self.rpc.getinfo()
        return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info

    def start(self, wait_for_bitcoind_sync=True):
        self.daemon.start()
        # Cache `getinfo`, we'll be using it a lot
        self.info = self.rpc.getinfo()
        # This shortcut is sufficient for our simple tests.
        self.port = self.info['binding'][0]['port']
        if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(
                self.info):
            wait_for(lambda: self.is_synced_with_bitcoin())

    def stop(self, timeout=10):
        """ Attempt to do a clean shutdown, but kill if it hangs
        """

        # Tell the daemon to stop
        try:
            # May fail if the process already died
            self.rpc.stop()
        except Exception:
            pass

        self.rc = self.daemon.wait(timeout)

        # If it did not stop, be more insistent
        if self.rc is None:
            self.rc = self.daemon.stop()

        self.daemon.save_log()
        self.daemon.cleanup()

        if self.rc != 0 and not self.may_fail:
            raise ValueError("Node did not exit cleanly, rc={}".format(
                self.rc))
        else:
            return self.rc

    def restart(self, timeout=10, clean=True):
        """Stop and restart the lightning node.

        Keyword arguments:
        timeout: number of seconds to wait for a shutdown
        clean: whether to issue a `stop` RPC command before killing
        """
        if clean:
            self.stop(timeout)
        else:
            self.daemon.stop()

        self.start()

    def fund_channel(self,
                     l2,
                     amount,
                     wait_for_active=True,
                     announce_channel=True):

        # Give yourself some funds to work with
        addr = self.rpc.newaddr()['bech32']
        self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
        numfunds = len(self.rpc.listfunds()['outputs'])
        self.bitcoin.generate_block(1)
        wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)

        # Now go ahead and open a channel
        num_tx = len(self.bitcoin.rpc.getrawmempool())
        tx = self.rpc.fundchannel(l2.info['id'],
                                  amount,
                                  announce=announce_channel)['tx']

        wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
        self.bitcoin.generate_block(1)

        # Hacky way to find our output.
        scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
                                get_tx_p2wsh_outnum(self.bitcoin, tx, amount))

        if wait_for_active:
            # We wait until gossipd sees both local updates, as well as status NORMAL,
            # so it can definitely route through.
            self.daemon.wait_for_logs([
                r'update for channel {}/0 now ACTIVE'.format(scid),
                r'update for channel {}/1 now ACTIVE'.format(scid),
                'to CHANNELD_NORMAL'
            ])
            l2.daemon.wait_for_logs([
                r'update for channel {}/0 now ACTIVE'.format(scid),
                r'update for channel {}/1 now ACTIVE'.format(scid),
                'to CHANNELD_NORMAL'
            ])
        return scid

    def subd_pid(self, subd, peerid=None):
        """Get the process id of the given subdaemon, eg channeld or gossipd"""
        if peerid:
            ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'.format(peerid, subd))
        else:
            ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
        # Make sure we get latest one if it's restarted!
        for l in reversed(self.daemon.logs):
            group = ex.search(l)
            if group:
                return group.group(1)
        raise ValueError("No daemon {} found".format(subd))

    def channel_state(self, other):
        """Return the state of the channel to the other node.

        Returns None if there is no such peer, or a channel hasn't been funded
        yet.

        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['state']

    def get_channel_scid(self, other):
        """Get the short_channel_id for the channel to the other node.
        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['short_channel_id']

    def is_channel_active(self, chanid):
        channels = self.rpc.listchannels(chanid)['channels']
        active = [(c['short_channel_id'], c['channel_flags']) for c in channels
                  if c['active']]
        return (chanid, 0) in active and (chanid, 1) in active

    def wait_for_channel_onchain(self, peerid):
        txid = only_one(
            only_one(self.rpc.listpeers(peerid)['peers'])
            ['channels'])['scratch_txid']
        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())

    def wait_channel_active(self, chanid):
        wait_for(lambda: self.is_channel_active(chanid))

    # This waits until gossipd sees channel_update in both directions
    # (or for local channels, at least a local announcement)
    def wait_for_channel_updates(self, scids):
        # Could happen in any order...
        self.daemon.wait_for_logs([
            'Received channel_update for channel {}/0'.format(c) for c in scids
        ] + [
            'Received channel_update for channel {}/1'.format(c) for c in scids
        ])

    def wait_for_route(self, destination, timeout=30):
        """ Wait for a route to the destination to become available.
        """
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                self.rpc.getroute(destination.info['id'], 1, 1)
                return True
            except Exception:
                time.sleep(1)
        if time.time() > start_time + timeout:
            raise ValueError(
                "Error waiting for a route to destination {}".format(
                    destination))

    def pay(self, dst, amt, label=None):
        if not label:
            label = ''.join(
                random.choice(string.ascii_letters + string.digits)
                for _ in range(20))

        rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
        invoices = dst.rpc.listinvoices(label)['invoices']
        assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'

        routestep = {
            'msatoshi': amt,
            'id': dst.info['id'],
            'delay': 5,
            'channel': '1x1x1'
        }

        def wait_pay():
            # Up to 10 seconds for payment to succeed.
            start_time = time.time()
            while dst.rpc.listinvoices(
                    label)['invoices'][0]['status'] != 'paid':
                if time.time() > start_time + 10:
                    raise TimeoutError('Payment timed out')
                time.sleep(0.1)

        # sendpay is async now
        self.rpc.sendpay([routestep], rhash)
        # wait for sendpay to comply
        self.rpc.waitsendpay(rhash)

    # Note: this feeds through the smoother in update_feerate, so changing
    # it on a running daemon may not give expected result!
    def set_feerates(self, feerates, wait_for_effect=True):
        # (bitcoind returns bitcoin per kb, so these are * 4)

        def mock_estimatesmartfee(r):
            params = r['params']
            if params == [2, 'CONSERVATIVE']:
                feerate = feerates[0] * 4
            elif params == [4, 'ECONOMICAL']:
                feerate = feerates[1] * 4
            elif params == [100, 'ECONOMICAL']:
                feerate = feerates[2] * 4
            else:
                raise ValueError()
            return {
                'id': r['id'],
                'error': None,
                'result': {
                    'feerate': Decimal(feerate) / 10**8
                },
            }

        self.daemon.rpcproxy.mock_rpc('estimatesmartfee',
                                      mock_estimatesmartfee)

        # Technically, this waits until it's called, not until it's processed.
        # We wait until all three levels have been called.
        if wait_for_effect:
            wait_for(lambda: self.daemon.rpcproxy.mock_counts[
                'estimatesmartfee'] >= 3)

    def wait_for_onchaind_broadcast(self, name, resolve=None):
        """Wait for onchaind to drop tx name to resolve (if any)"""
        if resolve:
            r = self.daemon.wait_for_log(
                'Broadcasting {} .* to resolve {}'.format(name, resolve))
        else:
            r = self.daemon.wait_for_log(
                'Broadcasting {} .* to resolve '.format(name))

        rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
        txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']

        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())

    def query_gossip(self, querytype, *args, filters=[]):
        """Generate a gossip query, feed it into this node and get responses
        in hex"""
        query = subprocess.run(['devtools/mkquery', querytype] +
                               [str(a) for a in args],
                               check=True,
                               timeout=TIMEOUT,
                               stdout=subprocess.PIPE).stdout.strip()
        out = subprocess.run([
            'devtools/gossipwith', '--timeout-after={}'.format(
                int(math.sqrt(TIMEOUT) + 1)), '{}@localhost:{}'.format(
                    self.info['id'], self.port), query
        ],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout

        def passes_filters(hmsg, filters):
            for f in filters:
                if hmsg.startswith(f):
                    return False
            return True

        msgs = []
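        # Each message in the gossipwith output is prefixed with a 2-byte
        # big-endian length; split the stream into hex-encoded messages.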
        while len(out):
            length = struct.unpack('>H', out[0:2])[0]
            hmsg = out[2:2 + length].hex()
            if passes_filters(hmsg, filters):
                msgs.append(out[2:2 + length].hex())
            out = out[2 + length:]
        return msgs
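
A minimal usage sketch of the class above (not from the project): it assumes a pytest-style node_factory fixture whose get_node() returns an already started LightningNode; connect(), fund_channel() and pay() are the methods defined in the example.

def test_direct_payment(node_factory):
    # node_factory / get_node() are assumed test fixtures, not shown above.
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()

    # Peers must be connected before a channel can be opened.
    l1.connect(l2)

    # Open and confirm a 1,000,000 sat channel; with wait_for_active=True
    # (the default) this blocks until both nodes log the channel updates as
    # ACTIVE and the state as CHANNELD_NORMAL.
    scid = l1.fund_channel(l2, 10**6)

    # Pay 1000 msat to the directly connected peer; pay() blocks on waitsendpay().
    l1.pay(l2, 1000)
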
Code example #2
File: utils.py  Project: crowphale/lightning
class LightningNode(object):
    def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fail=False,
                 may_reconnect=False, allow_broken_log=False,
                 allow_bad_gossip=False, db=None, port=None, disconnect=None, random_hsm=None, options=None,
                 **kwargs):
        self.bitcoin = bitcoind
        self.executor = executor
        self.may_fail = may_fail
        self.may_reconnect = may_reconnect
        self.allow_broken_log = allow_broken_log
        self.allow_bad_gossip = allow_bad_gossip
        self.db = db

        # Assume successful exit
        self.rc = 0

        socket_path = os.path.join(lightning_dir, TEST_NETWORK, "lightning-rpc").format(node_id)
        self.rpc = LightningRpc(socket_path, self.executor)

        self.daemon = LightningD(
            lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
            port=port, random_hsm=random_hsm, node_id=node_id
        )
        # If we have a disconnect string, dump it to a file for daemon.
        if disconnect:
            self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
            with open(self.daemon.disconnect_file, "w") as f:
                f.write("\n".join(disconnect))
            self.daemon.opts["dev-disconnect"] = "dev_disconnect"
        if DEVELOPER:
            self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
            # Don't run --version on every subdaemon if we're valgrinding and slow.
            if SLOW_MACHINE and VALGRIND:
                self.daemon.opts["dev-no-version-checks"] = None
            if os.getenv("DEBUG_SUBD"):
                self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
            if valgrind:
                self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
            else:
                # Under valgrind, scanning can access uninitialized mem.
                self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
            if not may_reconnect:
                self.daemon.opts["dev-no-reconnect"] = None

        if options is not None:
            self.daemon.opts.update(options)
        dsn = db.get_dsn()
        if dsn is not None:
            self.daemon.opts['wallet'] = dsn
        if valgrind:
            self.daemon.cmd_prefix = [
                'valgrind',
                '-q',
                '--trace-children=yes',
                '--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
                '--error-exitcode=7',
                '--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
            ]
            # Reduce precision of errors, speeding startup and reducing memory greatly:
            if SLOW_MACHINE:
                self.daemon.cmd_prefix += ['--read-inline-info=no']

    def connect(self, remote_node):
        self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)

    def is_connected(self, remote_node):
        return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]

    def openchannel(self, remote_node, capacity=FUNDAMOUNT, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
        addr, wallettxid = self.fundwallet(10 * capacity, addrtype)

        if connect and not self.is_connected(remote_node):
            self.connect(remote_node)

        fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)

        # Wait for the funding transaction to be in bitcoind's mempool
        wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())

        if confirm or wait_for_announce:
            self.bitcoin.generate_block(1)

        if wait_for_announce:
            self.bitcoin.generate_block(5)

        if confirm or wait_for_announce:
            self.daemon.wait_for_log(
                r'Funding tx {} depth'.format(fundingtx['txid']))
        return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}

    def fundwallet(self, sats, addrtype="p2sh-segwit"):
        addr = self.rpc.newaddr(addrtype)[addrtype]
        txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
        self.bitcoin.generate_block(1)
        self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
        return addr, txid

    def fundbalancedchannel(self, remote_node, total_capacity, announce=True):
        '''
        Creates a perfectly-balanced channel, as all things should be.
        '''
        if isinstance(total_capacity, Millisatoshi):
            total_capacity = int(total_capacity.to_satoshi())
        else:
            total_capacity = int(total_capacity)

        self.fundwallet(total_capacity + 10000)
        self.rpc.connect(remote_node.info['id'], 'localhost', remote_node.port)

        # Make sure the fundchannel is confirmed.
        num_tx = len(self.bitcoin.rpc.getrawmempool())
        tx = self.rpc.fundchannel(remote_node.info['id'], total_capacity, feerate='slow', minconf=0, announce=announce, push_msat=Millisatoshi(total_capacity * 500))['tx']
        wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
        self.bitcoin.generate_block(1)

        # Generate the scid.
        # NOTE This assumes only the coinbase and the fundchannel is
        # confirmed in the block.
        return '{}x1x{}'.format(self.bitcoin.rpc.getblockcount(),
                                get_tx_p2wsh_outnum(self.bitcoin, tx, total_capacity))

    def getactivechannels(self):
        return [c for c in self.rpc.listchannels()['channels'] if c['active']]

    def db_query(self, query):
        return self.db.query(query)

    # Assumes node is stopped!
    def db_manip(self, query):
        db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
        db.row_factory = sqlite3.Row
        c = db.cursor()
        c.execute(query)
        db.commit()
        c.close()
        db.close()

    def is_synced_with_bitcoin(self, info=None):
        if info is None:
            info = self.rpc.getinfo()
        return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info

    def start(self, wait_for_bitcoind_sync=True, stderr=None):
        self.daemon.start(stderr=stderr)
        # Cache `getinfo`, we'll be using it a lot
        self.info = self.rpc.getinfo()
        # This shortcut is sufficient for our simple tests.
        self.port = self.info['binding'][0]['port']
        if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
            wait_for(lambda: self.is_synced_with_bitcoin())

    def stop(self, timeout=10):
        """ Attempt to do a clean shutdown, but kill if it hangs
        """

        # Tell the daemon to stop
        try:
            # May fail if the process already died
            self.rpc.stop()
        except Exception:
            pass

        self.rc = self.daemon.wait(timeout)

        # If it did not stop, be more insistent
        if self.rc is None:
            self.rc = self.daemon.stop()

        self.daemon.save_log()
        self.daemon.cleanup()

        if self.rc != 0 and not self.may_fail:
            raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
        else:
            return self.rc

    def restart(self, timeout=10, clean=True):
        """Stop and restart the lightning node.

        Keyword arguments:
        timeout: number of seconds to wait for a shutdown
        clean: whether to issue a `stop` RPC command before killing
        """
        if clean:
            self.stop(timeout)
        else:
            self.daemon.stop()

        self.start()

    def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
        warnings.warn("LightningNode.fund_channel is deprecated in favor of "
                      "LightningNode.fundchannel", category=DeprecationWarning)
        return self.fundchannel(l2, amount, wait_for_active, announce_channel)

    def fundchannel(self, l2, amount=FUNDAMOUNT, wait_for_active=True,
                    announce_channel=True, **kwargs):
        # Give yourself some funds to work with
        addr = self.rpc.newaddr()['bech32']

        def has_funds_on_addr(addr):
            """Check if the given address has funds in the internal wallet.
            """
            outs = self.rpc.listfunds()['outputs']
            addrs = [o['address'] for o in outs]
            return addr in addrs

        # We should not have funds on that address yet, we just generated it.
        assert(not has_funds_on_addr(addr))

        self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
        self.bitcoin.generate_block(1)

        # Now we should.
        wait_for(lambda: has_funds_on_addr(addr))

        # Now go ahead and open a channel
        res = self.rpc.fundchannel(l2.info['id'], amount,
                                   announce=announce_channel,
                                   **kwargs)
        wait_for(lambda: res['txid'] in self.bitcoin.rpc.getrawmempool())
        self.bitcoin.generate_block(1)

        # Hacky way to find our output.
        scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
                                get_tx_p2wsh_outnum(self.bitcoin, res['tx'], amount))

        if wait_for_active:
            self.wait_channel_active(scid)
            l2.wait_channel_active(scid)

        return scid, res

    def subd_pid(self, subd, peerid=None):
        """Get the process id of the given subdaemon, eg channeld or gossipd"""
        if peerid:
            ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
                            .format(peerid, subd))
        else:
            ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
        # Make sure we get latest one if it's restarted!
        for l in reversed(self.daemon.logs):
            group = ex.search(l)
            if group:
                return group.group(1)
        raise ValueError("No daemon {} found".format(subd))

    def channel_state(self, other):
        """Return the state of the channel to the other node.

        Returns None if there is no such peer, or a channel hasn't been funded
        yet.

        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['state']

    def get_channel_scid(self, other):
        """Get the short_channel_id for the channel to the other node.
        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['short_channel_id']

    def get_channel_id(self, other):
        """Get the channel_id for the channel to the other node.
        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['channel_id']

    def is_channel_active(self, chanid):
        channels = self.rpc.listchannels(chanid)['channels']
        active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
        return (chanid, 0) in active and (chanid, 1) in active

    def wait_for_channel_onchain(self, peerid):
        txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())

    def wait_channel_active(self, chanid):
        wait_for(lambda: self.is_channel_active(chanid))

    # This waits until gossipd sees channel_update in both directions
    # (or for local channels, at least a local announcement)
    def wait_for_channel_updates(self, scids):
        # Could happen in any order...
        self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
                                   for c in scids]
                                  + ['Received channel_update for channel {}/1'.format(c)
                                     for c in scids])

    def wait_for_route(self, destination, timeout=30):
        """ Wait for a route to the destination to become available.
        """
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                self.rpc.getroute(destination.info['id'], 1, 1)
                return True
            except Exception:
                time.sleep(1)
        if time.time() > start_time + timeout:
            raise ValueError("Error waiting for a route to destination {}".format(destination))

    # This helper waits for all HTLCs to settle
    def wait_for_htlcs(self):
        peers = self.rpc.listpeers()['peers']
        for p, peer in enumerate(peers):
            if 'channels' in peer:
                for c, channel in enumerate(peer['channels']):
                    if 'htlcs' in channel:
                        wait_for(lambda: len(self.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0)

    # This sends money to a directly connected peer
    def pay(self, dst, amt, label=None):
        if not label:
            label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))

        # check we are connected
        dst_id = dst.info['id']
        assert len(self.rpc.listpeers(dst_id).get('peers')) == 1

        # make an invoice
        rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
        invoices = dst.rpc.listinvoices(label)['invoices']
        assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'

        routestep = {
            'msatoshi': amt,
            'id': dst_id,
            'delay': 5,
            'channel': '1x1x1'  # note: can be bogus for 1-hop direct payments
        }

        # sendpay is async now
        self.rpc.sendpay([routestep], rhash)
        # wait for sendpay to comply
        result = self.rpc.waitsendpay(rhash)
        assert(result.get('status') == 'complete')

    # This helper sends all money to a peer until even 1 msat can't get through.
    def drain(self, peer):
        total = 0
        msat = 4294967295  # Max payment size in some configs
        while msat != 0:
            try:
                logging.debug("Drain step with size={}".format(msat))
                self.pay(peer, msat)
                total += msat
            except RpcError as e:
                logging.debug("Got an exception while draining channel: {}".format(e))
                msat //= 2
        logging.debug("Draining complete after sending a total of {}msats".format(total))
        return total

    # Note: this feeds through the smoother in update_feerate, so changing
    # it on a running daemon may not give expected result!
    def set_feerates(self, feerates, wait_for_effect=True):
        # (bitcoind returns bitcoin per kb, so these are * 4)

        def mock_estimatesmartfee(r):
            params = r['params']
            if params == [2, 'CONSERVATIVE']:
                feerate = feerates[0] * 4
            elif params == [3, 'CONSERVATIVE']:
                feerate = feerates[1] * 4
            elif params == [4, 'ECONOMICAL']:
                feerate = feerates[2] * 4
            elif params == [100, 'ECONOMICAL']:
                feerate = feerates[3] * 4
            else:
                warnings.warn("Don't have a feerate set for {}/{}.".format(
                    params[0], params[1],
                ))
                feerate = 42
            return {
                'id': r['id'],
                'error': None,
                'result': {
                    'feerate': Decimal(feerate) / 10**8
                },
            }
        self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)

        # Technically, this waits until it's called, not until it's processed.
        # We wait until all four levels have been called.
        if wait_for_effect:
            wait_for(lambda:
                     self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 4)

    # force new feerates by restarting and thus skipping slow smoothed process
    # Note: testnode must be created with: opts={'may_reconnect': True}
    def force_feerates(self, rate):
        assert(self.may_reconnect)
        self.set_feerates([rate] * 4, False)
        self.restart()
        self.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
        assert(self.rpc.feerates('perkw')['perkw']['opening'] == rate)

    def wait_for_onchaind_broadcast(self, name, resolve=None):
        """Wait for onchaind to drop tx name to resolve (if any)"""
        if resolve:
            r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
                                         .format(name, resolve))
        else:
            r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
                                         .format(name))

        rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
        txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']

        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())

    def query_gossip(self, querytype, *args, filters=[]):
        """Generate a gossip query, feed it into this node and get responses
        in hex"""
        query = subprocess.run(['devtools/mkquery',
                                querytype] + [str(a) for a in args],
                               check=True,
                               timeout=TIMEOUT,
                               stdout=subprocess.PIPE).stdout.strip()
        out = subprocess.run(['devtools/gossipwith',
                              '--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
                              '{}@localhost:{}'.format(self.info['id'],
                                                       self.port),
                              query],
                             check=True,
                             timeout=TIMEOUT, stdout=subprocess.PIPE).stdout

        def passes_filters(hmsg, filters):
            for f in filters:
                if hmsg.startswith(f):
                    return False
            return True

        msgs = []
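        # Each message in the gossipwith output is prefixed with a 2-byte
        # big-endian length; split the stream into hex-encoded messages.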
        while len(out):
            length = struct.unpack('>H', out[0:2])[0]
            hmsg = out[2:2 + length].hex()
            if passes_filters(hmsg, filters):
                msgs.append(out[2:2 + length].hex())
            out = out[2 + length:]
        return msgs
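
As with the first example, a short usage sketch under the same assumptions (a hypothetical node_factory fixture returning started nodes). Note that in this version fundchannel() returns a (scid, result) tuple and fund_channel() is only a deprecated wrapper around it.

def test_payment_and_settle(node_factory):
    # node_factory / get_node() are assumed fixtures, not part of the class above.
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()

    # Connect, then open a channel; fundchannel() returns both the
    # short_channel_id and the raw fundchannel RPC result.
    l1.connect(l2)
    scid, res = l1.fundchannel(l2, 10**6)

    # Pay the directly connected peer, then wait for all HTLCs to settle.
    l1.pay(l2, 1000)
    l1.wait_for_htlcs()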