Example #1
    def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fail=False,
                 may_reconnect=False, allow_broken_log=False,
                 allow_bad_gossip=False, db=None, port=None, disconnect=None, random_hsm=None, options=None,
                 **kwargs):
        self.bitcoin = bitcoind
        self.executor = executor
        self.may_fail = may_fail
        self.may_reconnect = may_reconnect
        self.allow_broken_log = allow_broken_log
        self.allow_bad_gossip = allow_bad_gossip
        self.db = db

        # Assume successful exit
        self.rc = 0

        socket_path = os.path.join(lightning_dir, TEST_NETWORK, "lightning-rpc").format(node_id)
        self.rpc = LightningRpc(socket_path, self.executor)

        self.daemon = LightningD(
            lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
            port=port, random_hsm=random_hsm, node_id=node_id
        )
        # If we have a disconnect string, dump it to a file for daemon.
        if disconnect:
            self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
            with open(self.daemon.disconnect_file, "w") as f:
                f.write("\n".join(disconnect))
            self.daemon.opts["dev-disconnect"] = "dev_disconnect"
        if DEVELOPER:
            self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
            # Don't run --version on every subdaemon if we're valgrinding and slow.
            if SLOW_MACHINE and VALGRIND:
                self.daemon.opts["dev-no-version-checks"] = None
            if os.getenv("DEBUG_SUBD"):
                self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
            if valgrind:
                self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
            else:
                # Under valgrind, scanning can access uninitialized mem.
                self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
            if not may_reconnect:
                self.daemon.opts["dev-no-reconnect"] = None

        if options is not None:
            self.daemon.opts.update(options)
        dsn = db.get_dsn()
        if dsn is not None:
            self.daemon.opts['wallet'] = dsn
        if valgrind:
            self.daemon.cmd_prefix = [
                'valgrind',
                '-q',
                '--trace-children=yes',
                '--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
                '--error-exitcode=7',
                '--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
            ]
            # Reduce precision of errors, speeding startup and reducing memory greatly:
            if SLOW_MACHINE:
                self.daemon.cmd_prefix += ['--read-inline-info=no']
Example #2
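# Context note: this excerpt assumes `nodes` (an iterable of node indices), a configured
# `logger`, and a `get_privkey(ln_dir, pubkey)` helper are defined elsewhere in the
# original script; only the LightningRpc usage is shown here.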
def main():
    for node in nodes:
        ln_dir = f"/tmp/l{node + 1}-regtest/regtest"
        ln = LightningRpc(f"{ln_dir}/lightning-rpc")
        pubkey = ln.getinfo()["id"]
        logger.info(f"Node {node +1}:")
        logger.info(f"privkey: {get_privkey(ln_dir, pubkey)}")
        logger.info(f"pubkey:  {pubkey}")
Example #3
    def __init__(self, node_id, lightning_dir, bitcoind, executor, may_fail=False,
                 may_reconnect=False, allow_broken_log=False,
                 allow_bad_gossip=False, db=None, port=None, disconnect=None, random_hsm=None, log_all_io=None, options=None, **kwargs):
        self.bitcoin = bitcoind
        self.executor = executor
        self.may_fail = may_fail
        self.may_reconnect = may_reconnect
        self.allow_broken_log = allow_broken_log
        self.allow_bad_gossip = allow_bad_gossip
        self.db = db

        socket_path = os.path.join(lightning_dir, "lightning-rpc").format(node_id)
        self.rpc = LightningRpc(socket_path, self.executor)

        self.daemon = LightningD(
            lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
            port=port, random_hsm=random_hsm, node_id=node_id
        )
        # If we have a disconnect string, dump it to a file for daemon.
        if disconnect:
            self.daemon.disconnect_file = os.path.join(lightning_dir, "dev_disconnect")
            with open(self.daemon.disconnect_file, "w") as f:
                f.write("\n".join(disconnect))
            self.daemon.opts["dev-disconnect"] = "dev_disconnect"
        if log_all_io:
            assert DEVELOPER
            self.daemon.env["LIGHTNINGD_DEV_LOG_IO"] = "1"
            self.daemon.opts["log-level"] = "io"
        if DEVELOPER:
            self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
            self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
            if os.getenv("DEBUG_SUBD"):
                self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
            if VALGRIND:
                self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
            if not may_reconnect:
                self.daemon.opts["dev-no-reconnect"] = None

        if options is not None:
            self.daemon.opts.update(options)
        dsn = db.get_dsn()
        if dsn is not None:
            self.daemon.opts['wallet'] = dsn
        if VALGRIND:
            self.daemon.cmd_prefix = [
                'valgrind',
                '-q',
                '--trace-children=yes',
                '--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
                '--error-exitcode=7',
                '--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
            ]
Example #4
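# Context note: `prune_nodes`, `build_node_map`, `merge_channels`, `get_eligible`,
# `connect_to` and `fund_connected` are helper functions defined elsewhere in the
# original script; they are not part of pyln.client.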
def main(connect, fund):
    rpc = LightningRpc("/home/bits/.lightning/testnet/lightning-rpc")
    channels = rpc.listchannels()['channels']
    nodes = rpc.listnodes()['nodes']

    pruned = prune_nodes(nodes)
    node_map = build_node_map(pruned)
    merge_channels(node_map, channels)
    eligible = get_eligible(node_map, 2)

    if connect:
        connected = connect_to(rpc, eligible)
        print("connected to {} of {} eligible nodes".format(connected, len(eligible)))

    if fund:
        fund_connected(rpc)
Example #5
def ln_init_global(chains):
    to_return = {}
    for chain_name in chains:
        to_return[chain_name] = {}
        for user_name in chains[chain_name]['users']:
            to_return[chain_name][user_name] = LightningRpc('/wd/clightning_datadir_%s/%s/lightning-rpc' % (
                user_name, chain_name))
    return to_return
Example #6
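# Thin wrapper around LightningRpc: each call is wrapped in a try/except so that a
# failed or unreachable daemon yields None instead of raising to the caller.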
class LightningDaemon(object):
    def __init__(self, daemon_rpc):
        self.rpc = LightningRpc(daemon_rpc)

    def invoice_c_lightning(self, msatoshi, label, description):
        expiry = INVOICE_EXPIRY + random.randint(3, 9)
        try:
            result = self.rpc.invoice(msatoshi, label, description,
                                      expiry=expiry)
            logging.info("invoicing daemon. got: %s" %
                         json.dumps(result, indent=1, sort_keys=True))
            return result
        except Exception:
            return None

    def get_c_lightning_invoices(self):
        try:
            return self.rpc.listinvoices()
        except Exception:
            return None
        #logging.info(json.dumps(result, indent=1, sort_keys=True))

    def _delete(self, label, state="paid"):
        try:
            result = self.rpc.delinvoice(label, state)
        except IOError:
            # An unpaid invoice could have expired in the last split second
            # due to a race, so retry deleting it as expired.
            if state != "unpaid":
                raise
            result = self.rpc.delinvoice(label, "expired")
        return result

    def delete(self, label, state="paid"):
        try:
            return self._delete(label, state=state)
        except Exception:
            return None

    def getinfo(self):
        try:
            return self.rpc.getinfo()
        except Exception:
            return None

    def listfunds(self):
        try:
            return self.rpc.listfunds()
        except Exception:
            return None

    def listnodes(self):
        try:
            return self.rpc.listnodes()
        except Exception:
            return None
Example #7
 def __init__(self, nodes_config_path):
     self.sibling_nodes = {}
     with open(nodes_config_path) as json_file:
         data = json.load(json_file)
         self.invoices_expiry = data['invoices_expiry']
         # The chain id is the hash of the genesis block
         # REM petname and bip173 are just local settings, others can set them differently
         self.chains_by_bip173 = data['chains_by_bip173']
         for chain_id, node_config in data['nodes'].items():
             print(chain_id, node_config)
             self.sibling_nodes[chain_id] = LightningRpc(node_config)
         self.other_gateways = data['other_gateways']
Example #8
    def __init__(self, node_config):
        from pyln.client import LightningRpc

        self.config = node_config
        self.is_onchain = False

        for i in range(config.connection_attempts):
            try:
                if config.tunnel_host is None:
                    rpc_file = self.config['clightning_rpc_file']
                else:
                    rpc_file = "lightning-rpc"

                logging.info(
                    "Attempting to connect to clightning with unix domain socket: {}"
                    .format(rpc_file))
                self.clightning = LightningRpc(rpc_file)

                logging.info("Getting clightning info...")
                info = self.clightning.getinfo()
                logging.info(info)

                logging.info("Successfully connected to clightning.")
                break

            except Exception as e:
                logging.error(e)
                if i < 5:
                    time.sleep(2)
                else:
                    time.sleep(60)
                logging.info("Attempting again... {}/{}...".format(
                    i + 1, config.connection_attempts))
        else:
            raise Exception(
                "Could not connect to clightning. Check your port tunneling settings and try again."
            )

        logging.info("Ready for payments requests.")
        return
Example #9
class Daemon(object):
    def __init__(self):
        self.rpc = LightningRpc(RPC_FILE)

    def _gen_new_label(self):
        label_bytes = uuid.uuid4().bytes
        label_str = b64encode(label_bytes).decode("utf8")
        return label_str

    def invoice(self):
        msatoshis = 12000
        description = "f**k you, pay me"
        label = self._gen_new_label()
        i = self.rpc.invoice(msatoshis, label, description)
        return {'label': label, 'bolt11': i['bolt11']}
Example #10
class clightning:
    def __init__(self, node_config):
        from pyln.client import LightningRpc

        self.config = node_config
        self.is_onchain = False

        for i in range(config.connection_attempts):
            try:
                if config.tunnel_host is None:
                    rpc_file = self.config['clightning_rpc_file']
                else:
                    rpc_file = "lightning-rpc"

                logging.info(
                    "Attempting to connect to clightning with unix domain socket: {}"
                    .format(rpc_file))
                self.clightning = LightningRpc(rpc_file)

                logging.info("Getting clightning info...")
                info = self.clightning.getinfo()
                logging.info(info)

                logging.info("Successfully connected to clightning.")
                break

            except Exception as e:
                logging.error(e)
                if i < 5:
                    time.sleep(2)
                else:
                    time.sleep(60)
                logging.info("Attempting again... {}/{}...".format(
                    i + 1, config.connection_attempts))
        else:
            raise Exception(
                "Could not connect to clightning. Check your port tunneling settings and try again."
            )

        logging.info("Ready for payments requests.")
        return

    def create_qr(self, uuid, address, value):
        qr_str = "{}".format(address.upper())
        img = qrcode.make(qr_str)
        img.save("static/qr_codes/{}.png".format(uuid))
        return

    def get_info(self):
        return self.clightning.getinfo()

    def get_uri(self):
        info = self.get_info()
        address = info["address"][0]
        return info["id"] + "@" + address["address"] + ":" + str(
            address["port"])

    # Create lightning invoice
    def create_clightning_invoice(self, btc_amount, label, expiry):
        # Convert the BTC amount to millisatoshi (1 BTC = 10**8 sat = 10**11 msat)
        msats_amount = int(float(btc_amount) * 10**(3 + 8))
        lnd_invoice = self.clightning.invoice(msats_amount, label,
                                              "SatSale-{}".format(label),
                                              expiry)
        return lnd_invoice["bolt11"], lnd_invoice["payment_hash"]

    def get_address(self, amount, label, expiry):
        address, r_hash = self.create_clightning_invoice(amount, label, expiry)
        return address, r_hash

    # Check whether the payment has been paid
    def check_payment(self, uuid):
        invoices = self.clightning.listinvoices(uuid)["invoices"]

        if len(invoices) == 0:
            logging.error("Could not find invoice on node. Something's wrong.")
            return 0, 0

        invoice = invoices[0]

        if invoice["status"] != "paid":
            conf_paid = 0
            unconf_paid = 0
        else:
            # Store amount paid and convert to BTC units
            conf_paid = int(invoice["msatoshi_received"]) / 10**(3 + 8)
            unconf_paid = 0

        return conf_paid, unconf_paid
Example #11
 def init(self):
     # Create an instance of the LightningRpc object using the Core Lightning daemon on your computer.
     logger.info('initializing clightning client: {}'.format(self.rpc_path))
     self.lrpc = LightningRpc(self.rpc_path)
Example #12
class CLightningClient(LightningClient):
    """Access a c-lightning instance using the Python API."""
    def __init__(
        self,
        rpc_path: str,
    ) -> None:
        self.rpc_path = rpc_path

    def init(self):
        # Create an instance of the LightningRpc object using the Core Lightning daemon on your computer.
        logger.info('initializing clightning client: {}'.format(self.rpc_path))
        self.lrpc = LightningRpc(self.rpc_path)

    def pay_invoice(self, payment_request: str) -> Payment:
        payment = self.lrpc.pay(payment_request)
        if payment['status'] == 'complete':
            return Payment(
                payment_preimage=bytes.fromhex(payment['payment_preimage']),
                payment_error='',
            )
        else:
            return Payment(
                payment_preimage=b'',
                payment_error='Payment failed.',
            )

    def get_info(self) -> Info:
        info = self.lrpc.getinfo()
        pubkey = info['id']
        binding = info.get('binding')
        uris = []
        if binding:
            for b in binding:
                address = b['address']
                if ':' not in address:  # TODO: Change type of uri to LightningAddress.
                    port = b['port']
                    uri = f"{pubkey}@{address}:{port}"
                    uris.append(uri)
        return Info(uris=uris, )

    def decode_pay_req(self, payment_request: str) -> PayReq:
        pay_req = self.lrpc.decodepay(payment_request)
        return PayReq(
            payment_hash=bytes.fromhex(pay_req['payment_hash']),
            payment_point=b'',  # TODO: Use real payment point.
            num_msat=pay_req['amount_msat'].millisatoshis,
            destination=pay_req['payee'],
            timestamp=int(pay_req['created_at']),
            expiry=int(pay_req['expiry']),
        )

    def create_invoice(self, preimage: bytes, amount_msat: int) -> Invoice:
        created_invoice = self.lrpc.invoice(
            amount_msat,
            label=str(uuid.uuid4()),
            description="Squeaknode invoice",
            preimage=preimage.hex(),
        )
        creation_time = int(time.time())
        expiry = int(created_invoice['expires_at']) - creation_time
        return Invoice(
            r_hash=bytes.fromhex(created_invoice['payment_hash']),
            payment_request=created_invoice['bolt11'],
            value_msat=amount_msat,
            settled=False,
            settle_index=0,
            creation_date=creation_time,
            expiry=expiry,
        )

    def subscribe_invoices(self, settle_index: int) -> InvoiceStream:
        def cancel_fn():
            return None

        def get_invoice_stream():
            try:
                pay_index = settle_index
                while True:
                    payment = self.lrpc.waitanyinvoice(lastpay_index=pay_index)
                    if payment['status'] == 'paid':
                        pay_index = payment.get('pay_index') or 0
                        yield Invoice(
                            r_hash=bytes.fromhex(payment['payment_hash']),
                            payment_request=payment.get('bolt11') or '',
                            value_msat=payment.get('value_msat'),
                            settled=True,
                            settle_index=pay_index,
                            creation_date=0,  # This value is ignored.
                            expiry=payment['expires_at'],
                        )
            except Exception:
                raise InvoiceSubscriptionError()

        return InvoiceStream(
            cancel=cancel_fn,
            result_stream=get_invoice_stream(),
        )
Example #13
class LightningNode(object):
    def __init__(self,
                 node_id,
                 lightning_dir,
                 bitcoind,
                 executor,
                 may_fail=False,
                 may_reconnect=False,
                 allow_broken_log=False,
                 allow_bad_gossip=False,
                 db=None,
                 port=None,
                 disconnect=None,
                 random_hsm=None,
                 options=None,
                 **kwargs):
        self.bitcoin = bitcoind
        self.executor = executor
        self.may_fail = may_fail
        self.may_reconnect = may_reconnect
        self.allow_broken_log = allow_broken_log
        self.allow_bad_gossip = allow_bad_gossip
        self.db = db

        # Assume successful exit
        self.rc = 0

        socket_path = os.path.join(lightning_dir, TEST_NETWORK,
                                   "lightning-rpc").format(node_id)
        self.rpc = LightningRpc(socket_path, self.executor)

        self.daemon = LightningD(lightning_dir,
                                 bitcoindproxy=bitcoind.get_proxy(),
                                 port=port,
                                 random_hsm=random_hsm,
                                 node_id=node_id)
        # If we have a disconnect string, dump it to a file for daemon.
        if disconnect:
            self.daemon.disconnect_file = os.path.join(lightning_dir,
                                                       TEST_NETWORK,
                                                       "dev_disconnect")
            with open(self.daemon.disconnect_file, "w") as f:
                f.write("\n".join(disconnect))
            self.daemon.opts["dev-disconnect"] = "dev_disconnect"
        if DEVELOPER:
            self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
            self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
            if os.getenv("DEBUG_SUBD"):
                self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
            if VALGRIND:
                self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
            if not may_reconnect:
                self.daemon.opts["dev-no-reconnect"] = None

        if options is not None:
            self.daemon.opts.update(options)
        dsn = db.get_dsn()
        if dsn is not None:
            self.daemon.opts['wallet'] = dsn
        if VALGRIND:
            self.daemon.cmd_prefix = [
                'valgrind', '-q', '--trace-children=yes',
                '--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
                '--error-exitcode=7',
                '--log-file={}/valgrind-errors.%p'.format(
                    self.daemon.lightning_dir)
            ]

    def connect(self, remote_node):
        self.rpc.connect(remote_node.info['id'], '127.0.0.1',
                         remote_node.daemon.port)

    def is_connected(self, remote_node):
        return remote_node.info['id'] in [
            p['id'] for p in self.rpc.listpeers()['peers']
        ]

    def openchannel(self,
                    remote_node,
                    capacity,
                    addrtype="p2sh-segwit",
                    confirm=True,
                    wait_for_announce=True,
                    connect=True):
        addr, wallettxid = self.fundwallet(10 * capacity, addrtype)

        if connect and not self.is_connected(remote_node):
            self.connect(remote_node)

        fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)

        # Wait for the funding transaction to be in bitcoind's mempool
        wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())

        if confirm or wait_for_announce:
            self.bitcoin.generate_block(1)

        if wait_for_announce:
            self.bitcoin.generate_block(5)

        if confirm or wait_for_announce:
            self.daemon.wait_for_log(r'Funding tx {} depth'.format(
                fundingtx['txid']))
        return {
            'address': addr,
            'wallettxid': wallettxid,
            'fundingtx': fundingtx
        }

    def fundwallet(self, sats, addrtype="p2sh-segwit"):
        addr = self.rpc.newaddr(addrtype)[addrtype]
        txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
        self.bitcoin.generate_block(1)
        self.daemon.wait_for_log(
            'Owning output .* txid {} CONFIRMED'.format(txid))
        return addr, txid

    def getactivechannels(self):
        return [c for c in self.rpc.listchannels()['channels'] if c['active']]

    def db_query(self, query):
        return self.db.query(query)

    # Assumes node is stopped!
    def db_manip(self, query):
        db = sqlite3.connect(
            os.path.join(self.daemon.lightning_dir, TEST_NETWORK,
                         "lightningd.sqlite3"))
        db.row_factory = sqlite3.Row
        c = db.cursor()
        c.execute(query)
        db.commit()
        c.close()
        db.close()

    def is_synced_with_bitcoin(self, info=None):
        if info is None:
            info = self.rpc.getinfo()
        return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info

    def start(self, wait_for_bitcoind_sync=True):
        self.daemon.start()
        # Cache `getinfo`, we'll be using it a lot
        self.info = self.rpc.getinfo()
        # This shortcut is sufficient for our simple tests.
        self.port = self.info['binding'][0]['port']
        if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(
                self.info):
            wait_for(lambda: self.is_synced_with_bitcoin())

    def stop(self, timeout=10):
        """ Attempt to do a clean shutdown, but kill if it hangs
        """

        # Tell the daemon to stop
        try:
            # May fail if the process already died
            self.rpc.stop()
        except Exception:
            pass

        self.rc = self.daemon.wait(timeout)

        # If it did not stop be more insistent
        if self.rc is None:
            self.rc = self.daemon.stop()

        self.daemon.save_log()
        self.daemon.cleanup()

        if self.rc != 0 and not self.may_fail:
            raise ValueError("Node did not exit cleanly, rc={}".format(
                self.rc))
        else:
            return self.rc

    def restart(self, timeout=10, clean=True):
        """Stop and restart the lightning node.

        Keyword arguments:
        timeout: number of seconds to wait for a shutdown
        clean: whether to issue a `stop` RPC command before killing
        """
        if clean:
            self.stop(timeout)
        else:
            self.daemon.stop()

        self.start()

    def fund_channel(self,
                     l2,
                     amount,
                     wait_for_active=True,
                     announce_channel=True):

        # Give yourself some funds to work with
        addr = self.rpc.newaddr()['bech32']
        self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
        numfunds = len(self.rpc.listfunds()['outputs'])
        self.bitcoin.generate_block(1)
        wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)

        # Now go ahead and open a channel
        num_tx = len(self.bitcoin.rpc.getrawmempool())
        tx = self.rpc.fundchannel(l2.info['id'],
                                  amount,
                                  announce=announce_channel)['tx']

        wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
        self.bitcoin.generate_block(1)

        # Hacky way to find our output.
        scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
                                get_tx_p2wsh_outnum(self.bitcoin, tx, amount))

        if wait_for_active:
            # We wait until gossipd sees both local updates, as well as status NORMAL,
            # so it can definitely route through.
            self.daemon.wait_for_logs([
                r'update for channel {}/0 now ACTIVE'.format(scid),
                r'update for channel {}/1 now ACTIVE'.format(scid),
                'to CHANNELD_NORMAL'
            ])
            l2.daemon.wait_for_logs([
                r'update for channel {}/0 now ACTIVE'.format(scid),
                r'update for channel {}/1 now ACTIVE'.format(scid),
                'to CHANNELD_NORMAL'
            ])
        return scid

    def subd_pid(self, subd, peerid=None):
        """Get the process id of the given subdaemon, eg channeld or gossipd"""
        if peerid:
            ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'.format(peerid, subd))
        else:
            ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
        # Make sure we get latest one if it's restarted!
        for l in reversed(self.daemon.logs):
            group = ex.search(l)
            if group:
                return group.group(1)
        raise ValueError("No daemon {} found".format(subd))

    def channel_state(self, other):
        """Return the state of the channel to the other node.

        Returns None if there is no such peer, or a channel hasn't been funded
        yet.

        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['state']

    def get_channel_scid(self, other):
        """Get the short_channel_id for the channel to the other node.
        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['short_channel_id']

    def is_channel_active(self, chanid):
        channels = self.rpc.listchannels(chanid)['channels']
        active = [(c['short_channel_id'], c['channel_flags']) for c in channels
                  if c['active']]
        return (chanid, 0) in active and (chanid, 1) in active

    def wait_for_channel_onchain(self, peerid):
        txid = only_one(
            only_one(self.rpc.listpeers(peerid)['peers'])
            ['channels'])['scratch_txid']
        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())

    def wait_channel_active(self, chanid):
        wait_for(lambda: self.is_channel_active(chanid))

    # This waits until gossipd sees channel_update in both directions
    # (or for local channels, at least a local announcement)
    def wait_for_channel_updates(self, scids):
        # Could happen in any order...
        self.daemon.wait_for_logs([
            'Received channel_update for channel {}/0'.format(c) for c in scids
        ] + [
            'Received channel_update for channel {}/1'.format(c) for c in scids
        ])

    def wait_for_route(self, destination, timeout=30):
        """ Wait for a route to the destination to become available.
        """
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                self.rpc.getroute(destination.info['id'], 1, 1)
                return True
            except Exception:
                time.sleep(1)
        if time.time() > start_time + timeout:
            raise ValueError(
                "Error waiting for a route to destination {}".format(
                    destination))

    def pay(self, dst, amt, label=None):
        if not label:
            label = ''.join(
                random.choice(string.ascii_letters + string.digits)
                for _ in range(20))

        rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
        invoices = dst.rpc.listinvoices(label)['invoices']
        assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'

        routestep = {
            'msatoshi': amt,
            'id': dst.info['id'],
            'delay': 5,
            'channel': '1x1x1'
        }

        def wait_pay():
            # Up to 10 seconds for payment to succeed.
            start_time = time.time()
            while dst.rpc.listinvoices(
                    label)['invoices'][0]['status'] != 'paid':
                if time.time() > start_time + 10:
                    raise TimeoutError('Payment timed out')
                time.sleep(0.1)
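
        # Note: wait_pay() above is defined but never called in this version of the
        # helper; the rpc.waitsendpay(rhash) call below already blocks until the
        # payment succeeds or fails.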

        # sendpay is async now
        self.rpc.sendpay([routestep], rhash)
        # wait for sendpay to comply
        self.rpc.waitsendpay(rhash)

    # Note: this feeds through the smoother in update_feerate, so changing
    # it on a running daemon may not give expected result!
    def set_feerates(self, feerates, wait_for_effect=True):
        # (bitcoind returns bitcoin per kb, so these are * 4)

        def mock_estimatesmartfee(r):
            params = r['params']
            if params == [2, 'CONSERVATIVE']:
                feerate = feerates[0] * 4
            elif params == [4, 'ECONOMICAL']:
                feerate = feerates[1] * 4
            elif params == [100, 'ECONOMICAL']:
                feerate = feerates[2] * 4
            else:
                raise ValueError()
            return {
                'id': r['id'],
                'error': None,
                'result': {
                    'feerate': Decimal(feerate) / 10**8
                },
            }

        self.daemon.rpcproxy.mock_rpc('estimatesmartfee',
                                      mock_estimatesmartfee)

        # Technically, this waits until it's called, not until it's processed.
        # We wait until all three levels have been called.
        if wait_for_effect:
            wait_for(lambda: self.daemon.rpcproxy.mock_counts[
                'estimatesmartfee'] >= 3)

    def wait_for_onchaind_broadcast(self, name, resolve=None):
        """Wait for onchaind to drop tx name to resolve (if any)"""
        if resolve:
            r = self.daemon.wait_for_log(
                'Broadcasting {} .* to resolve {}'.format(name, resolve))
        else:
            r = self.daemon.wait_for_log(
                'Broadcasting {} .* to resolve '.format(name))

        rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
        txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']

        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())

    def query_gossip(self, querytype, *args, filters=[]):
        """Generate a gossip query, feed it into this node and get responses
        in hex"""
        query = subprocess.run(['devtools/mkquery', querytype] +
                               [str(a) for a in args],
                               check=True,
                               timeout=TIMEOUT,
                               stdout=subprocess.PIPE).stdout.strip()
        out = subprocess.run([
            'devtools/gossipwith', '--timeout-after={}'.format(
                int(math.sqrt(TIMEOUT) + 1)), '{}@localhost:{}'.format(
                    self.info['id'], self.port), query
        ],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout

        def passes_filters(hmsg, filters):
            for f in filters:
                if hmsg.startswith(f):
                    return False
            return True

        msgs = []
        while len(out):
            length = struct.unpack('>H', out[0:2])[0]
            hmsg = out[2:2 + length].hex()
            if passes_filters(hmsg, filters):
                msgs.append(out[2:2 + length].hex())
            out = out[2 + length:]
        return msgs
Example #14
 def __init__(self, daemon_rpc):
     self.rpc = LightningRpc(daemon_rpc)
Example #15
class LightningNode(object):
    def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fail=False,
                 may_reconnect=False, allow_broken_log=False,
                 allow_bad_gossip=False, db=None, port=None, disconnect=None, random_hsm=None, options=None,
                 **kwargs):
        self.bitcoin = bitcoind
        self.executor = executor
        self.may_fail = may_fail
        self.may_reconnect = may_reconnect
        self.allow_broken_log = allow_broken_log
        self.allow_bad_gossip = allow_bad_gossip
        self.db = db

        # Assume successful exit
        self.rc = 0

        socket_path = os.path.join(lightning_dir, TEST_NETWORK, "lightning-rpc").format(node_id)
        self.rpc = LightningRpc(socket_path, self.executor)

        self.daemon = LightningD(
            lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
            port=port, random_hsm=random_hsm, node_id=node_id
        )
        # If we have a disconnect string, dump it to a file for daemon.
        if disconnect:
            self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
            with open(self.daemon.disconnect_file, "w") as f:
                f.write("\n".join(disconnect))
            self.daemon.opts["dev-disconnect"] = "dev_disconnect"
        if DEVELOPER:
            self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
            # Don't run --version on every subdaemon if we're valgrinding and slow.
            if SLOW_MACHINE and VALGRIND:
                self.daemon.opts["dev-no-version-checks"] = None
            if os.getenv("DEBUG_SUBD"):
                self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
            if valgrind:
                self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
            else:
                # Under valgrind, scanning can access uninitialized mem.
                self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
            if not may_reconnect:
                self.daemon.opts["dev-no-reconnect"] = None

        if options is not None:
            self.daemon.opts.update(options)
        dsn = db.get_dsn()
        if dsn is not None:
            self.daemon.opts['wallet'] = dsn
        if valgrind:
            self.daemon.cmd_prefix = [
                'valgrind',
                '-q',
                '--trace-children=yes',
                '--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
                '--error-exitcode=7',
                '--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
            ]
            # Reduce precision of errors, speeding startup and reducing memory greatly:
            if SLOW_MACHINE:
                self.daemon.cmd_prefix += ['--read-inline-info=no']

    def connect(self, remote_node):
        self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)

    def is_connected(self, remote_node):
        return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]

    def openchannel(self, remote_node, capacity=FUNDAMOUNT, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
        addr, wallettxid = self.fundwallet(10 * capacity, addrtype)

        if connect and not self.is_connected(remote_node):
            self.connect(remote_node)

        fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)

        # Wait for the funding transaction to be in bitcoind's mempool
        wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())

        if confirm or wait_for_announce:
            self.bitcoin.generate_block(1)

        if wait_for_announce:
            self.bitcoin.generate_block(5)

        if confirm or wait_for_announce:
            self.daemon.wait_for_log(
                r'Funding tx {} depth'.format(fundingtx['txid']))
        return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}

    def fundwallet(self, sats, addrtype="p2sh-segwit"):
        addr = self.rpc.newaddr(addrtype)[addrtype]
        txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
        self.bitcoin.generate_block(1)
        self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
        return addr, txid

    def fundbalancedchannel(self, remote_node, total_capacity, announce=True):
        '''
        Creates a perfectly-balanced channel, as all things should be.
        '''
        if isinstance(total_capacity, Millisatoshi):
            total_capacity = int(total_capacity.to_satoshi())
        else:
            total_capacity = int(total_capacity)

        self.fundwallet(total_capacity + 10000)
        self.rpc.connect(remote_node.info['id'], 'localhost', remote_node.port)

        # Make sure the fundchannel is confirmed.
        num_tx = len(self.bitcoin.rpc.getrawmempool())
        tx = self.rpc.fundchannel(remote_node.info['id'], total_capacity, feerate='slow', minconf=0, announce=announce, push_msat=Millisatoshi(total_capacity * 500))['tx']
        wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
        self.bitcoin.generate_block(1)

        # Generate the scid.
        # NOTE This assumes only the coinbase and the fundchannel is
        # confirmed in the block.
        return '{}x1x{}'.format(self.bitcoin.rpc.getblockcount(),
                                get_tx_p2wsh_outnum(self.bitcoin, tx, total_capacity))

    def getactivechannels(self):
        return [c for c in self.rpc.listchannels()['channels'] if c['active']]

    def db_query(self, query):
        return self.db.query(query)

    # Assumes node is stopped!
    def db_manip(self, query):
        db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
        db.row_factory = sqlite3.Row
        c = db.cursor()
        c.execute(query)
        db.commit()
        c.close()
        db.close()

    def is_synced_with_bitcoin(self, info=None):
        if info is None:
            info = self.rpc.getinfo()
        return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info

    def start(self, wait_for_bitcoind_sync=True, stderr=None):
        self.daemon.start(stderr=stderr)
        # Cache `getinfo`, we'll be using it a lot
        self.info = self.rpc.getinfo()
        # This shortcut is sufficient for our simple tests.
        self.port = self.info['binding'][0]['port']
        if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
            wait_for(lambda: self.is_synced_with_bitcoin())

    def stop(self, timeout=10):
        """ Attempt to do a clean shutdown, but kill if it hangs
        """

        # Tell the daemon to stop
        try:
            # May fail if the process already died
            self.rpc.stop()
        except Exception:
            pass

        self.rc = self.daemon.wait(timeout)

        # If it did not stop be more insistent
        if self.rc is None:
            self.rc = self.daemon.stop()

        self.daemon.save_log()
        self.daemon.cleanup()

        if self.rc != 0 and not self.may_fail:
            raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
        else:
            return self.rc

    def restart(self, timeout=10, clean=True):
        """Stop and restart the lightning node.

        Keyword arguments:
        timeout: number of seconds to wait for a shutdown
        clean: whether to issue a `stop` RPC command before killing
        """
        if clean:
            self.stop(timeout)
        else:
            self.daemon.stop()

        self.start()

    def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
        warnings.warn("LightningNode.fund_channel is deprecated in favor of "
                      "LightningNode.fundchannel", category=DeprecationWarning)
        return self.fundchannel(l2, amount, wait_for_active, announce_channel)

    def fundchannel(self, l2, amount=FUNDAMOUNT, wait_for_active=True,
                    announce_channel=True, **kwargs):
        # Give yourself some funds to work with
        addr = self.rpc.newaddr()['bech32']

        def has_funds_on_addr(addr):
            """Check if the given address has funds in the internal wallet.
            """
            outs = self.rpc.listfunds()['outputs']
            addrs = [o['address'] for o in outs]
            return addr in addrs

        # We should not have funds on that address yet, we just generated it.
        assert(not has_funds_on_addr(addr))

        self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
        self.bitcoin.generate_block(1)

        # Now we should.
        wait_for(lambda: has_funds_on_addr(addr))

        # Now go ahead and open a channel
        res = self.rpc.fundchannel(l2.info['id'], amount,
                                   announce=announce_channel,
                                   **kwargs)
        wait_for(lambda: res['txid'] in self.bitcoin.rpc.getrawmempool())
        self.bitcoin.generate_block(1)

        # Hacky way to find our output.
        scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
                                get_tx_p2wsh_outnum(self.bitcoin, res['tx'], amount))

        if wait_for_active:
            self.wait_channel_active(scid)
            l2.wait_channel_active(scid)

        return scid, res

    def subd_pid(self, subd, peerid=None):
        """Get the process id of the given subdaemon, eg channeld or gossipd"""
        if peerid:
            ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
                            .format(peerid, subd))
        else:
            ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
        # Make sure we get latest one if it's restarted!
        for l in reversed(self.daemon.logs):
            group = ex.search(l)
            if group:
                return group.group(1)
        raise ValueError("No daemon {} found".format(subd))

    def channel_state(self, other):
        """Return the state of the channel to the other node.

        Returns None if there is no such peer, or a channel hasn't been funded
        yet.

        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['state']

    def get_channel_scid(self, other):
        """Get the short_channel_id for the channel to the other node.
        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['short_channel_id']

    def get_channel_id(self, other):
        """Get the channel_id for the channel to the other node.
        """
        peers = self.rpc.listpeers(other.info['id'])['peers']
        if not peers or 'channels' not in peers[0]:
            return None
        channel = peers[0]['channels'][0]
        return channel['channel_id']

    def is_channel_active(self, chanid):
        channels = self.rpc.listchannels(chanid)['channels']
        active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
        return (chanid, 0) in active and (chanid, 1) in active

    def wait_for_channel_onchain(self, peerid):
        txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())

    def wait_channel_active(self, chanid):
        wait_for(lambda: self.is_channel_active(chanid))

    # This waits until gossipd sees channel_update in both directions
    # (or for local channels, at least a local announcement)
    def wait_for_channel_updates(self, scids):
        # Could happen in any order...
        self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
                                   for c in scids]
                                  + ['Received channel_update for channel {}/1'.format(c)
                                     for c in scids])

    def wait_for_route(self, destination, timeout=30):
        """ Wait for a route to the destination to become available.
        """
        start_time = time.time()
        while time.time() < start_time + timeout:
            try:
                self.rpc.getroute(destination.info['id'], 1, 1)
                return True
            except Exception:
                time.sleep(1)
        if time.time() > start_time + timeout:
            raise ValueError("Error waiting for a route to destination {}".format(destination))

    # This helper waits for all HTLCs to settle
    def wait_for_htlcs(self):
        peers = self.rpc.listpeers()['peers']
        for p, peer in enumerate(peers):
            if 'channels' in peer:
                for c, channel in enumerate(peer['channels']):
                    if 'htlcs' in channel:
                        wait_for(lambda: len(self.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0)

    # This sends money to a directly connected peer
    def pay(self, dst, amt, label=None):
        if not label:
            label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))

        # check we are connected
        dst_id = dst.info['id']
        assert len(self.rpc.listpeers(dst_id).get('peers')) == 1

        # make an invoice
        rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
        invoices = dst.rpc.listinvoices(label)['invoices']
        assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'

        routestep = {
            'msatoshi': amt,
            'id': dst_id,
            'delay': 5,
            'channel': '1x1x1'  # note: can be bogus for 1-hop direct payments
        }

        # sendpay is async now
        self.rpc.sendpay([routestep], rhash)
        # wait for sendpay to comply
        result = self.rpc.waitsendpay(rhash)
        assert(result.get('status') == 'complete')

    # This helper sends all money to a peer until even 1 msat can't get through.
    def drain(self, peer):
        total = 0
        msat = 4294967295  # Max payment size in some configs
        while msat != 0:
            try:
                logging.debug("Drain step with size={}".format(msat))
                self.pay(peer, msat)
                total += msat
            except RpcError as e:
                logging.debug("Got an exception while draining channel: {}".format(e))
                msat //= 2
        logging.debug("Draining complete after sending a total of {}msats".format(total))
        return total

    # Note: this feeds through the smoother in update_feerate, so changing
    # it on a running daemon may not give expected result!
    def set_feerates(self, feerates, wait_for_effect=True):
        # (bitcoind returns bitcoin per kb, so these are * 4)

        def mock_estimatesmartfee(r):
            params = r['params']
            if params == [2, 'CONSERVATIVE']:
                feerate = feerates[0] * 4
            elif params == [3, 'CONSERVATIVE']:
                feerate = feerates[1] * 4
            elif params == [4, 'ECONOMICAL']:
                feerate = feerates[2] * 4
            elif params == [100, 'ECONOMICAL']:
                feerate = feerates[3] * 4
            else:
                warnings.warn("Don't have a feerate set for {}/{}.".format(
                    params[0], params[1],
                ))
                feerate = 42
            return {
                'id': r['id'],
                'error': None,
                'result': {
                    'feerate': Decimal(feerate) / 10**8
                },
            }
        self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)

        # Technically, this waits until it's called, not until it's processed.
        # We wait until all three levels have been called.
        if wait_for_effect:
            wait_for(lambda:
                     self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 4)

    # force new feerates by restarting and thus skipping slow smoothed process
    # Note: testnode must be created with: opts={'may_reconnect': True}
    def force_feerates(self, rate):
        assert(self.may_reconnect)
        self.set_feerates([rate] * 4, False)
        self.restart()
        self.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
        assert(self.rpc.feerates('perkw')['perkw']['opening'] == rate)

    def wait_for_onchaind_broadcast(self, name, resolve=None):
        """Wait for onchaind to drop tx name to resolve (if any)"""
        if resolve:
            r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
                                         .format(name, resolve))
        else:
            r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
                                         .format(name))

        rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
        txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']

        wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())

    def query_gossip(self, querytype, *args, filters=[]):
        """Generate a gossip query, feed it into this node and get responses
        in hex"""
        query = subprocess.run(['devtools/mkquery',
                                querytype] + [str(a) for a in args],
                               check=True,
                               timeout=TIMEOUT,
                               stdout=subprocess.PIPE).stdout.strip()
        out = subprocess.run(['devtools/gossipwith',
                              '--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
                              '{}@localhost:{}'.format(self.info['id'],
                                                       self.port),
                              query],
                             check=True,
                             timeout=TIMEOUT, stdout=subprocess.PIPE).stdout

        def passes_filters(hmsg, filters):
            for f in filters:
                if hmsg.startswith(f):
                    return False
            return True

        msgs = []
        while len(out):
            length = struct.unpack('>H', out[0:2])[0]
            hmsg = out[2:2 + length].hex()
            if passes_filters(hmsg, filters):
                msgs.append(out[2:2 + length].hex())
            out = out[2 + length:]
        return msgs
Example #16
 def __init__(self):
     self.rpc = LightningRpc(RPC_FILE)
Example #17
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from autobahn.twisted.websocket import WebSocketServerProtocol
from autobahn.twisted.websocket import WebSocketServerFactory

from txzmq import ZmqEndpoint, ZmqEndpointType
from txzmq import ZmqFactory
from txzmq import ZmqSubConnection

import uuid
from base64 import b64encode

from pyln.client import LightningRpc

RPC_FILE = "/home/abc/.lightning/bitcoin/lightning-rpc"

###############################################################################

rpc = LightningRpc(RPC_FILE)


class Daemon(object):
    def __init__(self):
        self.rpc = LightningRpc(RPC_FILE)

    def _gen_new_label(self):
        label_bytes = uuid.uuid4().bytes
        label_str = b64encode(label_bytes).decode("utf8")
        return label_str

    def invoice(self):
        msatoshis = 12000
        description = "f**k you, pay me"
        label = self._gen_new_label()
Example #18
#!/usr/bin/python3
import sys
import json
from datetime import date, datetime
from pyln.client import LightningRpc
from os.path import expanduser
from mpmath import *
from functools import reduce

rpc = LightningRpc(expanduser("~") + "/.lightning/bitcoin/lightning-rpc")
N = 1  # min number of triangles
megahub_nodes = set()
adjacent = dict(
)  # node_id -> adjacent node_ids (i.e. other node_ids to which this node has a direct channel)
min_chan_size = 100000  #satoshis minimum in a channel for it to be considered


def print_usage_and_die():
    sys.stderr.write("Usage:\n")
    sys.stderr.write("%s N node_id [node_id ...]" % sys.argv[0])
    sys.stderr.write("\n")
    sys.stderr.write(
        "Extracts the megahub rooted at the specified node_ids with all nodes having at least N\n"
    )
    sys.stderr.write(
        "triangles where both other nodes are already in the megahub.\n")
    sys.stderr.write("\n")
    sys.exit(1)


#Calculate the average shortest path length from root_node to each node in lowfee_nodes
Example #19
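# Fragment: this excerpt starts inside a channel-scanning function; `chan`, `incoming`,
# `outgoing` and `print_list` are defined earlier in the original script, which is not
# shown here.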
                incoming.append(chan)

    incoming = sorted(incoming, key=lambda k: k['to_middle'])
    outgoing = sorted(outgoing, key=lambda k: k['to_middle'])

    print()
    print("incoming")
    print_list(incoming)
    print("outgoing")
    print_list(outgoing)

    return incoming, outgoing


if __name__ == "__main__":
    rpc = LightningRpc(os.environ['LIGHTNING_RPC'])
    out_success = Counter()
    inc_success = Counter()
    total_out_success = Counter()
    total_inc_success = Counter()
    exclude = []

    while True:
        print("----------------------------------")
        config = rpc.listconfigs()
        fee_base = config['fee-base']
        fee_per_satoshi = config['fee-per-satoshi']  # microsat per sat
        maxfeepercent = fee_per_satoshi * 0.000001 * 100  # my fee in percent
        print("fee_base=%d fee_per_satoshi=%d maxfeepercent=%.2f" %
              (fee_base, fee_per_satoshi, maxfeepercent))
Example #20
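# Fragment: `parser`, `manual` and `png` are argparse parsers/subparsers created earlier
# in the original script; `manual_func` and `png_func` are the command handlers invoked
# via settings.func(settings, rpc) below.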
manual.add_argument('pixels',
                    type=str,
                    help="a string specifying coordinates and colors of "
                    "pixels to draw separated by underscores. "
                    "Eg. 1_1_ffffff_2_2_00ff00 will set pixel "
                    "(1,1) white (#ffffff) and (2,2) green (#00ff00)")
manual.set_defaults(func=manual_func)

png.add_argument("x_offset",
                 type=int,
                 help="the x coordinate to begin drawing at")
png.add_argument("y_offset",
                 type=int,
                 help="the y coordinate to begin drawing at")
png.add_argument("png_file", type=str, help="the path to the png file to use")
png.set_defaults(func=png_func)

settings = parser.parse_args()

if settings.image_no not in {0, 1, 2}:
    sys.exit("invalid image_no: %d" % settings.image_no)
if not os.path.exists(settings.lightning_rpc):
    sys.exit("no such file? %s" % settings.lightning_rpc)

rpc = LightningRpc(settings.lightning_rpc)

err = settings.func(settings, rpc)
if err:
    sys.exit(err)
Example #21
#!/usr/bin/python3
import sys
import json
from datetime import date, datetime
from pyln.client import LightningRpc
from os.path import expanduser

rpc = LightningRpc(expanduser("~") + "/.lightning/bitcoin/lightning-rpc")

channels = set()  # a bag of scids
channel_fees_collected = dict(
)  # scid -> (x, y) where x = fees from this channel as input, y = fees from this channel as output
msat_moved = dict(
)  # scid -> ((n, x), (n, y)) where x = msat moved in all forwards with this channel as input, y = msat moved " as output and n in both cases is the number of forwarding events
rebalance_payment_hashes = set()
rebalance_msat_received = dict()  # payment hash -> msat received
rebalance_channels = dict(
)  # payment hash -> (x, y) where x = outgoing channel, y = incoming channel
channel_rebalance_paid = dict(
)  #scid -> (x, y) where x = fees paid rebalancing this channel in, y = fees paid rebalancing this channel out
rebalance_fees = dict()  # payment hash -> msat


def print_usage_and_die():
    sys.stderr.write("Usage:\n")
    sys.stderr.write(
        "%s $(date '+%%s' -d \"Apr 01 2021\") $(date '+%%s' -d \"May 01 2021\")\n"
        % sys.argv[0])
    sys.stderr.write("\n")
    sys.stderr.write(
        "Calculates your C-Lightning node's earnings and rebalancing costs in the specified period.\n"