def main(refresh, api_key, verbose):
    """Rebuild coins_details.json from the coin definitions.

    Merges per-category coin data, applies overrides, wallet and marketcap
    data, prints a summary to stdout and writes the full details file into
    coin_info.DEFS_DIR.
    """
    # setup logging
    log_level = logging.DEBUG if verbose else logging.WARNING
    root = logging.getLogger()
    root.setLevel(log_level)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(log_level)
    root.addHandler(handler)

    # prime marketcap data; `refresh` presumably forces a re-download --
    # TODO(review): confirm against marketcap.init
    marketcap.init(api_key, refresh=refresh)

    defs = coin_info.coin_info()
    support_info = coin_info.support_info(defs)

    # merge all categories into one flat {key: coin} mapping
    coins = {}
    coins.update(update_bitcoin(defs.bitcoin, support_info))
    coins.update(update_erc20(defs.erc20, defs.eth, support_info))
    coins.update(update_ethereum_networks(defs.eth, support_info))
    coins.update(update_nem_mosaics(defs.nem, support_info))
    coins.update(update_simple(defs.misc, support_info, "coin"))

    # post-processing passes; these appear to mutate `coins` in place --
    # NOTE(review): inferred from call style, confirm against their definitions
    apply_overrides(coins)
    finalize_wallets(coins)
    update_marketcaps(coins)
    check_missing_data(coins)

    info = summary(coins, api_key)
    details = dict(coins=coins, info=info)

    # summary goes to stdout, full details next to the coin definitions
    print(json.dumps(info, sort_keys=True, indent=4))
    with open(os.path.join(coin_info.DEFS_DIR, "coins_details.json"), "w") as f:
        json.dump(details, f, sort_keys=True, indent=4)
        f.write("\n")  # keep the output file newline-terminated
def main(refresh, api_key, verbose):
    """Rebuild coins_details.json from the coin definitions.

    Merges per-category coin data, applies overrides, wallet and marketcap
    data, prints a summary to stdout and writes the full details file into
    coin_info.DEFS_DIR.
    """
    # setup logging
    log_level = logging.DEBUG if verbose else logging.WARNING
    root = logging.getLogger()
    root.setLevel(log_level)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(log_level)
    root.addHandler(handler)

    # prime marketcap data; `refresh` presumably forces a re-download --
    # TODO(review): confirm against coinmarketcap_init
    coinmarketcap_init(api_key, refresh=refresh)

    defs = coin_info.coin_info()
    support_info = coin_info.support_info(defs)

    # merge all categories into one flat {key: coin} mapping
    coins = {}
    coins.update(update_bitcoin(defs.bitcoin, support_info))
    coins.update(update_erc20(defs.erc20, defs.eth, support_info))
    coins.update(update_ethereum_networks(defs.eth, support_info))
    coins.update(update_nem_mosaics(defs.nem, support_info))
    coins.update(update_simple(defs.misc, support_info, "coin"))

    # post-processing passes; these appear to mutate `coins` in place --
    # NOTE(review): inferred from call style, confirm against their definitions
    apply_overrides(coins)
    finalize_wallets(coins)
    update_marketcaps(coins)
    check_missing_data(coins)

    info = summary(coins, api_key)
    details = dict(coins=coins, info=info)

    # summary goes to stdout, full details next to the coin definitions
    print(json.dumps(info, sort_keys=True, indent=4))
    with open(os.path.join(coin_info.DEFS_DIR, "coins_details.json"), "w") as f:
        json.dump(details, f, sort_keys=True, indent=4)
        # FIX: terminate the file with a newline, consistent with every other
        # JSON writer in this tooling (dump, coins_json, the sibling main)
        f.write("\n")
def dump(outfile):
    """Dump all coin data in a single JSON file.

    This file is structured the same as the internal data. That is, top-level
    object is a dict with keys: 'bitcoin', 'eth', 'erc20', 'nem' and 'misc'.
    Value for each key is a list of dicts, each describing a known coin.

    \b
    Fields are category-specific, except for four common ones:
    - 'name' - human-readable name
    - 'shortcut' - currency symbol
    - 'key' - unique identifier, e.g., 'bitcoin:BTC'
    - 'support' - a dict with entries per known device
    """
    all_coins = coin_info.coin_info()
    support = coin_info.support_info(all_coins.as_list())

    # attach per-device support data to every coin in every category
    for coin_list in all_coins.values():
        for entry in coin_list:
            entry["support"] = support[entry["key"]]

    # get rid of address_bytes which are bytes which can't be JSON encoded
    for token in all_coins.erc20:
        if "address_bytes" in token:
            del token["address_bytes"]

    with outfile:
        json.dump(all_coins, outfile, indent=4, sort_keys=True)
        outfile.write("\n")
def render(paths, outfile, verbose):
    """Generate source code from Mako templates.

    For every "foo.bar.mako" filename passed, runs the template and
    saves the result as "foo.bar". For every directory name passed,
    processes all ".mako" files found in that directory.

    If `-o` is specified, renders a single file into the specified outfile.

    If no arguments are given, processes the current directory.
    """
    if not CAN_RENDER:
        raise click.ClickException("Please install 'mako' and 'munch'")

    # -o only makes sense with exactly one existing input file
    if outfile and (len(paths) != 1 or not os.path.isfile(paths[0])):
        raise click.ClickException(
            "Option -o can only be used with single input file")

    # prepare defs
    defs = coin_info.coin_info()
    support_info = coin_info.support_info(defs)

    # munch dicts - make them attribute-accessible
    for key, value in defs.items():
        defs[key] = [Munch(coin) for coin in value]
    for key, value in support_info.items():
        support_info[key] = Munch(value)

    def do_render(src, dst):
        # render one template `src` into open file object `dst`
        if verbose:
            click.echo("Rendering {} => {}".format(src, dst))
        render_file(src, dst, defs, support_info)

    # single in-out case
    if outfile:
        do_render(paths[0], outfile)
        return

    # find files in directories
    if not paths:
        paths = ["."]

    files = []
    for path in paths:
        if not os.path.exists(path):
            click.echo("Path {} does not exist".format(path))
        elif os.path.isdir(path):
            files += glob.glob(os.path.join(path, "*.mako"))
        else:
            files.append(path)

    # render each file; output name is the template name minus ".mako"
    for file in files:
        if not file.endswith(".mako"):
            click.echo("File {} does not end with .mako".format(file))
        else:
            target = file[:-len(".mako")]
            with open(target, "w") as dst:
                do_render(file, dst)
def render(paths, outfile, verbose):
    """Generate source code from Mako templates.

    For every "foo.bar.mako" filename passed, runs the template and
    saves the result as "foo.bar". For every directory name passed,
    processes all ".mako" files found in that directory.

    If `-o` is specified, renders a single file into the specified outfile.

    If no arguments are given, processes the current directory.
    """
    if not CAN_RENDER:
        raise click.ClickException("Please install 'mako' and 'munch'")

    if outfile and (len(paths) != 1 or not os.path.isfile(paths[0])):
        raise click.ClickException("Option -o can only be used with single input file")

    # load definitions and wrap them in Munch for attribute-style access
    defs = coin_info.coin_info()
    support_info = coin_info.support_info(defs)
    for category in defs:
        defs[category] = [Munch(c) for c in defs[category]]
    for device in support_info:
        support_info[device] = Munch(support_info[device])

    def emit(src, dst):
        # render a single template into an open file object
        if verbose:
            click.echo("Rendering {} => {}".format(src, dst))
        render_file(src, dst, defs, support_info)

    # single in-out case: the -o target was validated above
    if outfile:
        emit(paths[0], outfile)
        return

    # no arguments means "process the current directory"
    sources = list(paths) if paths else ["."]

    # expand directories into their .mako files, keep plain files as-is
    templates = []
    for entry in sources:
        if not os.path.exists(entry):
            click.echo("Path {} does not exist".format(entry))
        elif os.path.isdir(entry):
            templates.extend(glob.glob(os.path.join(entry, "*.mako")))
        else:
            templates.append(entry)

    # render each template next to itself, stripping the ".mako" suffix
    for template in templates:
        if not template.endswith(".mako"):
            click.echo("File {} does not end with .mako".format(template))
            continue
        destination = template[: -len(".mako")]
        with open(destination, "w") as out:
            emit(template, out)
def coins_json(outfile):
    """Generate coins.json for consumption in python-trezor and Connect/Wallet"""
    btc_coins = coin_info.coin_info().bitcoin
    support = coin_info.support_info(btc_coins)

    # attach support data in place, then index the coins by display name
    for entry in btc_coins:
        entry["support"] = support[entry["key"]]
    name_map = {entry["name"]: entry for entry in btc_coins}

    with outfile:
        json.dump(name_map, outfile, indent=4, sort_keys=True)
        outfile.write("\n")
def dump(outfile, support, pretty, tokens):
    """Dump all coin data in a single JSON file.

    This file is structured the same as the internal data. That is, top-level
    object is a dict with keys: 'bitcoin', 'eth', 'erc20', 'nem' and 'misc'.
    Value for each key is a list of dicts, each describing a known coin.

    \b
    Fields are category-specific, except for four common ones:
    - 'name' - human-readable name
    - 'shortcut' - currency symbol
    - 'key' - unique identifier, e.g., 'bitcoin:BTC'
    - 'support' - a dict with entries per known device

    To control the size and properties of the resulting file, you can specify
    whether or not you want pretty-printing, whether or not to include support
    data with each coin, and whether to include information about ERC20
    tokens, which takes up several hundred kB of space.

    \b
    The option '--tokens' can have one of three values:
    'full': include all token data
    'stripped': exclude 'social' links and 'logo' data from tokens
    'none': exclude the 'erc20' category altogether.
    """
    coins = coin_info.coin_info()

    # support data is optional because it inflates the output considerably
    if support:
        support_info = coin_info.support_info(coins.as_list())
        for category in coins.values():
            for coin in category:
                coin["support"] = support_info[coin["key"]]

    # get rid of address_bytes which are bytes which can't be JSON encoded
    for coin in coins.erc20:
        coin.pop("address_bytes", None)
        # 'stripped' mode additionally drops the bulky token metadata
        if tokens == "stripped":
            coin.pop("social", None)
            coin.pop("logo", None)

    # 'none' mode removes the whole erc20 category
    if tokens == "none":
        del coins["erc20"]

    with outfile:
        if pretty:
            json.dump(coins, outfile, indent=4, sort_keys=True)
            outfile.write("\n")
        else:
            # compact output intentionally has no trailing newline
            json.dump(coins, outfile)
def build_coins_json(dst):
    """Write BTC-like coin definitions (with support info) to `dst` as JSON.

    Temporarily extends sys.path so that the trezor-common `coin_info`
    module can be imported from its tools directory.
    """
    TOOLS_PATH = os.path.join(TREZOR_COMMON, "tools")
    sys.path.insert(0, TOOLS_PATH)
    # FIX: restore sys.path even when the import or the dump raises;
    # previously an exception left TOOLS_PATH permanently on sys.path
    try:
        import coin_info

        coins = coin_info.coin_info().bitcoin
        support = coin_info.support_info(coins)
        for coin in coins:
            coin["support"] = support[coin["key"]]
        with open(dst, "w") as f:
            json.dump(coins, f, indent=2, sort_keys=True)
    finally:
        del sys.path[0]
def build_coins_json(dst):
    """Write BTC-like coin definitions (with support info) to `dst` as JSON.

    Temporarily extends sys.path so that the trezor-common `coin_info`
    module can be imported from its tools directory.
    """
    TOOLS_PATH = os.path.join(TREZOR_COMMON, "tools")
    sys.path.insert(0, TOOLS_PATH)
    # FIX: restore sys.path even when the import or the dump raises;
    # previously an exception left TOOLS_PATH permanently on sys.path
    try:
        import coin_info

        coins = coin_info.coin_info().bitcoin
        support = coin_info.support_info(coins)
        for coin in coins:
            coin["support"] = support[coin["key"]]
        with open(dst, "w") as f:
            json.dump(coins, f, indent=2, sort_keys=True)
    finally:
        del sys.path[0]
def dump(
    outfile,
    support,
    pretty,
    flat_list,
    include,
    exclude,
    include_type,
    exclude_type,
    filter,
    filter_exclude,
    exclude_tokens,
    device,
):
    """Dump coin data in JSON format

    This file is structured the same as the internal data. That is, top-level
    object is a dict with keys: 'bitcoin', 'eth', 'erc20', 'nem' and 'misc'.
    Value for each key is a list of dicts, each describing a known coin.

    If '--list' is specified, the top-level object is instead a flat list of
    coins.

    \b
    Fields are category-specific, except for four common ones:
    - 'name' - human-readable name
    - 'shortcut' - currency symbol
    - 'key' - unique identifier, e.g., 'bitcoin:BTC'
    - 'support' - a dict with entries per known device

    To control the size and properties of the resulting file, you can specify
    whether or not you want pretty-printing and whether or not to include
    support data with each coin.

    You can specify which categories and which fields will be included or
    excluded. You cannot specify both include and exclude at the same time.
    Include is "stronger" than exclude, in that _only_ the specified fields
    are included.

    You can also specify filters, in the form '-f field=value' (or '-F' for
    inverse filter). Filter values are case-insensitive and support
    shell-style wildcards, so '-f name=bit*' finds all coins whose names
    start with "bit" or "Bit".
    """
    if exclude_tokens:
        exclude_type = ("erc20",)

    if include and exclude:
        raise click.ClickException(
            "You cannot specify --include and --exclude at the same time.")
    if include_type and exclude_type:
        raise click.ClickException(
            "You cannot specify --include-type and --exclude-type at the same time."
        )

    coins = coin_info.coin_info()
    support_info = coin_info.support_info(coins.as_list())

    if support:
        for category in coins.values():
            for coin in category:
                coin["support"] = support_info[coin["key"]]

    # filter types
    if include_type:
        coins_dict = {k: v for k, v in coins.items() if k in include_type}
    else:
        coins_dict = {k: v for k, v in coins.items() if k not in exclude_type}

    # filter individual coins; each filter is a "field=pattern" pair
    include_filters = [f.split("=", maxsplit=1) for f in filter]
    exclude_filters = [f.split("=", maxsplit=1) for f in filter_exclude]

    # always exclude 'address_bytes', not encodable in JSON
    exclude += ("address_bytes",)

    def should_include_coin(coin):
        for field, filter in include_filters:
            filter = filter.lower()
            if field not in coin:
                return False
            # FIX: str() so that non-string fields (e.g., numeric slip44)
            # can be matched too, instead of raising AttributeError on
            # .lower() -- same fix as the newer variant of this command
            if not fnmatch.fnmatch(str(coin[field]).lower(), filter):
                return False
        for field, filter in exclude_filters:
            filter = filter.lower()
            if field not in coin:
                continue
            if fnmatch.fnmatch(str(coin[field]).lower(), filter):
                return False
        if device:
            is_supported = support_info[coin["key"]].get(device, None)
            if not is_supported:
                return False
        return True

    def modify_coin(coin):
        # include is "stronger": when given, keep only the listed fields
        if include:
            return {k: v for k, v in coin.items() if k in include}
        else:
            return {k: v for k, v in coin.items() if k not in exclude}

    for key, coinlist in coins_dict.items():
        coins_dict[key] = [
            modify_coin(c) for c in coinlist if should_include_coin(c)
        ]

    # --list flattens the per-category dict into one list
    if flat_list:
        output = sum(coins_dict.values(), [])
    else:
        output = coins_dict

    with outfile:
        indent = 4 if pretty else None
        json.dump(output, outfile, indent=indent, sort_keys=True)
        outfile.write("\n")
def check_btc(coins):
    """Validate BTC-like coin definitions and report field collisions.

    Returns True when all checks pass, False otherwise. P2SH address-type
    and genesis-block collisions are reported but never fail the check.
    """
    check_passed = True
    support_infos = coin_info.support_info(coins)

    # validate individual coin data
    for coin in coins:
        errors = coin_info.validate_btc(coin)
        if errors:
            check_passed = False
            print_log(logging.ERROR, "invalid definition for", coin["name"])
            print("\n".join(errors))

    def collision_str(bucket):
        """Generate a colorful string out of a bucket of colliding coins."""
        coin_strings = []
        for coin in bucket:
            name = coin["name"]
            prefix = ""
            if name.endswith("Testnet"):
                color = "green"
            elif name == "Bitcoin":
                color = "red"
            elif coin.get("unsupported"):
                color = "grey"
                # "(X)" marks unsupported coins for non-colored output
                prefix = crayon("blue", "(X)", bold=True)
            else:
                color = "blue"
            hl = highlight_key(coin, color)
            coin_strings.append(prefix + hl)
        return ", ".join(coin_strings)

    def print_collision_buckets(buckets, prefix, maxlevel=logging.ERROR, strict=False):
        """Intelligently print collision buckets.

        For each bucket, if there are any collision with a mainnet, print it.
        If the collision is with unsupported networks or testnets, it's just INFO.
        If the collision is with supported mainnets, it's WARNING.
        If the collision with any supported network includes Bitcoin, it's an ERROR.
        """
        failed = False
        for key, bucket in buckets.items():
            mainnets = [c for c in bucket if not c["name"].endswith("Testnet")]

            have_bitcoin = False
            for coin in mainnets:
                if coin["name"] == "Bitcoin":
                    have_bitcoin = True
                # a coin with no supporting device at all is marked in place,
                # which collision_str later uses for the grey/(X) rendering
                if all(v is False for k, v in support_infos[coin["key"]].items()):
                    coin["unsupported"] = True

            supported_mainnets = [
                c for c in mainnets if not c.get("unsupported")
            ]
            supported_networks = [
                c for c in bucket if not c.get("unsupported")
            ]

            if len(mainnets) > 1:
                if (have_bitcoin or strict) and len(supported_networks) > 1:
                    # ANY collision with Bitcoin is bad
                    level = maxlevel
                    failed = True
                elif len(supported_mainnets) > 1:
                    # collision between supported networks is still pretty bad
                    level = logging.WARNING
                else:
                    # collision between some unsupported networks is OK
                    level = logging.INFO
                print_log(level, "{} {}:".format(prefix, key), collision_str(bucket))

        return failed

    # slip44 collisions
    print("Checking SLIP44 values collisions...")
    slip44 = find_collisions(coins, "slip44")
    if print_collision_buckets(slip44, "value", strict=True):
        check_passed = False

    # only check address_type on coins that don't use cashaddr
    nocashaddr = [coin for coin in coins if not coin.get("cashaddr_prefix")]

    print("Checking address_type collisions...")
    address_type = find_collisions(nocashaddr, "address_type")
    if print_collision_buckets(address_type, "address type"):
        check_passed = False

    print("Checking address_type_p2sh collisions...")
    address_type_p2sh = find_collisions(nocashaddr, "address_type_p2sh")
    # we ignore failed checks on P2SH, because reasons
    print_collision_buckets(address_type_p2sh, "address type", logging.WARNING)

    print("Checking genesis block collisions...")
    genesis = find_collisions(coins, "hash_genesis_block")
    print_collision_buckets(genesis, "genesis block", logging.WARNING)

    return check_passed
def check(backend, icons, show_duplicates):
    """Validate coin definitions.

    Checks that every btc-like coin is properly filled out, reports duplicate
    symbols, missing or invalid icons, backend responses, and uniform key
    information -- i.e., that all coins of the same type have the same fields
    in their JSON data.

    Uniformity check ignores NEM mosaics and ERC20 tokens, where
    non-uniformity is expected.

    The `--show-duplicates` option can be set to:

    - all: all shortcut collisions are shown, including colliding ERC20 tokens

    - nontoken: only collisions that affect non-ERC20 coins are shown

    - errors: only collisions between non-ERC20 tokens are shown. This is the
      default, as a collision between two or more non-ERC20 tokens is an
      error.

    In the output, duplicate ERC tokens will be shown in cyan; duplicate
    non-tokens in red. An asterisk (*) next to symbol name means that even
    though it was detected as duplicate, it is still included in results.

    The collision detection checks that SLIP44 numbers don't collide between
    different mainnets (testnet collisions are allowed), that
    `address_prefix` doesn't collide with Bitcoin (other collisions are
    reported as warnings). `address_prefix_p2sh` is also checked but we have
    a bunch of collisions there and can't do much about them, so it's not an
    error.

    In the collision checks, Bitcoin is shown in red, other mainnets in blue,
    testnets in green and unsupported networks in gray, marked with `(X)` for
    non-colored output.
    """
    # fail fast if optional requirements for the requested checks are missing
    if backend and requests is None:
        raise click.ClickException("You must install requests for backend check")

    if icons and not CAN_BUILD_DEFS:
        raise click.ClickException("Missing requirements for icon check")

    defs, buckets = coin_info.coin_info_with_duplicates()
    support_info = coin_info.support_info(defs)
    mark_unsupported(support_info, defs.as_list())
    all_checks_passed = True

    print("Checking BTC-like coins...")
    if not check_btc(defs.bitcoin):
        all_checks_passed = False

    print("Checking Ethereum networks...")
    if not check_eth(defs.eth):
        all_checks_passed = False

    # map the --show-duplicates choice to a log level threshold
    if show_duplicates == "all":
        dup_level = logging.DEBUG
    elif show_duplicates == "nontoken":
        dup_level = logging.INFO
    else:
        dup_level = logging.WARNING
    print("Checking unexpected duplicates...")
    if not check_dups(buckets, dup_level):
        all_checks_passed = False

    # duplicate keys among non-token coins are always an error
    nontoken_dups = [coin for coin in defs.as_list() if "dup_key_nontoken" in coin]
    if nontoken_dups:
        nontoken_dup_str = ", ".join(
            highlight_key(coin, "red") for coin in nontoken_dups
        )
        print_log(logging.ERROR, "Non-token duplicate keys: " + nontoken_dup_str)
        all_checks_passed = False

    if icons:
        print("Checking icon files...")
        if not check_icons(defs.bitcoin):
            all_checks_passed = False

    if backend:
        print("Checking backend responses...")
        if not check_backends(defs.bitcoin):
            all_checks_passed = False

    print("Checking segwit fields...")
    if not check_segwit(defs.bitcoin):
        all_checks_passed = False

    print("Checking key uniformity...")
    for cointype, coinlist in defs.items():
        # non-uniformity is expected for tokens and mosaics
        if cointype in ("erc20", "nem"):
            continue
        if not check_key_uniformity(coinlist):
            all_checks_passed = False

    print("Checking FIDO app definitions...")
    if not check_fido(coin_info.fido_info()):
        all_checks_passed = False

    # non-zero exit code signals failure to CI
    if not all_checks_passed:
        print("Some checks failed.")
        sys.exit(1)
    else:
        print("Everything is OK.")
def dump(
    outfile,
    support,
    pretty,
    flat_list,
    include,
    exclude,
    include_type,
    exclude_type,
    filter,
    filter_exclude,
    exclude_tokens,
    device,
):
    """Dump coin data in JSON format

    This file is structured the same as the internal data. That is, top-level
    object is a dict with keys: 'bitcoin', 'eth', 'erc20', 'nem' and 'misc'.
    Value for each key is a list of dicts, each describing a known coin.

    If '--list' is specified, the top-level object is instead a flat list of
    coins.

    \b
    Fields are category-specific, except for four common ones:
    - 'name' - human-readable name
    - 'shortcut' - currency symbol
    - 'key' - unique identifier, e.g., 'bitcoin:BTC'
    - 'support' - a dict with entries per known device

    To control the size and properties of the resulting file, you can specify
    whether or not you want pretty-printing and whether or not to include
    support data with each coin.

    You can specify which categories and which fields will be included or
    excluded. You cannot specify both include and exclude at the same time.
    Include is "stronger" than exclude, in that _only_ the specified fields
    are included.

    You can also specify filters, in the form '-f field=value' (or '-F' for
    inverse filter). Filter values are case-insensitive and support
    shell-style wildcards, so '-f name=bit*' finds all coins whose names
    start with "bit" or "Bit".
    """
    # --exclude-tokens is shorthand for excluding the whole erc20 category
    if exclude_tokens:
        exclude_type = ("erc20",)

    if include and exclude:
        raise click.ClickException(
            "You cannot specify --include and --exclude at the same time."
        )
    if include_type and exclude_type:
        raise click.ClickException(
            "You cannot specify --include-type and --exclude-type at the same time."
        )

    coins = coin_info.coin_info()
    support_info = coin_info.support_info(coins.as_list())

    if support:
        for category in coins.values():
            for coin in category:
                coin["support"] = support_info[coin["key"]]

    # filter types
    if include_type:
        coins_dict = {k: v for k, v in coins.items() if k in include_type}
    else:
        coins_dict = {k: v for k, v in coins.items() if k not in exclude_type}

    # filter individual coins; each filter is a "field=pattern" pair
    include_filters = [f.split("=", maxsplit=1) for f in filter]
    exclude_filters = [f.split("=", maxsplit=1) for f in filter_exclude]

    # always exclude 'address_bytes', not encodable in JSON
    exclude += ("address_bytes",)

    def should_include_coin(coin):
        # str() so non-string fields (e.g., numeric slip44) can be matched
        for field, filter in include_filters:
            filter = filter.lower()
            if field not in coin:
                return False
            if not fnmatch.fnmatch(str(coin[field]).lower(), filter):
                return False
        for field, filter in exclude_filters:
            filter = filter.lower()
            if field not in coin:
                continue
            if fnmatch.fnmatch(str(coin[field]).lower(), filter):
                return False
        if device:
            is_supported = support_info[coin["key"]].get(device, None)
            if not is_supported:
                return False
        return True

    def modify_coin(coin):
        # include is "stronger": when given, keep only the listed fields
        if include:
            return {k: v for k, v in coin.items() if k in include}
        else:
            return {k: v for k, v in coin.items() if k not in exclude}

    for key, coinlist in coins_dict.items():
        coins_dict[key] = [modify_coin(c) for c in coinlist if should_include_coin(c)]

    # --list flattens the per-category dict into one list
    if flat_list:
        output = sum(coins_dict.values(), [])
    else:
        output = coins_dict

    with outfile:
        indent = 4 if pretty else None
        json.dump(output, outfile, indent=indent, sort_keys=True)
        outfile.write("\n")
def check_btc(coins):
    """Validate BTC-like coin definitions and report field collisions.

    Returns True when all checks pass, False otherwise. P2SH address-type
    and genesis-block collisions are reported but never fail the check.
    """
    check_passed = True
    support_infos = coin_info.support_info(coins)

    # validate individual coin data
    for coin in coins:
        errors = coin_info.validate_btc(coin)
        if errors:
            check_passed = False
            print_log(logging.ERROR, "invalid definition for", coin["name"])
            print("\n".join(errors))

    def collision_str(bucket):
        """Generate a colorful string out of a bucket of colliding coins."""
        coin_strings = []
        for coin in bucket:
            name = coin["name"]
            prefix = ""
            if name.endswith("Testnet"):
                color = "green"
            elif name == "Bitcoin":
                color = "red"
            elif coin.get("unsupported"):
                color = "grey"
                # "(X)" marks unsupported coins for non-colored output
                prefix = crayon("blue", "(X)", bold=True)
            else:
                color = "blue"
            hl = highlight_key(coin, color)
            coin_strings.append(prefix + hl)
        return ", ".join(coin_strings)

    def print_collision_buckets(buckets, prefix, maxlevel=logging.ERROR, strict=False):
        """Intelligently print collision buckets.

        For each bucket, if there are any collision with a mainnet, print it.
        If the collision is with unsupported networks or testnets, it's just INFO.
        If the collision is with supported mainnets, it's WARNING.
        If the collision with any supported network includes Bitcoin, it's an ERROR.
        """
        failed = False
        for key, bucket in buckets.items():
            mainnets = [c for c in bucket if not c["name"].endswith("Testnet")]

            have_bitcoin = False
            for coin in mainnets:
                if coin["name"] == "Bitcoin":
                    have_bitcoin = True
                # a coin with no supporting device at all is marked in place,
                # which collision_str later uses for the grey/(X) rendering
                if all(v is False for k, v in support_infos[coin["key"]].items()):
                    coin["unsupported"] = True

            supported_mainnets = [c for c in mainnets if not c.get("unsupported")]
            supported_networks = [c for c in bucket if not c.get("unsupported")]

            if len(mainnets) > 1:
                if (have_bitcoin or strict) and len(supported_networks) > 1:
                    # ANY collision with Bitcoin is bad
                    level = maxlevel
                    failed = True
                elif len(supported_mainnets) > 1:
                    # collision between supported networks is still pretty bad
                    level = logging.WARNING
                else:
                    # collision between some unsupported networks is OK
                    level = logging.INFO
                print_log(level, "{} {}:".format(prefix, key), collision_str(bucket))

        return failed

    # slip44 collisions
    print("Checking SLIP44 values collisions...")
    slip44 = find_collisions(coins, "slip44")
    if print_collision_buckets(slip44, "value", strict=True):
        check_passed = False

    # only check address_type on coins that don't use cashaddr
    nocashaddr = [coin for coin in coins if not coin.get("cashaddr_prefix")]

    print("Checking address_type collisions...")
    address_type = find_collisions(nocashaddr, "address_type")
    if print_collision_buckets(address_type, "address type"):
        check_passed = False

    print("Checking address_type_p2sh collisions...")
    address_type_p2sh = find_collisions(nocashaddr, "address_type_p2sh")
    # we ignore failed checks on P2SH, because reasons
    print_collision_buckets(address_type_p2sh, "address type", logging.WARNING)

    print("Checking genesis block collisions...")
    genesis = find_collisions(coins, "hash_genesis_block")
    print_collision_buckets(genesis, "genesis block", logging.WARNING)

    return check_passed
def render(paths: tuple[str, ...], outfile: TextIO, verbose: bool, bitcoin_only: bool) -> None:
    """Generate source code from Mako templates.

    For every "foo.bar.mako" filename passed, runs the template and
    saves the result as "foo.bar". For every directory name passed,
    processes all ".mako" files found in that directory.

    If `-o` is specified, renders a single file into the specified outfile.

    If no arguments are given, processes the current directory.
    """
    if not CAN_RENDER:
        raise click.ClickException("Please install 'mako' and 'munch'")

    # -o only makes sense with exactly one existing input file
    if outfile and (len(paths) != 1 or not os.path.isfile(paths[0])):
        raise click.ClickException(
            "Option -o can only be used with single input file")

    # prepare defs
    defs = coin_info.coin_info()
    defs["fido"] = coin_info.fido_info()
    support_info = coin_info.support_info(defs)

    # bitcoin-only builds keep just the three core BTC networks
    if bitcoin_only:
        defs["bitcoin"] = [
            x for x in defs["bitcoin"]
            if x["coin_name"] in ("Bitcoin", "Testnet", "Regtest")
        ]

    # munch dicts - make them attribute-accessible
    for key, value in defs.items():
        defs[key] = [Munch(coin) for coin in value]
    for key, value in support_info.items():
        support_info[key] = Munch(value)

    def do_render(src: str, dst: TextIO) -> None:
        # render one template `src` into open file object `dst`
        if verbose:
            click.echo(f"Rendering {src} => {dst.name}")
        render_file(src, dst, defs, support_info)

    # single in-out case
    if outfile:
        do_render(paths[0], outfile)
        return

    # find files in directories
    if not paths:
        paths = (".",)

    files: list[str] = []
    for path in paths:
        if not os.path.exists(path):
            click.echo(f"Path {path} does not exist")
        elif os.path.isdir(path):
            files += glob.glob(os.path.join(path, "*.mako"))
        else:
            files.append(path)

    # render each file; output name is the template name minus ".mako"
    for file in files:
        if not file.endswith(".mako"):
            click.echo(f"File {file} does not end with .mako")
        else:
            target = file[:-len(".mako")]
            with open(target, "w") as dst:
                do_render(file, dst)
def dump(
    outfile: TextIO,
    support: bool,
    wallet: bool,
    pretty: bool,
    flat_list: bool,
    include: tuple[str, ...],
    exclude: tuple[str, ...],
    include_type: tuple[str, ...],
    exclude_type: tuple[str, ...],
    filter: tuple[str, ...],
    filter_exclude: tuple[str, ...],
    exclude_tokens: bool,
    device_include: tuple[str, ...],
    device_exclude: tuple[str, ...],
) -> None:
    """Dump coin data in JSON format.

    By default prints to stdout, specify an output file with '-o file.json'.

    This file is structured the same as the internal data. That is, top-level
    object is a dict with keys: 'bitcoin', 'eth', 'erc20', 'nem' and 'misc'.
    Value for each key is a list of dicts, each describing a known coin.

    If '--list' is specified, the top-level object is instead a flat list of
    coins.

    \b
    Fields are category-specific, except for four common ones:
    - 'name' - human-readable name
    - 'shortcut' - currency symbol
    - 'key' - unique identifier, e.g., 'bitcoin:BTC'
    - 'support' - a dict with entries per known device

    To control the size and properties of the resulting file, you can specify
    whether or not you want pretty-printing and whether or not to include
    support data with each coin.

    You can specify which categories and which fields will be included or
    excluded. You cannot specify both include and exclude at the same time.
    Include is "stronger" than exclude, in that _only_ the specified fields
    are included.

    You can also specify filters, in the form '-f field=value' (or '-F' for
    inverse filter). Filter values are case-insensitive and support
    shell-style wildcards, so '-f name=bit*' finds all coins whose names
    start with "bit" or "Bit".

    Also devices can be used as filters. For example to find out which coins
    are supported in Suite and connect but not on Trezor 1, it is possible
    to say '-d suite -d connect -D trezor1'.

    Includes even the wallet data, unless turned off by '-W'.
    These can be filtered by using '-f', for example `-f 'wallet=*exodus*'`
    (* are necessary)
    """
    # --exclude-tokens is shorthand for excluding the whole erc20 category
    if exclude_tokens:
        exclude_type += ("erc20",)

    if include and exclude:
        raise click.ClickException(
            "You cannot specify --include and --exclude at the same time.")
    if include_type and exclude_type:
        raise click.ClickException(
            "You cannot specify --include-type and --exclude-type at the same time."
        )

    # getting initial info
    coins = coin_info.coin_info()
    support_info = coin_info.support_info(coins.as_list())
    wallet_info = coin_info.wallet_info(coins)

    # optionally adding support info
    if support:
        for category in coins.values():
            for coin in category:
                coin["support"] = support_info[coin["key"]]

    # optionally adding wallet info
    if wallet:
        for category in coins.values():
            for coin in category:
                coin["wallet"] = wallet_info[coin["key"]]

    # filter types
    if include_type:
        coins_dict = {k: v for k, v in coins.items() if k in include_type}
    elif exclude_type:
        coins_dict = {k: v for k, v in coins.items() if k not in exclude_type}
    else:
        coins_dict = coins

    # filter individual coins; each filter is a "field=pattern" pair
    include_filters = [f.split("=", maxsplit=1) for f in filter]
    exclude_filters = [f.split("=", maxsplit=1) for f in filter_exclude]

    # always exclude 'address_bytes', not encodable in JSON
    exclude += ("address_bytes",)

    def should_include_coin(coin: Coin) -> bool:
        # str() so non-string fields (e.g., numeric slip44) can be matched
        for field, filter in include_filters:
            if field not in coin:
                return False
            if not fnmatch.fnmatch(str(coin[field]).lower(), filter.lower()):
                return False
        for field, filter in exclude_filters:
            if field not in coin:
                continue
            if fnmatch.fnmatch(str(coin[field]).lower(), filter.lower()):
                return False
        # -d: coin must be supported on ALL listed devices
        if device_include:
            is_supported_everywhere = all(
                support_info[coin["key"]].get(device) for device in device_include)
            if not is_supported_everywhere:
                return False
        # -D: coin must be supported on NONE of the listed devices
        if device_exclude:
            is_supported_somewhere = any(
                support_info[coin["key"]].get(device) for device in device_exclude)
            if is_supported_somewhere:
                return False
        return True

    def modify_coin(coin: Coin) -> Coin:
        # include is "stronger": when given, keep only the listed fields
        if include:
            return cast(Coin, {k: v for k, v in coin.items() if k in include})
        else:
            return cast(Coin, {k: v for k, v in coin.items() if k not in exclude})

    for key, coinlist in coins_dict.items():
        coins_dict[key] = [
            modify_coin(c) for c in coinlist if should_include_coin(c)
        ]

    # deciding the output structure
    if flat_list:
        output = sum(coins_dict.values(), [])
    else:
        output = coins_dict

    # dump the data - to stdout or to a file
    with outfile:
        indent = 4 if pretty else None
        json.dump(output, outfile, indent=indent, sort_keys=True)
        outfile.write("\n")