def print_tr_claims(claims, claim_id=False, sanitize=False,
                    file=None, fdate=None, sep=";"):
    """Print generic claims, particularly trending or searched claims."""
    total = len(claims)
    lines = []

    for number, claim in enumerate(claims, start=1):
        value = claim["value"]
        vtype = claim["value_type"]

        if "stream_type" in value:
            stream_type = value.get("stream_type")
        else:
            stream_type = 8 * "_"

        if "source" in value:
            mtype = value["source"].get("media_type", 14 * "_")
        else:
            mtype = 14 * "_"

        if "signing_channel" in claim:
            channel = claim["signing_channel"].get("name", 14 * "_")
            if sanitize:
                channel = funcs.sanitize_text(channel)
        else:
            channel = 14 * "_"

        name = claim["name"]
        if sanitize:
            name = funcs.sanitize_text(name)

        # Collect the fields and join them with the separator at the end;
        # every field is followed by `sep` plus a space except the last one.
        fields = [f"{number:2d}/{total:2d}"]
        if claim_id:
            fields.append(claim["claim_id"])
        fields.append(f"{vtype:9s}")
        fields.append(f"{stream_type:9s}")
        fields.append(f"{mtype:17s}")
        fields.append(f"{channel:40s}")
        fields.append(f'"{name}"')

        lines.append(f"{sep} ".join(fields))

    return funcs.print_content(lines, file=file, fdate=fdate)
def print_ch_subs_latest(ch_latest_claims, claim_id=False, typ=True,
                         title=False, sanitize=False,
                         start=1, end=0,
                         file=None, fdate=False, sep=";"):
    """Print a summary of the channels with their latest published claims.

    Parameters
    ----------
    ch_latest_claims: list of dict
        Each dictionary must have the keys `'channel'`, `'claim_id'`,
        and `'claims'` (the latest claims of that channel).
    claim_id: bool, optional
        It defaults to `False`. Show the claim ID of each claim.
    typ: bool, optional
        It defaults to `True`. Show the value type and stream type
        of each claim.
    title: bool, optional
        It defaults to `False`. Show the title of the claim
        instead of the claim name, if available.
    sanitize: bool, optional
        It defaults to `False`, in which case it will not remove the emojis
        from the claim name or title.
        This option requires the `emoji` package to be installed.
    start: int, optional
        It defaults to 1. Show channels starting from this index.
    end: int, optional
        It defaults to 0. Show channels until and including this index.
        If it is 0, show until the last channel.
    file: str, optional
        It defaults to `None`, in which case the summary is printed
        to the terminal. Otherwise it must be a writable path.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. Separator between the data fields.

    Returns
    -------
    False
        If there are no channels to print.
    """
    # Fixed: the original guard compared `len(...) < 0`, which is never
    # true; an empty list must bail out, so compare `< 1`.
    if not ch_latest_claims or len(ch_latest_claims) < 1:
        return False

    n_channels = len(ch_latest_claims)

    out = []

    for num, result in enumerate(ch_latest_claims, start=1):
        if num < start:
            continue
        if end != 0 and num > end:
            break

        channel = result["channel"]
        cid = result["claim_id"]
        claims = result["claims"]

        out.append(f"Channel {num}/{n_channels}, {channel}, {cid}")

        if not claims:
            out.append(" - Invalid channel (removed?)")
            if num < n_channels:
                out.append("")
            continue

        n_claims = len(claims)

        for k, claim in enumerate(claims, start=1):
            source_info = claim["value"]
            r_time = int(source_info.get("release_time", 0))
            r_time = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                   time.localtime(r_time))

            vtype = claim["value_type"]

            if "stream_type" in source_info:
                stream_type = source_info.get("stream_type")
            else:
                stream_type = 8 * "_"

            size = 0
            if "source" in source_info:
                size = int(source_info["source"].get("size", 0))

            seconds = 0
            if "video" in source_info:
                seconds = source_info["video"].get("duration", 0)
            elif "audio" in source_info:
                seconds = source_info["audio"].get("duration", 0)

            mi = seconds // 60
            sec = seconds % 60
            duration = f"{mi:3d}:{sec:02d}"
            size_mb = size / (1024**2)

            c_name = claim["name"]
            if title and "title" in source_info:
                c_name = source_info["title"]

            if sanitize:
                c_name = funcs.sanitize_text(c_name)

            line = f" {k:2d}/{n_claims:2d}" + f"{sep} "
            line += r_time + f"{sep} "
            if claim_id:
                line += '"' + claim["claim_id"] + '"' + f"{sep} "
            if typ:
                line += f"{vtype:10s}" + f"{sep} "
                line += f"{stream_type:9s}" + f"{sep} "
            line += f"{duration}" + f"{sep} "
            line += f"{size_mb:9.4f} MB" + f"{sep} "
            line += '"' + c_name + '"'
            out.append(line)

        if num < n_channels:
            out.append("")

    print()
    funcs.print_content(out, file=file, fdate=fdate)
def print_r_comments(comments, sub_replies=True, full=False,
                     indent=0, sanitize=False, fd=None):
    """Print the comments included in the comment list.

    This function calls itself recursively in order to get
    the replies from the inspected comments.

    Parameters
    ----------
    comments: list of dict
        Each dict is a comment that may have the `'sub_replies'` key
        with the replies to this comment.
    sub_replies: bool, optional
        It defaults to `True`, in which case it will recursively print
        the replies under each comment, if the `'sub_replies'` key
        is found in the comment.
        If it is `False` only the root level comments (1st level)
        will be printed.
    full: bool, optional
        It defaults to `False`, in which case only 80 characters
        of the first line of the comment will be printed.
        If it is `True` it will print the full comment, which may be
        as big as 2000 characters.
    indent: int, optional
        It defaults to 0, which indicates that the comment will be printed
        with no indentation.
        As this function is called recursively, it will print each level
        with more indentation (2, 4, 6, etc.).
    sanitize: bool, optional
        It defaults to `False`, in which case it will not remove the emojis
        from the comments.
        If it is `True` it will remove these unicode characters.
        This option requires the `emoji` package to be installed.
    fd: io.StringIO, optional
        It defaults to `None`, in which case the output will be printed
        to the terminal.
        If it is present, it is an object created by `open()`
        ready to be used for writing text.
        After calling this function, we must `fd.close()`
        to close the object.
    """
    n_base = len(comments)
    indentation = indent * " "

    for num, comment in enumerate(comments, start=1):
        ch = comment.get("channel_url", "lbry://_Unknown_#000")
        # Fixed: use `split` instead of `lstrip("lbry://")`; `lstrip`
        # removes any leading run of the characters {l, b, r, y, :, /},
        # which mangles channel names beginning with those letters
        # (e.g. "lbry://bob#..." became "ob#...").
        ch = ch.split("lbry://")[-1].split("#")
        ch_name = ch[0] + "#" + ch[1][0:3]

        comm = comment["comment"]
        if sanitize:
            comm = funcs.sanitize_text(comm)

        if full:
            cmmnt = f'"{comm}"'
        else:
            # Only keep the first line, truncated to 80 characters.
            comm = comm.splitlines()
            if len(comm) > 0:
                comm = comm[0]
            else:
                comm = ""
            if len(comm) > 80:
                cmmnt = f'"{comm:.80s}..."'
            else:
                cmmnt = f'"{comm}"'

        line = (f"{indentation}"
                + f"{num:2d}/{n_base:2d}; {ch_name:30s}; {cmmnt}")

        if fd:
            print(line, file=fd)
        else:
            print(line)

        # Recurse into the replies, indenting two extra spaces per level.
        if (sub_replies
                and "replies" in comment
                and "sub_replies" in comment
                and comment["sub_replies"]):
            print_r_comments(comment["sub_replies"], sub_replies=True,
                             indent=indent + 2,
                             sanitize=sanitize, fd=fd)
def print_items(items=None, show="all",
                blocks=False, cid=True, blobs=True, size=True,
                typ=False, ch=False, ch_online=True,
                name=True, title=False, path=False,
                sanitize=False,
                start=1, end=0, channel=None, reverse=False,
                file=None, fdate=False, sep=";",
                server="http://localhost:5279"):
    """Print information on each claim in the given list of claims.

    Parameters
    ----------
    items: list of dict
        List of items to print information about.
        Each item should be a dictionary filled with information
        from the standard output of the `lbrynet file list` command.
    show: str, optional
        It defaults to `'all'`, in which case it shows all items.
        If it is `'incomplete'` it will show claims that are missing blobs.
        If it is `'full'` it will show claims that have all blobs.
        If it is `'media'` it will show claims that have the media file
        (mp4, mp3, mkv, etc.).
        Normally only items that have all blobs also have a media file;
        however, if the claim is currently being downloaded
        a partial media file may be present.
        If it is `'missing'` it will show claims that don't have
        the media file, whether the full blobs are present or not.
    blocks: bool, optional
        It defaults to `False`, in which case it won't print
        the `height` block of the claims.
        If it is `True` it will print this value, which gives some idea
        of when the claim was registered in the blockchain.
    cid: bool, optional
        It defaults to `True`. Show the `'claim_id'` of the claim.
        It is a 40 character alphanumeric string.
    blobs: bool, optional
        It defaults to `True`. Show the number of blobs in the file,
        and how many are complete.
    size: bool, optional
        It defaults to `True`. Show the length of the stream in minutes
        and seconds, like `14:12`, when possible (audio and video),
        and also the size in mebibytes (MB).
    typ: bool, optional
        It defaults to `False`. Show the type of stream
        (video, audio, document, etc.).
    ch: bool, optional
        It defaults to `False`. Show the name of the channel that published
        the claim.
        This is slow if `ch_online=True`.
    ch_online: bool, optional
        It defaults to `True`, in which case it searches for the channel name
        by doing a reverse search of the item online.
        This makes the search slow.
        By setting it to `False` it will consider the channel name
        stored in the input dictionary itself, which will be faster
        but it won't be the full name of the channel.
        If no channel is found offline, then it will set a default value
        `'_Unknown_'` just so it can be printed with no error.
        This parameter only has effect if `ch=True`, or if `channel`
        is used, as it internally sets `ch=True`.
    name: bool, optional
        It defaults to `True`. Show the name of the claim.
    title: bool, optional
        It defaults to `False`. Show the title of the claim.
    path: bool, optional
        It defaults to `False`. Show the full path of the saved media file.
    sanitize: bool, optional
        It defaults to `False`, in which case it will not remove the emojis
        from the name of the claim and channel.
        If it is `True` it will remove these unicode characters.
        This option requires the `emoji` package to be installed.
    start: int, optional
        It defaults to 1. Show claims starting from this index
        in the list of items.
    end: int, optional
        It defaults to 0. Show claims until and including this index
        in the list of items.
        If it is 0, it is the same as the last index in the list.
    channel: str, optional
        It defaults to `None`.
        It must be a channel's name, in which case it shows only the claims
        published by this channel.
        Using this parameter sets `ch=True`, and is slow because
        it needs to perform an additional search for the channel.
    reverse: bool, optional
        It defaults to `False`, in which case older items come first
        in the output list.
        If it is `True` newer claims are at the beginning of the list.
    file: str, optional
        It defaults to `None`.
        It must be a user writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name
        of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary.
        Since the claim name can have commas, a semicolon `;`
        is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter
        from its default value.

    Returns
    -------
    bool
        It returns `True` if it printed the summary successfully.
        If there is any error it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not items or not isinstance(items, (list, tuple)):
        print("No input item list. "
              "A list of items must be obtained from `lbrynet file list`.")
        print(f"items={items}, "
              f"show={show}, "
              f"blocks={blocks}, cid={cid}, blobs={blobs}, size={size}, "
              f"typ={typ}, ch={ch}, ch_online={ch_online}, "
              f"name={name}, title={title}, path={path}, "
              f"sanitize={sanitize}, reverse={reverse}, "
              f"start={start}, end={end}, channel={channel}, "
              f"file={file}, fdate={fdate}, sep={sep}")
        if file:
            print("No file written.")
        return False

    n_items = len(items)

    if reverse:
        items.reverse()

    if (not isinstance(show, str)
            or show not in ("all", "media", "missing",
                            "incomplete", "full")):
        print(">>> Error: show can only be 'all', 'media', 'missing', "
              "'incomplete', or 'full'")
        print(f"show={show}")
        return False

    if channel:
        if not isinstance(channel, str):
            print(">>> Error: channel must be a string")
            return False
        # Searching by channel requires resolving the channel name.
        ch = True

    if file and not isinstance(file, str):
        print("The file must be a string.")
        print(f"file={file}")
        return False

    out = []

    for num, item in enumerate(items, start=1):
        if num < start:
            continue
        if end != 0 and num > end:
            break

        st_path = item["download_path"]
        st_blobs = item["blobs_completed"]
        st_blobs_in_stream = item["blobs_in_stream"]
        # st_completed = item["completed"]

        # Skip printing an item depending on the value of `show`,
        # and whether the blobs or media files exist or not.
        # Fixed: compare with `==` instead of the substring test
        # `show in "media"`, which only worked by accident
        # for the validated values of `show`.
        if show == "media" and not st_path:
            continue
        elif show == "missing" and st_path:
            continue
        elif show == "incomplete" and st_blobs == st_blobs_in_stream:
            continue
        elif show == "full" and st_blobs < st_blobs_in_stream:
            continue

        meta = item["metadata"]
        st_height = item["height"]
        st_time = int(meta["release_time"])
        st_time = time.strftime("%Y%m%d_%H:%M:%S%z",
                                time.localtime(st_time))

        st_claim_id = item["claim_id"]
        st_type = meta.get("stream_type", 8 * "_")
        st_claim_name = item["claim_name"]
        st_title = meta["title"]

        length_s = 0
        if "video" in meta and "duration" in meta["video"]:
            length_s = meta["video"]["duration"]
        if "audio" in meta and "duration" in meta["audio"]:
            length_s = meta["audio"]["duration"]

        rem_s = length_s % 60
        rem_min = length_s // 60

        st_size = 0
        if "source" in meta and "size" in meta["source"]:
            st_size = float(meta["source"]["size"])
            st_size = st_size / (1024**2)  # to MB

        if ch:
            if ch_online:
                # Searching online is slower but it gets the full
                # channel name.
                st_channel = srch_ch.find_channel(cid=item["claim_id"],
                                                  full=True,
                                                  server=server)
                if not st_channel:
                    print(st_claim_name)
                    print()
                    continue
            else:
                # Searching offline is necessary for "invalid" claims
                # that no longer exist as active claims online.
                # We don't want to skip this item so we force
                # a channel name.
                st_channel = item["channel_name"]
                if not st_channel:
                    st_channel = "_Unknown_"

            # Skip if the item is not published by the specified channel
            if channel and channel not in st_channel:
                continue

            if sanitize:
                st_channel = funcs.sanitize_text(st_channel)

        if sanitize:
            st_claim_name = funcs.sanitize_text(st_claim_name)
            st_title = funcs.sanitize_text(st_title)

        line = f"{num:4d}/{n_items:4d}"
        if blocks:
            line += f"{sep} " + f"{st_height:8d}"
        line += f"{sep} " + f"{st_time}"
        if cid:
            line += f"{sep} " + f"{st_claim_id}"
        if blobs:
            line += f"{sep} " + f"{st_blobs:3d}/{st_blobs_in_stream:3d}"
        if size:
            line += f"{sep} " + f"{rem_min:3d}:{rem_s:02d}"
            line += f"{sep} " + f"{st_size:9.4f} MB"
        if typ:
            line += f"{sep} " + f"{st_type:9s}"
        if st_path:
            line += f"{sep} " + "media   "
        else:
            line += f"{sep} " + "no-media"
        if ch:
            line += f"{sep} " + f"{st_channel}"
        if name:
            line += f"{sep} " + f'"{st_claim_name}"'
        if title:
            line += f"{sep} " + f'"{st_title}"'
        if path:
            line += f"{sep} " + f'"{st_path}"'

        out.append(line)

    print(f"Number of shown items: {len(out)}")

    funcs.print_content(out, file=file, fdate=fdate)

    return True
def print_sch_claims(claims,
                     blocks=False, claim_id=False, typ=False, ch_name=False,
                     title=False, sanitize=False,
                     start=1, end=0,
                     reverse=False,
                     file=None, fdate=None, sep=";"):
    """Print the provided list of claims, particularly those from a channel.

    Parameters
    ----------
    claims: list of dict
        Claims resolved online, such as those from a channel search.
    blocks: bool, optional
        Show the creation height and height blocks of the claims.
    claim_id: bool, optional
        Show the claim ID.
    typ: bool, optional
        Show the value type, stream type, and media type.
    ch_name: bool, optional
        Show the name of the signing channel.
    title: bool, optional
        Show the title of the claim instead of its name, if available.
    sanitize: bool, optional
        Remove emojis from the printed names; requires the `emoji` package.
    start: int, optional
        Start printing from this claim index.
    end: int, optional
        Stop printing at this claim index; 0 means until the end.
    reverse: bool, optional
        Reverse the order of the claims before printing.
    file: str, optional
        Writable path for the summary; `None` prints to the terminal.
    fdate: bool, optional
        Add the date to the name of the summary file.
    sep: str, optional
        Separator between the data fields.
    """
    n_claims = len(claims)

    if reverse:
        claims.reverse()

    out = []

    for num, claim in enumerate(claims, start=1):
        if num < start:
            continue
        if end != 0 and num > end:
            break

        creation = claim["meta"]["creation_height"]
        height = claim["height"]
        res_time = int(claim["value"].get("release_time", 0))
        res_time = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                 time.localtime(res_time))

        vtype = claim["value_type"]

        if "stream_type" in claim["value"]:
            stream_type = claim["value"].get("stream_type")
        else:
            stream_type = 8 * "_"

        if "source" in claim["value"]:
            mtype = claim["value"]["source"].get("media_type", 14 * "_")
        else:
            mtype = 14 * "_"

        if "signing_channel" in claim:
            # channel = claim["signing_channel"].get("name", 14 * "_")
            channel = claim["signing_channel"]["canonical_url"]
            # Fixed: use `split` instead of `lstrip("lbry://")`; `lstrip`
            # removes any leading run of the characters {l, b, r, y, :, /},
            # which mangles channel names beginning with those letters.
            channel = channel.split("lbry://")[-1]
        else:
            channel = 14 * "_"

        # Fixed: sanitize the channel once; previously it was sanitized
        # twice when a signing channel was present.
        if sanitize:
            channel = funcs.sanitize_text(channel)

        name = claim["name"]
        if title and "title" in claim["value"]:
            name = claim["value"]["title"]

        if sanitize:
            name = funcs.sanitize_text(name)

        length_s = 0
        rem_s = 0
        rem_min = 0

        if "video" in claim["value"] and "duration" in claim["value"]["video"]:
            length_s = claim["value"]["video"]["duration"]
        if "audio" in claim["value"] and "duration" in claim["value"]["audio"]:
            length_s = claim["value"]["audio"]["duration"]

        rem_s = length_s % 60
        rem_min = length_s // 60

        size = 0
        if "source" in claim["value"] and "size" in claim["value"]["source"]:
            size = float(claim["value"]["source"]["size"])
            size = size / (1024**2)  # to MB

        line = f"{num:4d}/{n_claims:4d}" + f"{sep} "
        if blocks:
            line += f"{creation:8d}" + f"{sep}"
            line += f"{height:8d}" + f"{sep} "
        line += res_time + f"{sep} "
        if claim_id:
            line += claim["claim_id"] + f"{sep} "
        if typ:
            line += f"{vtype:10s}" + f"{sep} "
            line += f"{stream_type:9s}" + f"{sep} "
            line += f"{mtype:17s}" + f"{sep} "
        if ch_name:
            line += f"{channel}" + f"{sep} "
        line += f"{rem_min:3d}:{rem_s:02d}" + f"{sep} "
        line += f"{size:9.4f} MB" + f"{sep} "
        line += f'"{name}"'
        out.append(line)

    content = funcs.print_content(out, file=file, fdate=fdate)

    return content
def print_claims(ch_claims,
                 updates=False, claim_id=False, addresses=False, typ=False,
                 amounts=True, ch_name=False,
                 title=False, sanitize=False,
                 file=None, fdate=False, sep=";"):
    """Print the list of channels and claims.

    Parameters
    ----------
    ch_claims: list of dict
        Each dictionary describes one channel, with keys 'name', 'id',
        'address', 'size', 'duration', and 'claims'.
        The name `'_Unknown_'` marks claims with no channel (anonymous).
    updates, claim_id, addresses, typ, amounts, ch_name, title, sanitize:
        Passed through to `print_s_claims` to control the claim fields.
    file: str, optional
        Writable path for the summary; `None` prints to the terminal.
    fdate: bool, optional
        Add the date to the name of the summary file.
    sep: str, optional
        Separator between the data fields.
    """
    n_chs = len(ch_claims)

    out = []
    t_n_claims = 0
    t_size = 0
    t_duration = 0

    t_n_an_claims = 0
    t_an_size = 0
    t_an_duration = 0

    anon_exists = False

    # Anonymous claims are not counted as a channel.
    # Fixed: compare with `==` instead of the substring test
    # `name in "_Unknown_"`, which also matched names like "_" or "known".
    for ch_claim in ch_claims:
        if ch_claim["name"] == "_Unknown_":
            anon_exists = True
            n_chs = n_chs - 1

    for n_ch, ch_claim in enumerate(ch_claims, start=1):
        chan_name = ch_claim["name"]
        # Fixed: determine anonymity per channel on every iteration;
        # previously the flag was never reset, so every channel after
        # an anonymous one was also treated as anonymous.
        is_anon = chan_name == "_Unknown_"
        if sanitize:
            chan_name = funcs.sanitize_text(chan_name)
        chan_name = '"' + chan_name + '"'

        chan_id = ch_claim["id"]
        chan_add = ch_claim["address"]
        chan_size = ch_claim["size"]
        chan_duration = ch_claim["duration"]
        claims = ch_claim["claims"]

        if is_anon:
            t_n_an_claims += len(claims)
            t_an_size += chan_size
            t_an_duration += chan_duration
        else:
            t_n_claims += len(claims)
            t_size += chan_size
            t_duration += chan_duration

        GB = chan_size / (1024**3)  # to GiB
        hrs = chan_duration / 3600
        days = hrs / 24

        hr = chan_duration // 3600
        mi = (chan_duration % 3600) // 60
        sec = (chan_duration % 3600) % 60

        if is_anon:
            # Anonymous claims have no channel index.
            line = ""
        else:
            line = f"{n_ch:2d}/{n_chs:2d}" + f"{sep} "
        line += f"{chan_name}" + f"{sep} "
        line += f"{chan_id}" + f"{sep} "
        line += f"{chan_add}" + f"{sep} "
        line += f"{GB:.4f} GiB" + f"{sep} "
        line += f"{hr} h {mi} min {sec} s, or {days:.4f} days"
        out.append(line)

        out = print_s_claims(claims, output=out,
                             updates=updates, claim_id=claim_id,
                             addresses=addresses,
                             typ=typ, amounts=amounts,
                             ch_name=ch_name,
                             title=title, sanitize=sanitize,
                             sep=sep)

        if not is_anon:
            if n_ch < n_chs or anon_exists:
                out.append("")

    funcs.print_content(out, file=file, fdate=fdate)
def print_s_claims(claims, output=None,
                   updates=False, claim_id=False, addresses=False,
                   typ=False, amounts=True, ch_name=False,
                   title=False, sanitize=False,
                   start=1, end=0, reverse=False,
                   sep=";"):
    """Prepare output list in order to print the claims.

    Parameters
    ----------
    claims: list of dict
        Claims resolved online.
    output: list of str, optional
        Previously prepared lines; new lines are appended to it.
    updates: bool, optional
        Show the claim operation and its timestamp.
    claim_id: bool, optional
        Show the claim ID.
    addresses: bool, optional
        Show the claim address.
    typ: bool, optional
        Show the value type, stream type, and media type.
    amounts: bool, optional
        Show the claim amount and effective amount.
    ch_name: bool, optional
        Show the name of the signing channel.
    title: bool, optional
        Show the title of the claim instead of its name, if available.
    sanitize: bool, optional
        Remove emojis from the printed names; requires the `emoji` package.
    start: int, optional
        Start printing from this claim index.
    end: int, optional
        Stop printing at this claim index; 0 means until the end.
    reverse: bool, optional
        Reverse the order of the claims before printing.
    sep: str, optional
        Separator between the data fields.

    Returns
    -------
    list of str
        The `output` list with one formatted line appended per claim.
    """
    if not output:
        output = []

    if reverse:
        claims.reverse()

    n_claims = len(claims)

    if n_claims < 1:
        output.append(" No claims")

    for num, claim in enumerate(claims, start=1):
        if num < start:
            continue
        if end != 0 and num > end:
            break

        meta = claim["meta"]
        value = claim["value"]

        cid = claim["claim_id"]
        ad = claim["address"]

        name = claim["name"]
        if title and "title" in value:
            name = value["title"]
        if sanitize:
            name = funcs.sanitize_text(name)
        name = '"' + name + '"'

        claim_op = claim["claim_op"]
        tstamp = claim["timestamp"]
        timestamp = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                  time.localtime(tstamp))

        vtype = claim["value_type"]

        if "stream_type" in value:
            stream_type = value.get("stream_type", 8 * "_")
        else:
            stream_type = 8 * "_"

        if "source" in value:
            mtype = value["source"].get("media_type", 14 * "_")
        else:
            mtype = 14 * "_"

        if "signing_channel" in claim:
            if "canonical_url" in claim["signing_channel"]:
                channel = claim["signing_channel"]["canonical_url"]
                # Fixed: use `split` instead of `lstrip("lbry://")`;
                # `lstrip` removes any leading run of the characters
                # {l, b, r, y, :, /}, which mangles channel names
                # beginning with those letters.
                channel = channel.split("lbry://")[-1]
            else:
                channel = claim["signing_channel"]["permanent_url"]
                _ch, _id = channel.split("#")
                _ch = _ch.split("lbry://")[-1]
                channel = _ch + "#" + _id[0:3]

            # Fixed: sanitize once; previously the channel was sanitized
            # a second time after this branch.
            if sanitize:
                channel = funcs.sanitize_text(channel)
        else:
            channel = 14 * "_"

        length_s = 0
        rem_s = 0
        rem_min = 0

        if "video" in value and "duration" in value["video"]:
            length_s = value["video"]["duration"]
        if "audio" in value and "duration" in value["audio"]:
            length_s = value["audio"]["duration"]

        rem_s = length_s % 60
        rem_min = length_s // 60

        size = 0
        if "source" in value and "size" in value["source"]:
            size = float(value["source"]["size"])
            size = size / (1024**2)  # to MB

        amount = float(claim["amount"])
        t_amount = float(meta.get("effective_amount", 0))
        rep = meta.get("reposted", 0)
        # creation = meta.get("creation_height", 0)
        # height = claim["height"]

        rels_time = int(value.get("release_time", 0))
        rels_time = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                  time.localtime(rels_time))

        line = f"{num:4d}/{n_claims:4d}" + f"{sep} "
        line += rels_time + f"{sep} "
        if updates:
            line += f"{claim_op}" + f"{sep} "
            line += f"{timestamp}" + f"{sep} "
        if claim_id:
            line += f"{cid}" + f"{sep} "
        if addresses:
            line += f"{ad}" + f"{sep} "
        if typ:
            line += f"{vtype:10s}" + f"{sep} "
            line += f"{stream_type:9s}" + f"{sep} "
            line += f"{mtype:17s}" + f"{sep} "
        if amounts:
            line += f"{amount:14.8f}" + f"{sep} "
            line += f"{t_amount:14.8f}" + f"{sep} "
        if ch_name:
            line += f"{channel}" + f"{sep} "

        line += f"r.{rep:3d}" + f"{sep} "
        line += f"{rem_min:3d}:{rem_s:02d}" + f"{sep} "
        line += f"{size:9.4f} MB" + f"{sep} "
        line += f"{name}"

        output.append(line)

    return output
def print_channels(channels,
                   updates=False, claim_id=False, addresses=True,
                   accounts=False, amounts=True,
                   sanitize=False,
                   file=None, fdate=False, sep=";"):
    """Print the list of channels obtained from get_channels."""
    total = len(channels)
    rows = []

    for index, chan in enumerate(channels, start=1):
        meta = chan["meta"]
        value = chan["value"]

        cid = chan["claim_id"]
        address = chan["address"]

        # The canonical URL gives the full channel name with its short ID.
        # name = chan["name"]
        name = chan["canonical_url"].split("lbry://")[1]
        if sanitize:
            name = funcs.sanitize_text(name)
        name = '"' + name + '"'

        timestamp = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                  time.localtime(chan["timestamp"]))

        title = value.get("title", 10 * "_")
        if sanitize:
            title = funcs.sanitize_text(title)
        title = '"' + title + '"'

        claim_op = chan["claim_op"]
        amount = float(chan["amount"])

        c_timestamp = meta.get("creation_timestamp", 0)
        c_timestamp = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                    time.localtime(c_timestamp))
        # Channels with resolution errors have no valid creation time.
        if "error" in meta:
            c_timestamp = 24 * "_"

        n_claims = meta.get("claims_in_channel", 0)
        e_amount = float(meta.get("effective_amount", 0))
        ch_acc = chan["account"]

        # Gather the selected fields, then join them with the separator;
        # each field is followed by `sep` plus a space except the last.
        fields = [f"{index:2d}/{total:2d}", f"{c_timestamp}"]
        if updates:
            fields.append(f"{claim_op}")
            fields.append(f"{timestamp}")
        if claim_id:
            fields.append(f"{cid}")
        if addresses:
            fields.append(f"{address}")
        if accounts:
            fields.append(f"in {ch_acc}")
        if amounts:
            fields.append(f"{amount:14.8f}")
            fields.append(f"{e_amount:14.8f}")
        fields.append(f"c.{n_claims:4d}")
        fields.append(f"{name:48s}")
        fields.append(f"{title}")

        rows.append(f"{sep} ".join(fields))

    funcs.print_content(rows, file=file, fdate=fdate)
def print_p_lines(peers_info,
                  cid=False, typ=True, title=False,
                  sanitize=False,
                  file=None, fdate=False, sep=";"):
    """Print a summary for each claim of the peer search."""
    n_claims = peers_info["n_claims"]
    streams_info = peers_info["streams_info"]

    rows = []

    for idx, info in enumerate(streams_info, start=1):
        stream = info["stream"]
        peers = info["peers"]
        size = info["size"]
        seconds = info["duration"]
        local_node = f'{info["local_node"]}'

        name = stream["name"]
        rels_time = int(stream["value"].get("release_time", 0))
        rels_time = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                  time.localtime(rels_time))
        if title and "title" in stream["value"]:
            name = stream["value"]["title"]
        if sanitize:
            name = funcs.sanitize_text(name)
        name = f'"{name}"'

        vtype = stream["value_type"]
        stream_type = stream["value"].get("stream_type", 8 * "_")

        claim_id = stream["claim_id"]
        n_peers = len(peers)

        minutes, secs = divmod(seconds, 60)
        duration = f"{minutes:3d}:{secs:02d}"
        size_mb = size / (1024**2)

        # Gather the selected fields, then join them with the separator;
        # each field is followed by `sep` plus a space except the last.
        fields = [f"{idx:4d}/{n_claims:4d}", rels_time]
        if cid:
            fields.append(f"{claim_id}")
        if typ:
            fields.append(f"{vtype:10s}")
            fields.append(f"{stream_type:9s}")
        fields.append(duration)
        fields.append(f"{size_mb:9.4f} MB")
        fields.append(f"peers: {n_peers:2d}")
        fields.append(f"hosted: {local_node:5s}")
        fields.append(name)

        rows.append(f"{sep} ".join(fields))

    funcs.print_content(rows, file=file, fdate=fdate)