def download_collection(collection, max_claims=2, reverse=False,
                        ddir=None, save_file=True,
                        server="http://localhost:5279"):
    """Internal function to download the claims inside a collection.

    Parameters
    ----------
    collection: dict
        A resolved collection claim; the claim IDs to download are
        read from `collection["value"]["claims"]`.
    max_claims: int, optional
        It defaults to 2; maximum number of items to download.
        If it is 0 (or another falsy value) all claims are considered.
    reverse: bool, optional
        It defaults to `False`; if it is `True` the collection items
        are processed in reverse order.
    ddir: str, optional
        Download directory passed to `lbrynet_get`.
    save_file: bool, optional
        Passed to `lbrynet_get`; whether to save the media file.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the local `lbrynet` daemon.

    Returns
    -------
    list of dict
        The output of `lbrynet_get` for each processed claim;
        an entry may be falsy if `lbrynet get` returned nothing.
    """
    claims = collection["value"]["claims"]
    n_claims = len(claims)

    if not max_claims:
        max_claims = n_claims

    if reverse:
        claims.reverse()

    info_get = []

    print()
    print("Collection")
    print(80 * "-")

    for num, cid in enumerate(claims, start=1):
        # Check the limit before resolving; the original searched first,
        # wasting one online resolve for the first claim past the limit.
        if num > max_claims:
            break

        s = srch.search_item(cid=cid, offline=False,
                             server=server)

        print(f"Claim {num}/{n_claims}")

        # Guard against a failed resolution; the original would crash
        # on `s["canonical_url"]` when `search_item` returned False.
        if not s:
            print(">>> Unable to resolve claim; skip.")
            print()
            continue

        prnt.print_info_pre_get(s, offline=False)

        info = lbrynet_get(uri=s["canonical_url"], ddir=ddir,
                           save_file=save_file, server=server)
        info_get.append(info)

        if info:
            prnt.print_info_post_get(info)
        else:
            print(">>> Empty information from `lbrynet get`")
        print()

    return info_get
def count_blobs(uri=None, cid=None, name=None, blobfiles=None,
                print_msg=True, print_each=True,
                server="http://localhost:5279"):
    """Count the blobs of a claim, and check if they are in `blobfiles`.

    Parameters
    ----------
    uri: str
        A unified resource identifier (URI) to a claim on the LBRY network.
        It can be full or partial. ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'
        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI. ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    blobfiles: str, optional
        It defaults to `'$HOME/.local/share/lbry/lbrynet/blobfiles'`.
        The path to the directory where the blobs were downloaded.
        This is normally seen with `lbrynet settings get`,
        under `'data_dir'`. It can be any other directory
        if it is symbolically linked to it, such as `'/opt/lbryblobfiles'`
    print_msg: bool, optional
        It defaults to `True`, in which case it will print information
        on the found claim.
        If `print_msg=False`, it also implies `print_each=False`.
    print_each: bool, optional
        It defaults to `True`, in which case it will print all blobs
        that belong to the claim, and whether each of them is already
        in `blobfiles`.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the local `lbrynet` daemon.

    Returns
    -------
    dict
        A dictionary with the keys 'canonical_url', 'claim_id', 'name',
        'channel', 'sd_hash', 'all_present' (bool),
        'blobs' (list of `[num, blob_hash, present]`),
        and 'missing' (the subset of 'blobs' not found in `blobfiles`).
        If the 'sd_hash' blob itself is missing, a dictionary with
        an 'error_no_sd_hash' key is returned instead.
    False
        If there is a problem it will return `False`.
    """
    # NOTE(review): the original text of this initial section was
    # corrupted (redacted) in the source; it is reconstructed here from
    # the sibling functions of this module -- TODO confirm upstream.
    if not funcs.server_exists(server=server):
        return False

    if not print_msg:
        print_each = False

    if not blobfiles or not os.path.exists(blobfiles):
        # Default documented in the docstring -- TODO confirm this is
        # how the original derived the blobfiles directory.
        blobfiles = os.path.join(os.path.expanduser("~"),
                                 ".local", "share",
                                 "lbry", "lbrynet", "blobfiles")
        if not os.path.exists(blobfiles):
            print(f"Blobfiles directory does not exist: {blobfiles}")
            return False

    item = srch.search_item(uri=uri, cid=cid, name=name,
                            offline=False, server=server)
    if not item:
        return False

    c_uri = item["canonical_url"]
    c_cid = item["claim_id"]
    c_name = item["name"]

    if "signing_channel" in item and "name" in item["signing_channel"]:
        c_channel = item["signing_channel"]["name"]
    else:
        c_channel = "@_Unknown_"

    sd_hash = item["value"]["source"]["sd_hash"]

    if print_msg:
        print(f"canonical_url: {c_uri}")
        print(f"claim_id: {c_cid}")
        print(f"name: {c_name}")
        print(f"channel: {c_channel}")
        print(f"sd_hash: {sd_hash}")

    sd_hash_f = os.path.join(blobfiles, sd_hash)

    # The 'sd_hash' blob is the stream descriptor; it lists every other
    # blob of the stream, so without it nothing can be counted.
    if not os.path.exists(sd_hash_f):
        print(f">>> 'sd_hash' blob not in directory: {blobfiles}")
        print(">>> Start downloading the claim, or redownload it.")
        return {"error_no_sd_hash":
                "'sd_hash' blob not in directory "
                f"{blobfiles}",
                "canonical_url": c_uri,
                "claim_id": c_cid,
                "name": c_name,
                "channel": c_channel,
                "sd_hash": sd_hash}

    # Context manager so the descriptor file is always closed
    # (the original used a bare open/readlines/close sequence).
    with open(sd_hash_f) as fd:
        blobs = json.load(fd)

    # The last entry in 'blobs' is a terminator without a 'blob_hash'.
    n_blobs = len(blobs["blobs"]) - 1

    if print_msg:
        print(f"Total blobs: {n_blobs}")

    present_list = []
    blob_list = []
    blob_missing = []

    for blob in blobs["blobs"]:
        if "blob_hash" not in blob:
            continue

        num = blob["blob_num"]
        blob_hash = blob["blob_hash"]
        present = os.path.exists(os.path.join(blobfiles, blob_hash))
        present_list.append(present)
        blob_list.append([num, blob_hash, present])

        if not present:
            blob_missing.append([num, blob_hash, present])

        if print_msg and print_each:
            print("{:3d}/{:3d}, {}, {}".format(num, n_blobs,
                                               blob_hash, present))

    all_present = all(present_list)

    if print_msg:
        print(f"All blob files present: {all_present}")

    blob_info = {"canonical_url": c_uri,
                 "claim_id": c_cid,
                 "name": c_name,
                 "channel": c_channel,
                 "sd_hash": sd_hash,
                 "all_present": all_present,
                 "blobs": blob_list,
                 "missing": blob_missing}

    return blob_info
def get_all_supports(server="http://localhost:5279"):
    """Get all supports in a dictionary; all, valid, and invalid.

    Parameters
    ----------
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the local `lbrynet` daemon.

    Returns
    -------
    dict
        A dictionary with information on the supports.
        The keys are the following:
        - 'all_supports': list with dictionaries of all supports.
        - 'all_resolved': list with dictionaries of all resolved claims
          corresponding to all supports.
          Invalid claims will simply be `False`.
        - 'valid_supports': list with dictionaries of supports
          for valid claims only.
        - 'valid_resolved': list with dictionaries of resolved claims
          corresponding to `'valid_supports'` only.
        - 'invalid_supports': list with dictionaries of supports
          for invalid claims. The claim IDs in these dictionaries
          cannot be resolved anymore.
    False
        If there is a problem or no list of supports,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    msg = {"method": "support_list",
           "params": {"page_size": 99000}}
    output = requests.post(server, json=msg).json()

    if "error" in output:
        return False

    items = output["result"]["items"]
    n_items = len(items)

    if n_items < 1:
        print(f"Supports found: {n_items}")
        return False

    valid = []
    valid_resolved = []
    invalid = []
    all_supports = []
    all_resolved = []

    for item in items:
        # Pass `server` explicitly; the original omitted it here, so the
        # resolution silently ignored a non-default `server` argument.
        s = srch.search_item(cid=item["claim_id"], server=server)

        if not s:
            invalid.append(item)
        else:
            valid.append(item)
            valid_resolved.append(s)

        all_supports.append(item)
        all_resolved.append(s)

    return {"all_supports": all_supports,
            "all_resolved": all_resolved,
            "valid_supports": valid,
            "valid_resolved": valid_resolved,
            "invalid_supports": invalid}
def get_base_support(uri=None, cid=None, name=None,
                     server="http://localhost:5279"):
    """Get the existing, base, and our support from a claim.

    Parameters
    ----------
    uri, cid, name: str
        A unified resource identifier (URI), a `'claim_id'`,
        or a `'claim_name'` for a claim on the LBRY network.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the local `lbrynet` daemon.

    Returns
    -------
    dict
        A dictionary with information on the support on a claim.
        The keys are the following:
        - 'canonical_url'
        - 'claim_id'
        - 'existing_support': total support that the claim has;
          this is `'base_support'` + `'old_support'`.
        - 'base_support': support that the claim has without our support.
        - 'old_support': support that we have added to this claim;
          it may be zero if this claim does not have
          any support from us.
    False
        If there is a problem or no list of supports,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    item = srch.search_item(uri=uri, cid=cid, name=name, offline=False,
                            server=server)
    if not item:
        return False

    uri = item["canonical_url"]
    cid = item["claim_id"]

    existing = float(item["amount"]) + float(item["meta"]["support_amount"])

    msg = {"method": "support_list",
           "params": {"claim_id": item["claim_id"]}}
    output = requests.post(server, json=msg).json()

    if "error" in output:
        return False

    supported_items = output["result"]["items"]

    # Our own support is the sum of all supports we placed on this claim;
    # an empty list naturally sums to 0 (the original used an explicit
    # `if not ...: pass else: for ...` block for the same effect).
    old_support = sum(float(su_item["amount"])
                      for su_item in supported_items)

    base_support = existing - old_support

    return {"canonical_url": uri,
            "claim_id": cid,
            "existing_support": existing,
            "base_support": base_support,
            "old_support": old_support}
def validate_ch(channel, server="http://localhost:5279"):
    """Add 'valid' key to the channel dictionary."""
    resolved = srch.search_item(uri=channel["uri"],
                                print_error=False,
                                server=server)
    channel["valid"] = resolved
    return channel
def create_comment(comment=None, uri=None, cid=None, name=None,
                   parent_id=None,
                   author_uri=None, author_cid=None, author_name=None,
                   wallet_id="default_wallet",
                   comm_server="https://comments.odysee.com/api/v2",
                   server="http://localhost:5279"):
    """Create a comment or reply on a claim, signed by one of our channels.

    Parameters
    ----------
    comment: str
        The comment text; surrounding whitespace is stripped.
    uri, cid, name: str
        A unified resource identifier (URI), a `'claim_id'`,
        or a `'claim_name'` for the claim that will be commented on.
    parent_id: str, optional
        It defaults to `None`. If it is the `'comment_id'` of an
        existing comment, the new comment will be a reply to it.
    author_uri, author_cid, author_name: str
        URI, claim ID, or name of the channel that signs the comment.
        We must control this channel, that is, have its private keys
        in our wallet.
    wallet_id: str, optional
        It defaults to `'default_wallet'`; the wallet holding the
        signing channel's keys.
    comm_server: str, optional
        It defaults to `'https://comments.odysee.com/api/v2'`.
        It is the address of the comment server.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the local `lbrynet` daemon.

    Returns
    -------
    dict
        The comment server's result for the created comment; among its
        keys are the comment text, 'channel_name' (such as
        `'@MyChannel'`), 'channel_url' (the `channel_name` followed by
        the `channel_id`, such as
        `'lbry://@MyChannel#44660089c9cb227d65c403a4328606173042cadc'`),
        'currency' (for example, 'LBC'), 'support_amount', 'is_hidden',
        'is_pinned', and 'is_fiat'.
    False
        If there is a problem, such as non-existing `wallet_id`,
        or empty `comment`, or invalid claim, or invalid channel,
        it will return `False`.

    Signed data
    -----------
    The comment server requires various parameters. ::
        {"method": "comment.Create",
         "id": 1,
         "jsonrpc": "2.0",
         "params": {
             "channel_id": "90abc0b66ff34a1378581751958f5b98f9043d17",
             "channel_name": "@some-channel",
             "claim_id": "4ba7ec34033a42c76468cdfc463943e5de7e364a",
             "parent_id": "",  # Optional, for replies
             "comment": "some test comment",
             "signature": signature,
             "signing_ts": signing_timestamp}}
    The `signature` and `signing_timestamp` are obtained from signing
    the hexadecimal representation of the comment.
    These values are provided by `sign_comment`.
    """
    print("Create comment")
    print(80 * "-")

    # Reject a missing or whitespace-only comment in a single check
    # (the original tested `not comment` twice, before and after strip).
    if not comment or not comment.strip():
        print(">>> Empty comment.")
        return False

    comment = comment.strip()

    item = srch.search_item(uri=uri, cid=cid, name=name, server=server)
    if not item:
        return False

    claim_id = item["claim_id"]
    uri = item["canonical_url"]
    title = item["value"].get("title", "(None)")

    cl_time = 0
    if "release_time" in item["value"]:
        cl_time = int(item["value"]["release_time"])
        cl_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A",
                                time.localtime(cl_time))

    # The signing channel must resolve, and we must own its keys.
    ch = srch.search_item(uri=author_uri, cid=author_cid,
                          name=author_name, server=server)
    if not ch:
        return False

    print(f"canonical_url: {uri}")
    print(f"claim_id: {claim_id}")
    print(f"release_time: {cl_time}")
    print(f"title: {title}")
    print("comment author:", ch["name"])
    print("comment author ID:", ch["claim_id"])
    print(f"comment server: {comm_server}")
    print(40 * "-")

    sign = sign_comment(comment, ch["name"], wallet_id=wallet_id,
                        server=server)
    if not sign:
        print(">>> Unable to sign; "
              "we must have the private keys of this channel "
              "for this operation to succeed.")
        return False

    params = {"comment": comment,
              "claim_id": claim_id,
              "parent_id": parent_id,
              "channel_id": ch["claim_id"],
              "channel_name": ch["name"],
              "signature": sign["signature"],
              "signing_ts": sign["signing_ts"]}

    output = jsonrpc_post(comm_server, "comment.Create", params)

    if "error" in output:
        print(">>> Error:", output["error"].get("message", None))
        return False

    result = output["result"]
    print_cmnt_result(result, file=None, fdate=False)

    return result
def list_comments(uri=None, cid=None, name=None, sub_replies=True,
                  hidden=False, visible=False,
                  full=False, sanitize=False,
                  file=None, fdate=False,
                  page=1, page_size=999,
                  comm_server="https://comments.odysee.com/api/v2",
                  server="http://localhost:5279"):
    """List comments for a specific claim on a comment server.

    Parameters
    ----------
    uri, cid, name: str
        A unified resource identifier (URI), a `'claim_id'`,
        or a `'claim_name'` for a claim on the LBRY network.
    sub_replies: bool, optional
        It defaults to `True`, in which case it will print the replies
        (2nd, 3rd, 4th,... levels).
        If it is `False` it will only print the root level comments
        (1st level).
    hidden: bool, optional
        It defaults to `False`.
        If it is `True` it will only show the hidden comments.
    visible: bool, optional
        It defaults to `False`.
        If it is `True` it will only show the visible comments.
    full: bool, optional
        It defaults to `False`, in which case only 80 characters
        of the first line of the comment will be printed.
        If it is `True` it will print the full comment, which may be
        as big as 2000 characters.
    sanitize: bool, optional
        It defaults to `False`, in which case it will not remove
        the emojis from the comments.
        If it is `True` it will remove these unicode characters.
        This option requires the `emoji` package to be installed.
    file: str, optional
        It defaults to `None`.
        It must be a writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name
        of the summary file.
    page: int, optional
        Result page requested from the comment server.
    page_size: int, optional
        Number of comments requested per page.
    comm_server: str, optional
        It defaults to `'https://comments.odysee.com/api/v2'`
        It is the address of the comment server.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the local `lbrynet` daemon.

    Returns
    -------
    dict
        It has three keys.
        - `'root_comments'`: a list of dict, where each dictionary
          represents a comment at the root level (1st level).
          Each dictionary representing a comment has the text
          of the comment under the `'comment'` key.
          The `'sub_replies'` key has a list of the replies
          to that comment. ::
              output['root_comments'][0]['sub_replies'] -> [{..., ...}]
          If the comment has no replies, the `'sub_replies'` value
          is an empty list. ::
              output['root_comments'][0]['sub_replies'] -> []
        - `'replies'`: a list of dict, where each dictionary represents
          a reply to any comment. These replies are not ordered,
          so they correspond to comments at any level
          except at the root level (1st level).
        - `'levels'`: a dictionary with `n` keys, where each key
          is an integer starting from `1`, and up to `n`.
          Each key represents a comment level, so `1` is for the root
          level comments, `2` is for the replies to the root level
          comments, `3` is for the replies to 2nd level comments, etc.
          Each value in the dictionary is a list of dict
          with the comments at that level.
          The first level list is the same as `'root_comments'`. ::
              output['root_comments'] == output['levels'][1]
              output['root_comments'][5] == output['levels'][1][5]
    False
        If there is a problem, like a non-existing item,
        it will return `False`.
    """
    item = srch.search_item(uri=uri, cid=cid, name=name, server=server)
    if not item:
        return False

    claim_id = item["claim_id"]
    uri = item["canonical_url"]
    title = item["value"].get("title", "(None)")

    cl_time = 0
    if "release_time" in item["value"]:
        cl_time = int(item["value"]["release_time"])
        cl_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A",
                                time.localtime(cl_time))

    # XOR: exactly one of `hidden`/`visible` is True, so the server
    # filters by visibility; otherwise all comments are requested.
    if hidden ^ visible:
        params = {"claim_id": claim_id,
                  "visible": visible,
                  "hidden": hidden,
                  "page": page,
                  "page_size": page_size}
    else:
        params = {"claim_id": claim_id,
                  "page": page,
                  "page_size": page_size,
                  "top_level": False,
                  "sort_by": 3}

    output = jsonrpc_post(comm_server, "comment.List", params)

    if "error" in output:
        return False

    if output["result"]["total_items"] < 1:
        items = []
    else:
        items = output["result"]["items"]

    root_comments = []
    all_replies = []

    # A comment with a 'parent_id' is a reply; anything else is root.
    for comment in items:
        if "parent_id" in comment:
            all_replies.append(comment)
        else:
            root_comments.append(comment)

    n_comms = len(items)
    n_base = len(root_comments)
    n_replies = len(all_replies)

    print(f"canonical_url: {uri}")
    print(f"claim_id: {claim_id}")
    print(f"release_time: {cl_time}")
    print(f"title: {title}")
    print(f"comment server: {comm_server}")
    print(80 * "-")
    print(f"Total comments: {n_comms}")
    print(f"Total base comments: {n_base}")
    print(f"Total replies: {n_replies}")

    # Build the levels dictionary: level 1 is the root comments;
    # each following level holds the replies to the previous one.
    n = 1
    lvl_comments = {n: augment_replies(root_comments)}

    while True:
        replies_sub = find_replies(all_replies, lvl_comments[n])
        n_lvl = len(replies_sub)

        if n_lvl:
            n += 1
            lvl_comments[n] = replies_sub
            print(f" - Level {n} replies: {n_lvl}")
        else:
            break

    # Attach replies to their parents, deepest level first, so that
    # each comment's 'sub_replies' is complete before it is attached.
    indices = list(range(2, len(lvl_comments) + 1))
    indices.reverse()

    for n in indices:
        for rep in lvl_comments[n]:
            for base in lvl_comments[n-1]:
                if rep["parent_id"] == base["comment_id"]:
                    base["sub_replies"].append(rep)

    print_f_comments(root_comments, sub_replies=sub_replies,
                     full=full, sanitize=sanitize,
                     file=file, fdate=fdate)

    return {"root_comments": root_comments,
            "replies": all_replies,
            "levels": lvl_comments}
def find_channel(uri=None, cid=None, name=None,
                 full=True, canonical=False, offline=False,
                 server="http://localhost:5279"):
    """Find the name of the channel that published a claim.

    Parameters
    ----------
    uri: str
        A unified resource identifier (URI) to a claim on the
        LBRY network. It can be full or partial. ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'
        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI. ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    full: bool, optional
        It defaults to `True`, in which case the returned name
        includes the digits after `'#'` or `':'` that uniquely identify
        that channel in the network.
        If it is `False` it will return just the base name.
        This parameter only works when `canonical=False`.
    canonical: bool, optional
        It defaults to `False`.
        If it is `True`, the `'canonical_url'` of the channel
        is returned regardless of the value of `full`.
    offline: bool, optional
        It defaults to `False`, in which case it will try to resolve
        the channel name from the online database (blockchain).
        If it is `True` it will try to resolve the channel name
        from the offline database. This will be faster but may not
        find a name if the channel was not resolved when the claim
        was initially downloaded.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be
        running in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter
        from its default value.

    Returns
    -------
    str
        The name of the channel.
        If `full=False` it returns the common name ::
            @MyChannel
        If `full=True` it returns the unique name ::
            @MyChannel#3
        If `canonical=True` it returns ::
            lbry://@MyChannel#3
    False
        If there is a problem or non existing claim,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not (uri or cid or name):
        print("Find the channel's name from a claim's "
              "'URI', 'claim_id', or 'name'.")
        print(f"uri={uri}, cid={cid}, name={name}")
        return False

    item = srch.search_item(uri=uri, cid=cid, name=name,
                            offline=offline, server=server)
    if not item:
        return False

    if offline:
        return item["channel_name"]

    if ("signing_channel" not in item
            or "canonical_url" not in item["signing_channel"]):
        name = "@_Unknown_"
        return name

    name = item["signing_channel"]["canonical_url"]

    if not canonical:
        # The original used `name.lstrip("lbry://")`, which strips a
        # *character set* (l, b, r, y, :, /), not the literal prefix;
        # remove the scheme explicitly instead.
        prefix = "lbry://"
        if name.startswith(prefix):
            name = name[len(prefix):]

        if not full:
            name = name.split("#")[0]

    return name
def sort_invalid(channel=None, reverse=False,
                 server="http://localhost:5279"):
    """List the downloaded claims that can no longer be resolved online.

    Parameters
    ----------
    channel: str, optional
        It defaults to `None`, in which case all downloaded claims
        are considered. If it is a channel name, only claims from
        that channel are considered. It may be simplified or full:
        `'@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`
        If a simplified name is used, and there are various channels
        with the same name, the one with the highest LBC bid
        will be selected. Enter the full name to choose the right one.
    reverse: bool, optional
        It defaults to `False`, in which case older items come first
        in the output list.
        If it is `True` newer claims are at the beginning of the list.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be
        running in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter
        from its default value.

    Returns
    -------
    list of dict
        A list of dictionaries that represent 'invalid claims'
        that were previously downloaded fully or partially.
        Each dictionary is filled with information from the standard
        output of the `lbrynet file list` command, but filtered in
        such a way that it only includes claims which are no longer
        searchable online by `lbrynet resolve`
        or `lbrynet claim search`.
        The dictionaries are ordered by `'release_time'`, with older
        claims appearing first. Certain claims don't have
        `'release_time'` so for them we add this key,
        and use the value of `'timestamp'` for it.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    items = sort_items(channel=channel, reverse=reverse,
                       server=server)
    if not items:
        return False

    n_items = len(items)
    invalid_items = []

    for it, item in enumerate(items, start=1):
        online_item = srch.search_item(cid=item["claim_id"],
                                       offline=False,
                                       print_error=False,
                                       server=server)
        if not online_item:
            # Print a separating blank line before the first hit only.
            if not invalid_items:
                print()

            claim_id = item["claim_id"]
            claim_name = item["claim_name"]
            # Use a local name; the original reassigned (shadowed)
            # the `channel` parameter inside the loop.
            ch_name = item["channel_name"]
            print(f"Claim {it:4d}/{n_items:4d}, "
                  f"{claim_id}, {ch_name}, {claim_name}")
            invalid_items.append(item)

    n_invalid = len(invalid_items)
    if n_invalid > 0:
        print(f"Invalid items found: {n_invalid} "
              "(possibly deleted from the network)")
    else:
        print(f"Invalid items found: {n_invalid}")

    return invalid_items
def download_invalid(cid=None, name=None, ddir=None, own_dir=True,
                     server="http://localhost:5279"):
    """Download an invalid claim, one that only exists locally.

    Parameters
    ----------
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI. ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    ddir: str, optional
        It defaults to `$HOME`.
        The path to the download directory.
    own_dir: bool, optional
        It defaults to `True`, in which case it places the downloaded
        content inside a subdirectory named after the channel in `ddir`.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the local `lbrynet` daemon.

    Returns
    -------
    dict
        The standard output of `lbrynet_save` for the saved claim.
    False
        If there is a problem or non existing claim,
        it will return `False`.
    """
    # NOTE(review): the original text of this initial section was
    # corrupted (redacted) in the source; it is reconstructed from
    # `download_single` and `delete_single` -- TODO confirm upstream.
    if not funcs.server_exists(server=server):
        return False

    if not (cid or name):
        print("No input claim by 'claim_id' or 'name'.")
        print(f"cid={cid}, name={name}")
        return False

    if (not ddir or not isinstance(ddir, str)
            or ddir == "~" or not os.path.exists(ddir)):
        ddir = os.path.expanduser("~")
        print(f"Download directory should exist; set to ddir='{ddir}'")

    # Invalid claims only exist locally, so resolve them offline
    # with `lbrynet file list`.
    item = srch.search_item(cid=cid, name=name, offline=True,
                            server=server)
    if not item:
        return False

    claim_id = item["claim_id"]
    claim_name = item["claim_name"]

    # presumably the offline item carries 'channel_name';
    # fall back to the placeholder used elsewhere -- TODO confirm.
    if item.get("channel_name"):
        channel = item["channel_name"]
    else:
        channel = "@_Unknown_"

    subdir = os.path.join(ddir, channel)
    if own_dir:
        if not os.path.exists(subdir):
            try:
                os.mkdir(subdir)
            except (FileNotFoundError, PermissionError) as err:
                print(f"Cannot open directory for writing; {err}")
                return False
        ddir = subdir

    prnt.print_info_pre_get(item, offline=True)

    info_save = lbrynet_save(claim_id=claim_id, claim_name=claim_name,
                             ddir=ddir, server=server)
    if not info_save:
        print(">>> Empty information from `lbrynet file save`")
        return False

    prnt.print_info_post_get(info_save)

    return info_save
def download_single(uri=None, cid=None, name=None, repost=True,
                    invalid=False,
                    collection=False, max_claims=2,
                    reverse_collection=False,
                    ddir=None, own_dir=True, save_file=True,
                    server="http://localhost:5279"):
    """Download a single claim given its URI, claim ID, or name.

    Parameters
    ----------
    uri: str
        A unified resource identifier (URI) to a claim on the
        LBRY network. It can be full or partial. ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'
        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI. ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    repost: bool, optional
        It defaults to `True`, in which case it will check if the
        claim is a repost, and if it is, it will download the original
        claim. If it is `False`, it won't check for a repost, it will
        simply return `False` because it won't be able to download
        a repost.
    invalid: bool, optional
        It defaults to `False`, in which case it will assume the claim
        is still valid in the online database.
        It will use `lbrynet claim search` to search `cid` or `name`.
        If it is `True` it will assume the claim is no longer valid,
        that is, that the claim has been removed from the online
        database and only exists locally.
        In this case, it will use `lbrynet file list` to resolve
        `cid` or `name`.
        This has no effect on `uri`, so if this input is used,
        it will always try to resolve it from the online database.
    collection: bool, optional
        It defaults to `False`, in which case it won't download items
        in a collection.
        If it is `True` it will expand the collection and try to
        download every single item.
    max_claims: int, optional
        It defaults to 2. It specifies the maximum number of items
        to download in a collection.
        If it is 0, it will consider all claims in the collection.
    reverse_collection: bool, optional
        It defaults to `False`, in which case the collection will be
        downloaded in the same order it is defined.
        If it is `True` it will reverse the items in the collection
        so the newest ones will be downloaded first.
    ddir: str, optional
        It defaults to `$HOME`.
        The path to the download directory.
    own_dir: bool, optional
        It defaults to `True`, in which case it places the downloaded
        content inside a subdirectory named after the channel in `ddir`.
    save_file: bool, optional
        It defaults to `True`, in which case all blobs of the stream
        will be downloaded, and the media file (mp4, mp3, mkv, etc.)
        will be placed in the downloaded directory.
        If it is `False` it will only download the blobs.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be
        running in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter
        from its default value.

    Returns
    -------
    dict
        Returns the dictionary that represents the standard output
        of the `lbrynet_get` function.
    False
        If there is a problem or non existing claim,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not (uri or cid or name):
        print("No input claim by 'URI', 'claim_id', or 'name'.")
        print(f"uri={uri}, cid={cid}, name={name}")
        return False

    if (not ddir or not isinstance(ddir, str)
            or ddir == "~" or not os.path.exists(ddir)):
        ddir = os.path.expanduser("~")
        print(f"Download directory should exist; set to ddir='{ddir}'")

    # Canonical URLs cannot be treated as 'invalid', they are resolved online
    if not uri and invalid:
        info_get = download_invalid(cid=cid, name=name,
                                    ddir=ddir, own_dir=own_dir,
                                    server=server)
        return info_get

    # It also checks if it's a reposted claim, and returns the original
    # claim in case it is.
    item = srch.search_item(uri=uri, cid=cid, name=name, offline=False,
                            repost=repost, server=server)
    if not item:
        return False

    uri = item["canonical_url"]

    if "signing_channel" in item and "name" in item["signing_channel"]:
        # A bug (lbryio/lbry-sdk #3316) prevents
        # the `lbrynet file list --channel_name=@Channel`
        # command from finding the channel, therefore the channel must be
        # resolved with `lbrynet resolve` before it becomes known by other
        # functions.
        #
        # Both the short `@Name` and the canonical `@Name#7` are resolved.
        # The second form is necessary to get the exact channel, in case
        # it has the same base name as another channel.
        channel = item["signing_channel"]["name"]

        # The original used `lstrip("lbry://")`, which strips a
        # *character set*, not the literal prefix; remove it explicitly.
        ch_full = item["signing_channel"]["canonical_url"]
        prefix = "lbry://"
        if ch_full.startswith(prefix):
            ch_full = ch_full[len(prefix):]

        srch_ch.resolve_channel(channel=channel, server=server)
        srch_ch.resolve_channel(channel=ch_full, server=server)

        # Windows doesn't like # or : in the subdirectory; use a _
        channel = ch_full.replace("#", "_")
    else:
        channel = "@_Unknown_"

    subdir = os.path.join(ddir, channel)
    if own_dir:
        if not os.path.exists(subdir):
            try:
                os.mkdir(subdir)
            except (FileNotFoundError, PermissionError) as err:
                print(f"Cannot open directory for writing; {err}")
                return False
        ddir = subdir

    prnt.print_info_pre_get(item, offline=False)

    info_get = []

    # Equality, not substring membership; the original
    # `item["value_type"] in "collection"` would also be true for
    # any substring such as "" or "coll".
    is_collection = item["value_type"] == "collection"

    if collection and is_collection:
        info_get = download_collection(item,
                                       max_claims=max_claims,
                                       reverse=reverse_collection,
                                       ddir=ddir, save_file=save_file,
                                       server=server)
        return info_get

    if not is_collection:
        info_get = lbrynet_get(uri=uri, ddir=ddir, save_file=save_file,
                               server=server)

    if not info_get:
        if is_collection:
            print(">>> Collection items will not be downloaded "
                  "without `collection=True` option")
            print(">>> Skip download.")
        else:
            print(">>> Empty information from `lbrynet get`")
        return False

    prnt.print_info_post_get(info_get)

    return info_get
def delete_single(uri=None, cid=None, name=None, invalid=False,
                  what="media",
                  server="http://localhost:5279"):
    """Delete a single downloaded claim's media file and/or blobs.

    Parameters
    ----------
    uri: str
        A unified resource identifier (URI) to a claim on the
        LBRY network. It can be full or partial. ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'
        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI. ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    invalid: bool, optional
        It defaults to `False`, in which case it will assume the claim
        is still valid in the online database.
        It will use `lbrynet claim search` to search `cid` or `name`.
        If it is `True` it will assume the claim is no longer valid,
        that is, that the claim has been removed from the online
        database and only exists locally.
        In this case, it will use `lbrynet file list` to resolve
        `cid` or `name`.
        This has no effect on `uri`, so if this input is used,
        it will always try to resolve it from the online database.
    what: str, optional
        It defaults to `'media'`, in which case only the full media
        file (mp4, mp3, mkv, etc.) is deleted.
        If it is `'blobs'`, it will delete only the blobs.
        If it is `'both'`, it will delete both the media file
        and the blobs.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the local `lbrynet` daemon.

    Returns
    -------
    bool
        `True` if the deletion succeeded (or there was nothing left
        to delete); `False` if there is a problem.
    """
    # NOTE(review): the original text of this initial section was
    # corrupted (redacted) in the source; it is reconstructed from the
    # sibling functions of this module -- TODO confirm upstream.
    if not funcs.server_exists(server=server):
        return False

    if not (uri or cid or name):
        print("No input claim by 'URI', 'claim_id', or 'name'.")
        print(f"uri={uri}, cid={cid}, name={name}")
        return False

    if what not in ("media", "blobs", "both"):
        print(">>> Error: what can only be 'media', 'blobs', 'both'")
        print(f"what={what}")
        return False

    if invalid:
        # Invalid claims only exist locally; resolve them offline.
        item = srch.search_item(cid=cid, name=name, offline=True,
                                server=server)
    else:
        item = srch.search_item(uri=uri, cid=cid, name=name,
                                offline=False, server=server)

    if not item:
        return False

    claim_id = item["claim_id"]

    if invalid:
        claim_name = item["claim_name"]
        claim_uri = None
    else:
        claim_name = item["name"]
        claim_uri = item["canonical_url"]

    # Searching offline is necessary to get the download path,
    # and blob information.
    item = srch.search_item(cid=claim_id, offline=True,
                            server=server)
    if not item:
        print("No claim found locally, probably already deleted.")
        return True

    path = item["download_path"]
    blobs = int(item["blobs_completed"])
    blobs_full = int(item["blobs_in_stream"])

    if invalid:
        print(f"claim_name: {claim_name}")
    else:
        print(f"canonical_url: {claim_uri}")
    print(f"claim_id: {claim_id}")
    print(f"Blobs found: {blobs} of {blobs_full}")

    # Equality, not substring membership; the original `what in "media"`
    # would also be true for any substring such as "" or "med".
    if what == "media":
        print(f"Remove media file: {path}")
        if path:
            os.remove(path)
            print("Media file deleted")
        else:
            print("No media found locally, probably already deleted.")
        return True

    status = lbrynet_del(claim_id, claim_name=claim_name,
                         what=what,
                         server=server)
    return status