Example #1
def print_ch_summary(channels, file=None, fdate=False):
    """Print a summary paragraph of the channels."""
    t_n_claims = 0
    t_b_amount = 0
    t_e_amount = 0

    for ch in channels:
        n_claims = ch["meta"].get("claims_in_channel", 0)
        amount = float(ch["amount"])
        e_amount = float(ch["meta"].get("effective_amount", 0))

        t_n_claims += n_claims
        t_b_amount += amount
        t_e_amount += e_amount

    out = [
        40 * "-", f"Total claims in channels: {t_n_claims}",
        f"Total base stake on all channels: {t_b_amount:14.8f}",
        f"Total stake on all channels:      {t_e_amount:14.8f}"
    ]

    funcs.print_content(out, file=file, fdate=fdate)

    return {
        "n_claims": t_n_claims,
        "base_amount": t_b_amount,
        "total_amount": t_e_amount
    }
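A minimal, self-contained sketch of the aggregation print_ch_summary performs. The channel dictionaries are hypothetical stand-ins shaped like the output of `lbrynet channel list`; `funcs.print_content` is not needed for the arithmetic itself.

channels = [
    {"amount": "1.0",
     "meta": {"claims_in_channel": 5, "effective_amount": "2.5"}},
    {"amount": "0.5",
     "meta": {"claims_in_channel": 2, "effective_amount": "0.5"}},
]

t_n_claims = sum(ch["meta"].get("claims_in_channel", 0) for ch in channels)
t_b_amount = sum(float(ch["amount"]) for ch in channels)
t_e_amount = sum(float(ch["meta"].get("effective_amount", 0))
                 for ch in channels)

print(f"Total claims in channels: {t_n_claims}")                # 7
print(f"Total base stake on all channels: {t_b_amount:14.8f}")  # 1.50000000
print(f"Total stake on all channels:      {t_e_amount:14.8f}")  # 3.00000000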
Example #2
def print_cmnt_result(result, file=None, fdate=False):
    """Print the response of the comment server when successful."""
    cmt_time = result["timestamp"]
    cmt_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A",
                             time.localtime(cmt_time))

    sig_ts = int(result["signing_ts"])
    sig_ts = time.strftime("%Y-%m-%d_%H:%M:%S%z %A",
                           time.localtime(sig_ts))

    out = ["claim_id: " + result["claim_id"],
           "timestamp:  " + cmt_time,
           "signing_ts: " + sig_ts,
           "comment author: " + result["channel_name"],
           "comment author ID: " + result["channel_id"],
           "comment_id: " + result["comment_id"],
           "parent_id:  " + result.get("parent_id", "(None)"),
           "currency: " + result.get("currency", "(None)"),
           "support_amount: " + str(result.get("support_amount", 0)),
           "is_fiat: " + str(result.get("is_fiat", "")),
           "is_hidden: " + str(result.get("is_hidden", "")),
           "is_pinned: " + str(result.get("is_pinned", "")),
           "abandoned: " + str(result.get("abandoned", "")),
           "comment:",
           "'''",
           result["comment"],
           "'''"]

    funcs.print_content(out, file=file, fdate=fdate)
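The two `time.strftime` calls above share one pattern: convert an epoch timestamp to local time and format it. A self-contained sketch with a made-up epoch value:

import time

ts = 1650000000  # hypothetical epoch seconds, e.g. result["timestamp"]
formatted = time.strftime("%Y-%m-%d_%H:%M:%S%z %A", time.localtime(ts))
print(formatted)  # e.g. '2022-04-15_06:40:00+0200 Friday' (local time)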
Example #3
def print_ch_p_lines(ch_peers_info,
                     file=None, fdate=False, sep=";"):
    """Print a summary for each channel in a peer search."""
    if not ch_peers_info:
        return False

    n_channels = len(ch_peers_info)

    out = []

    for num, peers_info in enumerate(ch_peers_info, start=1):
        if not peers_info:
            line = f"{num:4d}/{n_channels:4d}" + f"{sep} "
            line += '"None"'
            out.append(line)
            continue

        channel = peers_info["channel"]
        channel = f'"{channel}"'
        n_streams = peers_info["n_streams"]
        total_size = peers_info["total_size"]
        total_seconds = peers_info["total_duration"]
        streams_with_hosts = peers_info["streams_with_hosts"]
        unique_nodes = peers_info["unique_nodes"]
        n_nodes = len(unique_nodes)

        if peers_info["local_node"]:
            n_nodes = f"{n_nodes:3d} + 1"
        else:
            n_nodes = f"{n_nodes:3d}"

        total_size_gb = total_size / (1024**3)
        hr = total_seconds // 3600
        mi = (total_seconds % 3600) // 60
        sec = (total_seconds % 3600) % 60
        duration = f"{hr:3d} h {mi:2d} min {sec:2d} s"

        peer_ratio = peers_info["peer_ratio"]
        hosting_coverage = peers_info["hosting_coverage"] * 100

        line = f"{num:4d}/{n_channels:4d}" + f"{sep} "
        line += f"{channel:42s}" + f"{sep} "
        line += f"streams: {streams_with_hosts:3d}/{n_streams:3d}" + f"{sep} "
        line += f"{total_size_gb:9.4f} GB" + f"{sep} "
        line += f"{duration}" + f"{sep} "
        line += f"peers/stream: {peer_ratio:7.4f}" + f"{sep} "
        line += f"coverage: {hosting_coverage:6.2f}%" + f"{sep} "
        line += f"unique peers: {n_nodes}"
        out.append(line)

    funcs.print_content(out, file=file, fdate=fdate)
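The size and duration arithmetic above recurs throughout these functions; a standalone sketch, assuming integer byte and second counts:

total_size = 5_368_709_120   # bytes (hypothetical)
total_seconds = 93_784       # seconds (hypothetical)

total_size_gb = total_size / (1024**3)      # bytes to GiB
hr = total_seconds // 3600
mi = (total_seconds % 3600) // 60
sec = (total_seconds % 3600) % 60

print(f"{total_size_gb:9.4f} GiB")           #    5.0000 GiB
print(f"{hr:3d} h {mi:2d} min {sec:2d} s")   #  26 h  3 min  4 s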
Example #4
def print_p_summary(peers_info, file=None, fdate=False):
    """Print a summary paragraph of the results from the peer search."""
    channel = peers_info["channel"]
    n_claims = peers_info["n_claims"]
    n_streams = peers_info["n_streams"]
    total_size = peers_info["total_size"]
    total_seconds = peers_info["total_duration"]
    streams_with_hosts = peers_info["streams_with_hosts"]
    total_peers = peers_info["total_peers"]
    n_nodes = len(peers_info["unique_nodes"])

    if peers_info["local_node"]:
        n_nodes = f"{n_nodes} + 1"

    peer_ratio = peers_info["peer_ratio"]
    hosting_coverage = peers_info["hosting_coverage"] * 100

    total_size_gb = total_size / (1024**3)
    days = (total_seconds / 3600) / 24
    hr = total_seconds // 3600
    mi = (total_seconds % 3600) // 60
    sec = (total_seconds % 3600) % 60
    duration = f"{hr} h {mi} min {sec} s, or {days:.4f} days"

    out_list = [
        f"Channel: {channel}", f"Claims searched: {n_claims}",
        f"Downloadable streams: {n_streams}",
        f"- Streams that have at least one host: {streams_with_hosts}",
        f"- Size of streams: {total_size_gb:.4f} GiB",
        f"- Duration of streams: {duration}", "",
        f"Total peers in all searched claims: {total_peers}",
        f"Total unique peers (nodes) hosting streams: {n_nodes}",
        f"Average number of peers per stream: {peer_ratio:.4f}",
        f"Hosting coverage: {hosting_coverage:.2f}%"
    ]

    funcs.print_content(out_list, file=file, fdate=fdate)
Example #5
def print_tr_claims(claims,
                    claim_id=False,
                    sanitize=False,
                    file=None,
                    fdate=None,
                    sep=";"):
    """Print generic claims, particularly trending or searched claims."""
    n_claims = len(claims)

    out = []
    for num, claim in enumerate(claims, start=1):
        vtype = claim["value_type"]

        if "stream_type" in claim["value"]:
            stream_type = claim["value"].get("stream_type")
        else:
            stream_type = 8 * "_"

        if "source" in claim["value"]:
            mtype = claim["value"]["source"].get("media_type", 14 * "_")
        else:
            mtype = 14 * "_"

        if "signing_channel" in claim:
            channel = claim["signing_channel"].get("name", 14 * "_")
            if sanitize:
                channel = funcs.sanitize_text(channel)
        else:
            channel = 14 * "_"

        name = claim["name"]
        if sanitize:
            name = funcs.sanitize_text(claim["name"])

        line = f"{num:2d}/{n_claims:2d}" + f"{sep} "

        if claim_id:
            line += claim["claim_id"] + f"{sep} "

        line += f"{vtype:9s}" + f"{sep} "
        line += f"{stream_type:9s}" + f"{sep} "
        line += f"{mtype:17s}" + f"{sep} "
        line += f"{channel:40s}" + f"{sep} "
        line += f'"{name}"'
        out.append(line)

    content = funcs.print_content(out, file=file, fdate=fdate)

    return content
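The lines are assembled from fixed-width f-string fields so the `sep`-delimited columns stay aligned; a small sketch with hypothetical values:

sep = ";"
num, n_claims = 3, 12
vtype, stream_type = "stream", "video"
name = "some-claim-name"

line = f"{num:2d}/{n_claims:2d}" + f"{sep} "   # numbers are right-aligned
line += f"{vtype:9s}" + f"{sep} "              # strings are left-aligned
line += f"{stream_type:9s}" + f"{sep} "
line += f'"{name}"'
print(line)  # ' 3/12; stream   ; video    ; "some-claim-name"'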
Example #6
def print_ch_subs(channels=None,
                  claim_id=False,
                  file=None,
                  fdate=None,
                  sep=";"):
    """Print channels found from the subscriptions."""
    out = []
    n_channels = len(channels)

    for num, channel in enumerate(channels, start=1):
        name, cid = channel["uri"].lstrip("lbry://").split("#")
        f_name = name + "#" + cid[0:3]
        f_name = f'"{f_name}"'

        if "notificationsDisabled" in channel:
            ch_nots = not channel["notificationsDisabled"]
        else:
            ch_nots = False
        ch_nots = f"{ch_nots}"

        if "valid" in channel:
            valid = bool(channel["valid"])
        else:
            valid = "_____"
        valid = f"{valid}"

        line = f"{num:4d}/{n_channels:4d}" + f"{sep} "
        if claim_id:
            line += f"{cid}" + f"{sep} "
        line += f"{f_name:48s}" + f"{sep} "
        line += f"valid: {valid:5s}" + f"{sep} "
        line += f"notifications: {ch_nots:5s}"

        out.append(line)

    funcs.print_content(out, file=file, fdate=fdate)
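Note that `lstrip("lbry://")` strips a set of characters, not the literal prefix; it works above only because channel names begin with `@`, which is not in that set. A sketch of the difference, with `str.removeprefix` (Python 3.9+) as the exact alternative:

uri = "lbry://@Some-Channel#abc123"

# lstrip removes any leading characters from the set {l, b, r, y, :, /},
# so it can eat more than the scheme:
print("lbry://bob#1".lstrip("lbry://"))       # 'ob#1' (the 'b' is gone)

# Safe here because channel URIs continue with '@':
name, cid = uri.lstrip("lbry://").split("#")
print(name, cid)                              # @Some-Channel abc123

# Exact prefix removal (Python 3.9+):
name, cid = uri.removeprefix("lbry://").split("#")
print(name, cid)                              # @Some-Channel abc123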
Example #7
def print_ch_subs_latest(ch_latest_claims,
                         claim_id=False,
                         typ=True,
                         title=False,
                         sanitize=False,
                         start=1,
                         end=0,
                         file=None,
                         fdate=False,
                         sep=";"):
    """Print a summary of the channels with their latest published claims."""
    if not ch_latest_claims:
        return False

    n_channels = len(ch_latest_claims)

    out = []

    for num, result in enumerate(ch_latest_claims, start=1):
        if num < start:
            continue
        if end != 0 and num > end:
            break

        channel = result["channel"]
        cid = result["claim_id"]
        claims = result["claims"]

        out.append(f"Channel {num}/{n_channels}, {channel}, {cid}")

        if not claims:
            out.append("  - Invalid channel (removed?)")

            if num < n_channels:
                out.append("")
            continue

        n_claims = len(claims)
        for k, claim in enumerate(claims, start=1):
            source_info = claim["value"]

            r_time = int(source_info.get("release_time", 0))
            r_time = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                   time.localtime(r_time))

            vtype = claim["value_type"]

            if "stream_type" in source_info:
                stream_type = source_info.get("stream_type")
            else:
                stream_type = 8 * "_"

            size = 0
            if "source" in source_info:
                size = int(source_info["source"].get("size", 0))

            seconds = 0
            if "video" in source_info:
                seconds = source_info["video"].get("duration", 0)
            elif "audio" in source_info:
                seconds = source_info["audio"].get("duration", 0)

            mi = seconds // 60
            sec = seconds % 60
            duration = f"{mi:3d}:{sec:02d}"
            size_mb = size / (1024**2)

            c_name = claim["name"]
            if title and "title" in source_info:
                c_name = source_info["title"]

            if sanitize:
                c_name = funcs.sanitize_text(c_name)

            line = f" {k:2d}/{n_claims:2d}" + f"{sep} "
            line += r_time + f"{sep} "

            if claim_id:
                line += '"' + claim["claim_id"] + '"' + f"{sep} "

            if typ:
                line += f"{vtype:10s}" + f"{sep} "
                line += f"{stream_type:9s}" + f"{sep} "

            line += f"{duration}" + f"{sep} "
            line += f"{size_mb:9.4f} MB" + f"{sep} "
            line += '"' + c_name + '"'
            out.append(line)

        if num < n_channels:
            out.append("")

    print()
    funcs.print_content(out, file=file, fdate=fdate)
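The `start`/`end` windowing above (1-based, inclusive, with `end=0` meaning no upper limit) in a compact standalone sketch:

results = ["a", "b", "c", "d", "e"]
first, last = 2, 4  # show items 2..4; last=0 would mean "until the end"

for num, item in enumerate(results, start=1):
    if num < first:
        continue
    if last != 0 and num > last:
        break
    print(num, item)  # prints: 2 b, 3 c, 4 d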
Example #8
def print_items(items=None,
                show="all",
                blocks=False,
                cid=True,
                blobs=True,
                size=True,
                typ=False,
                ch=False,
                ch_online=True,
                name=True,
                title=False,
                path=False,
                sanitize=False,
                start=1,
                end=0,
                channel=None,
                reverse=False,
                file=None,
                fdate=False,
                sep=";",
                server="http://localhost:5279"):
    """Print information on each claim in the given list of claims.

    Parameters
    ----------
    items: list of dict
        List of items to print information about.
        Each item should be a dictionary filled with information
        from the standard output of the `lbrynet file list` command.
    show: str, optional
        It defaults to `'all'`, in which case it shows all items.
        If it is `'incomplete'` it will show claims that are missing blobs.
        If it is `'full'` it will show claims that have all blobs.
        If it is `'media'` it will show claims that have the media file
        (mp4, mp3, mkv, etc.).
        Normally only items that have all blobs also have a media file;
        however, if the claim is currently being downloaded
        a partial media file may be present.
        If it is `'missing'` it will show claims that don't have
        the media file, whether the full blobs are present or not.
    blocks: bool, optional
        It defaults to `False`, in which case it won't print
        the `height` block of the claims.
        If it is `True` it will print this value, which gives some idea
        of when the claim was registered in the blockchain.
    cid: bool, optional
        It defaults to `True`.
        Show the `'claim_id'` of the claim.
        It is a 40-character hexadecimal string.
    blobs: bool, optional
        It defaults to `True`.
        Show the number of blobs in the file, and how many are complete.
    size: bool, optional
        It defaults to `True`.
        Show the length of the stream in minutes and seconds, like `14:12`,
        when possible (audio and video), and also the size in mebibytes (MB).
    typ: bool, optional
        It defaults to `False`.
        Show the type of stream (video, audio, document, etc.).
    ch: bool, optional
        It defaults to `False`.
        Show the name of the channel that published the claim.

        This is slow if `ch_online=True`.
    ch_online: bool, optional
        It defaults to `True`, in which case it searches for the channel name
        by doing a reverse search of the item online. This makes the search
        slow.

        By setting it to `False` it will consider the channel name
        stored in the input dictionary itself, which will be faster
        but it won't be the full name of the channel. If no channel is found
        offline, then it will set a default value `'_Unknown_'` just so
        it can be printed with no error.

        This parameter only has effect if `ch=True`, or if `channel`
        is used, as it internally sets `ch=True`.
    name: bool, optional
        It defaults to `True`.
        Show the name of the claim.
    title: bool, optional
        It defaults to `False`.
        Show the title of the claim.
    path: bool, optional
        It defaults to `False`.
        Show the full path of the saved media file.
    sanitize: bool, optional
        It defaults to `False`, in which case it will not remove the emojis
        from the name of the claim and channel.
        If it is `True` it will remove these unicode characters.
        This option requires the `emoji` package to be installed.
    start: int, optional
        It defaults to 1.
        Show claims starting from this index in the list of items.
    end: int, optional
        It defaults to 0.
        Show claims until and including this index in the list of items.
        If it is 0, it is the same as the last index in the list.
    channel: str, optional
        It defaults to `None`.
        It must be a channel's name, in which case it shows
        only the claims published by this channel.

        Using this parameter sets `ch=True`, and is slow because
        it needs to perform an additional search for the channel.
    reverse: bool, optional
        It defaults to `False`, in which case older items come first
        in the output list.
        If it is `True` newer claims are at the beginning of the list.
    file: str, optional
        It defaults to `None`.
        It must be a user writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        on your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    bool
        It returns `True` if it printed the summary successfully.
        If there is any error it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not items or not isinstance(items, (list, tuple)):
        print("No input item list. "
              "A list of items must be obtained from `lbrynet file list`.")
        print(f"items={items}, "
              f"show={show}, "
              f"blocks={blocks}, cid={cid}, blobs={blobs}, size={size}, "
              f"typ={typ}, ch={ch}, ch_online={ch_online}, "
              f"name={name}, title={title}, path={path}, "
              f"sanitize={sanitize}, reverse={reverse}, "
              f"start={start}, end={end}, channel={channel}, "
              f"file={file}, fdate={fdate}, sep={sep}")
        if file:
            print("No file written.")
        return False

    n_items = len(items)

    if reverse:
        items.reverse()

    if (not isinstance(show, str)
            or show not in ("all", "media", "missing", "incomplete", "full")):
        print(">>> Error: show can only be 'all', 'media', 'missing', "
              "'incomplete', or 'full'")
        print(f"show={show}")
        return False

    if channel:
        if not isinstance(channel, str):
            print(">>> Error: channel must be a string")
            return False
        ch = True

    if file and not isinstance(file, str):
        print("The file must be a string.")
        print(f"file={file}")
        return False

    out = []

    for num, item in enumerate(items, start=1):
        if num < start:
            continue
        if end != 0 and num > end:
            break

        st_path = item["download_path"]
        st_blobs = item["blobs_completed"]
        st_blobs_in_stream = item["blobs_in_stream"]
        # st_completed = item["completed"]

        # Skip printing an item depending on the value of `show`,
        # and whether the blobs or media files exist or not
        if show in "media" and not st_path:
            continue
        elif show in "missing" and st_path:
            continue
        elif show in "incomplete" and st_blobs == st_blobs_in_stream:
            continue
        elif show in "full" and st_blobs < st_blobs_in_stream:
            continue

        meta = item["metadata"]

        st_height = item["height"]
        st_time = int(meta["release_time"])
        st_time = time.strftime("%Y%m%d_%H:%M:%S%z", time.localtime(st_time))

        st_claim_id = item["claim_id"]
        st_type = meta.get("stream_type", 8 * "_")
        st_claim_name = item["claim_name"]
        st_title = meta["title"]

        length_s = 0

        if ("video" in meta and "duration" in meta["video"]):
            length_s = meta["video"]["duration"]
        if ("audio" in meta and "duration" in meta["audio"]):
            length_s = meta["audio"]["duration"]

        rem_s = length_s % 60
        rem_min = length_s // 60

        st_size = 0
        if ("source" in meta and "size" in meta["source"]):
            st_size = float(meta["source"]["size"])
            st_size = st_size / (1024**2)  # to MB

        if ch:
            if ch_online:
                # Searching online is slower but it gets the full channel name
                st_channel = srch_ch.find_channel(cid=item["claim_id"],
                                                  full=True,
                                                  server=server)
                if not st_channel:
                    print(st_claim_name)
                    print()
                    continue
            else:
                # Searching offline is necessary for "invalid" claims
                # that no longer exist as active claims online.
                # We don't want to skip this item so we force a channel name.
                st_channel = item["channel_name"]
                if not st_channel:
                    st_channel = "_Unknown_"

            # Skip if the item is not published by the specified channel
            if channel and channel not in st_channel:
                continue

            if sanitize:
                st_channel = funcs.sanitize_text(st_channel)

        if sanitize:
            st_claim_name = funcs.sanitize_text(st_claim_name)
            st_title = funcs.sanitize_text(st_title)

        line = f"{num:4d}/{n_items:4d}"

        if blocks:
            line += f"{sep} " + f"{st_height:8d}"

        line += f"{sep} " + f"{st_time}"

        if cid:
            line += f"{sep} " + f"{st_claim_id}"

        if blobs:
            line += f"{sep} " + f"{st_blobs:3d}/{st_blobs_in_stream:3d}"

        if size:
            line += f"{sep} " + f"{rem_min:3d}:{rem_s:02d}"
            line += f"{sep} " + f"{st_size:9.4f} MB"

        if typ:
            line += f"{sep} " + f"{st_type:9s}"

        if st_path:
            line += f"{sep} " + "media   "
        else:
            line += f"{sep} " + "no-media"

        if ch:
            line += f"{sep} " + f"{st_channel}"

        if name:
            line += f"{sep} " + f'"{st_claim_name}"'

        if title:
            line += f"{sep} " + f'"{st_title}"'

        if path:
            line += f"{sep} " + f'"{st_path}"'

        out.append(line)

    print(f"Number of shown items: {len(out)}")

    funcs.print_content(out, file=file, fdate=fdate)

    return True
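A standalone sketch of the `show` filter applied in the loop above, with two hypothetical items shaped like `lbrynet file list` output:

items = [
    {"download_path": "/tmp/a.mp4",
     "blobs_completed": 10, "blobs_in_stream": 10},
    {"download_path": None,
     "blobs_completed": 3, "blobs_in_stream": 10},
]

def keep(item, show="all"):
    """Return True if the item passes the `show` filter."""
    has_media = bool(item["download_path"])
    complete = item["blobs_completed"] == item["blobs_in_stream"]
    if show == "media":
        return has_media
    if show == "missing":
        return not has_media
    if show == "incomplete":
        return not complete
    if show == "full":
        return complete
    return True  # "all"

print([keep(it, "incomplete") for it in items])  # [False, True]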
Example #9
def print_network_sd_blobs(data_dir=None,
                           print_blobs=True,
                           file=None,
                           fdate=False,
                           sep=";",
                           server="http://localhost:5279"):
    """Print the downloaded blobs from the logs."""
    data_dir = data_dir or funcs.get_data_dir(server=server)

    if not data_dir:
        return False

    print("Estimate the automatically downloaded claims from the logs")
    print(80 * "-")
    print(f"data_dir: {data_dir}")

    blobs_down = 0

    down_times = []
    down_times_days = []

    success = False

    estimations = []
    log_files = ["lbrynet.log"]
    log_files += [f"lbrynet.log.{i}" for i in range(1, 10)]

    for log_file in log_files:
        filename = os.path.join(data_dir, log_file)
        if os.path.exists(filename):
            print(filename)
        else:
            print(f"{filename}, does not exist")
            continue

        estimation = count_auto_blobs(filename)
        blobs_down += estimation["blobs_down"]
        down_times.extend(estimation["down_times"])
        down_times_days.extend(estimation["down_times_days"])
        success = True
        estimations.append(estimation)

    out = f"Success: {success}"

    if not success:
        print(out)
        return False

    out = []
    out.append(f"Downloaded sd_blobs: {blobs_down}")

    now = time.time()
    out.append("Now: " +
               time.strftime("%Y-%m-%d_%H:%M:%S%z %A", time.localtime(now)))

    max_dtime = 0
    min_dtime = 0

    if len(down_times_days) > 0:
        max_dtime = max(down_times_days)
        min_dtime = min(down_times_days)

    out.append(f"Newest downloaded blob: {max_dtime:7.2f} days ago")
    out.append(f"Oldest downloaded blob: {min_dtime:7.2f} days ago")

    all_sd_blobs = []

    out.append(40 * "-")
    for estimation in estimations:
        all_sd_blobs.extend(estimation["sd_blobs"])
        _name = os.path.basename(estimation["log_file"]) + sep
        _blobs_down = estimation["blobs_down"]

        _now = time.strftime("%Y-%m-%d_%H:%M:%S%z %A",
                             time.localtime(estimation["now"]))
        _max_dtime = 0
        _min_dtime = 0

        if len(estimation["down_times_days"]) > 0:
            _max_dtime = max(estimation["down_times_days"])
            _min_dtime = min(estimation["down_times_days"])

        out.append(f"{_name:15s} "
                   f"down: {_blobs_down:5d}" + f"{sep} "
                   f"down new: {_max_dtime:7.2f}" + f"{sep} "
                   f"down old: {_min_dtime:7.2f}" + f"{sep} "
                   f"{_now}")

    funcs.print_content(out, file=file, fdate=fdate)

    if print_blobs:
        for num, blob in enumerate(all_sd_blobs, start=1):
            print(f"{num:4d}/{blobs_down:4d}: " f"{blob}")

    return estimations
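The rotated log files are probed in a fixed order, skipping the ones that don't exist; a standalone sketch of that enumeration with a hypothetical data directory:

import os

data_dir = "/home/user/.local/share/lbry/lbrynet"  # hypothetical path
log_files = ["lbrynet.log"]
log_files += [f"lbrynet.log.{i}" for i in range(1, 10)]

for log_file in log_files:
    filename = os.path.join(data_dir, log_file)
    if os.path.exists(filename):
        print(filename)
    else:
        print(f"{filename} does not exist")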
Example #10
def claims_bids(show_controlling=False,
                show_non_controlling=True,
                skip_repost=False,
                channels_only=False,
                show_claim_id=False,
                show_repost_status=True,
                show_competing=True,
                show_reposts=True,
                compact=False,
                file=None,
                fdate=False,
                sep=";",
                server="http://localhost:5279"):
    """Display the claims that are competing in name and LBC bidding.

    This is based on an original script by `@BrendonBrewer:3/vanity:8`

    Parameters
    ----------
    show_controlling: bool, optional
        It defaults to `False`, in which case it will not show
        the 'controlling' claims, that is, those which have the highest bid.
        If it is `True` it will show controlling claims.
    show_non_controlling: bool, optional
        It defaults to `True`, in which case it will show
        the 'non-controlling' claims, that is, those which have a lower bid.
        If it is `False` it will not show non-controlling claims.
    skip_repost: bool, optional
        It defaults to `False`, in which case it will process all claims
        whether they are reposts or not.
        If it is `True` it will not process reposts.
    channels_only: bool, optional
        It defaults to `False`, in which case it will process all claims
        whether they are channels or not.
        If it is `True` it will only process the claims that are channels.
    show_claim_id: bool, optional
        It defaults to `False`.
        If it is `True`, the claim ID will be printed for all claims.
        This option only has an effect when `compact=True`.
    show_repost_status: bool, optional
        It defaults to `True`, in which case it will show whether the claims
        are reposts or not.
        This option only has an effect when `compact=True`.
    show_competing: bool, optional
        It defaults to `True`, in which case it will show the number
        of competing claims, that is, those that share the same name
        with the claim being inspected.
        This option only has an effect when `compact=True`.
    show_reposts: bool, optional
        It defaults to `True`, in which case it will show the number
        of reposts for the claim being inspected.
        This option only has an effect when `compact=True`.
    compact: bool, optional
        It defaults to `False`, in which case each claim's information
        will be printed in a paragraph.
        If it is `True` there will be one claim per row, so the summary
        will be more compact.
    file: str, optional
        It defaults to `None`.
        It must be a writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        on your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        It returns the list of dictionaries representing the processed claims.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    msg = {
        "method": "claim_list",
        "params": {
            "page_size": 99000,
            "resolve": True
        }
    }

    output = requests.post(server, json=msg).json()
    if "error" in output:
        print(">>> No 'result' in the JSON-RPC server output")
        return False

    print("Analysis of the bidding amounts for claims, "
          "including channels, videos, reposts, playlists, etc.")

    num_claims = output["result"]["total_items"]
    print(f"Number of claims: {num_claims}")

    if (not show_controlling and not show_non_controlling):
        print(f"show_controlling: {bool(show_controlling)}")
        print(f"show_non_controlling: {bool(show_non_controlling)}")
        print("Won't show any item; at least one option must be True.")
        return False

    if show_controlling:
        print("- Controlling claims (highest bids) will be considered")
    if show_non_controlling:
        print("- Non-controlling claims (low bids) will be considered")

    if skip_repost:
        print("- Reposts will be omitted")
    else:
        print("- Reposts will be considered")

    if channels_only:
        print("- Only channels will be considered")

    print(80 * "-")

    claims = output["result"]["items"]

    out = []
    claims_filtered = []

    for it, claim in enumerate(claims, start=1):
        is_repost = claim["value_type"] == "repost"
        is_channel = claim["value_type"] == "channel"
        is_controlling = claim["meta"]["is_controlling"]

        if show_controlling and show_non_controlling:
            # Show regardless of controlling status
            if ((skip_repost and is_repost)
                    or (channels_only and not is_channel)):
                # Skip claim depending on whether it is a repost or a channel
                continue
            else:
                # Show everything
                pass
        elif (not show_controlling and not show_non_controlling):
            # Show nothing, regardless of controlling status
            continue
        elif ((show_controlling and not is_controlling)
              or (show_non_controlling and is_controlling)
              or (skip_repost and is_repost)
              or (channels_only and not is_channel)):
            # Skip claim depending on controlling status
            # or whether it is a repost or a channel
            continue

        claims_filtered.append(claim)

        uri = claim["canonical_url"]
        claim_id = claim["claim_id"]
        name = claim["name"]
        staked = float(claim["amount"])
        staked += float(claim["meta"]["support_amount"])

        # It's unlikely that more than 1000 items will share the same name
        msg2 = {
            "method": "claim_search",
            "params": {
                "name": name,
                "page_size": 1000
            }
        }

        output2 = requests.post(server, json=msg2).json()
        if "error" in output2:
            print(">>> No 'result' in the JSON-RPC server output")
            return False

        max_lbc = 0
        competitors = 0
        comp_reposts = 0
        items = output2["result"]["items"]

        for item in items:
            item_lbc = float(item["amount"])
            item_lbc += float(item["meta"]["support_amount"])

            rep_claim_id = ("reposted_claim_id" in item
                            and item["reposted_claim_id"] == claim_id)

            if item["claim_id"] != claim_id or rep_claim_id:
                if max_lbc == 0 or item_lbc > max_lbc:
                    max_lbc = float(item_lbc)

                if item["value_type"] == "repost":
                    comp_reposts += 1
                else:
                    competitors += 1

        name = f'"{name}"'

        if compact:
            line = f"{it:3d}/{num_claims:3d}" + f"{sep} "
            if show_claim_id:
                line += f"{claim_id}" + f"{sep} "

            line += (f"{name:58s}" + f"{sep} " + f"staked: {staked:8.2f}" +
                     f"{sep} " + f"highest_bid: {max_lbc:8.2f}" + f"{sep} " +
                     f"is_controlling: {str(is_controlling):5s}")

            if show_repost_status:
                line += f"{sep} " + f"is_repost: {str(is_repost):5s}"
            if show_competing:
                line += f"{sep} " + f"competing: {competitors:2d}"
            if show_reposts:
                line += f"{sep} " + f"reposts: {comp_reposts:2d}"

            out += [line]
        else:
            paragraph = (f"Claim {it}/{num_claims}, {name}\n"
                         f"canonical_url: {uri}\n"
                         f"claim_id: {claim_id}\n"
                         f"staked: {staked:.3f}\n"
                         f"highest_bid: {max_lbc:.3f} (by others)\n"
                         f"is_controlling: {is_controlling}\n"
                         f"is_repost: {is_repost}\n"
                         f"competing: {competitors}\n")
            if is_repost:
                paragraph += f"reposts: {comp_reposts} + 1 (this one)\n"
            else:
                paragraph += f"reposts: {comp_reposts}\n"

            out += [paragraph]

    funcs.print_content(out, file=file, fdate=fdate)

    return claims_filtered
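The JSON-RPC pattern used above (`claim_list` for the wallet's claims, then one `claim_search` per claim name) reduced to a minimal form; this assumes a `lbrynet` daemon listening on the default port:

import requests

server = "http://localhost:5279"
msg = {"method": "claim_list",
       "params": {"page_size": 10, "resolve": True}}

output = requests.post(server, json=msg).json()

if "error" in output:
    print(">>> Error in the JSON-RPC output:", output["error"])
else:
    print("Total claims:", output["result"]["total_items"])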
Example #11
def print_claims_summary(ch_claims, file=None, fdate=False):
    """Print a summary paragraph of the channel claims."""
    n_chs = len(ch_claims)

    t_n_claims = 0
    t_size = 0
    t_duration = 0

    t_n_anon_claims = 0
    t_anon_size = 0
    t_anon_duration = 0
    for ch_claim in ch_claims:
        if ch_claim["name"] == "_Unknown_":
            n_chs = n_chs - 1

    for ch_claim in ch_claims:
        # Reset the flag on each iteration; an equality test is used
        # because `in` would match any substring of "_Unknown_".
        is_anon = ch_claim["name"] == "_Unknown_"

        chan_size = ch_claim["size"]
        chan_duration = ch_claim["duration"]
        claims = ch_claim["claims"]

        if is_anon:
            t_n_anon_claims += len(claims)
            t_anon_size += chan_size
            t_anon_duration += chan_duration
        else:
            t_n_claims += len(claims)
            t_size += chan_size
            t_duration += chan_duration

    t_GB = t_size / (1024**3)  # to GiB
    t_hrs = t_duration / 3600
    t_days = t_hrs / 24

    t_hr = t_duration // 3600
    t_mi = (t_duration % 3600) // 60
    t_sec = (t_duration % 3600) % 60

    t_anon_GB = t_anon_size / (1024**3)  # to GiB
    t_anon_hrs = t_anon_duration / 3600
    t_anon_days = t_anon_hrs / 24

    t_anon_hr = t_anon_duration // 3600
    t_anon_mi = (t_anon_duration % 3600) // 60
    t_anon_sec = (t_anon_duration % 3600) % 60

    out1 = [
        40 * "-", f"Total unique channels: {n_chs}",
        f"Total claims in channels: {t_n_claims}",
        f"Total download size: {t_GB:.4f} GiB",
        f"Total duration: {t_hr} h {t_mi} min {t_sec} s, "
        f"or {t_days:.4f} days"
    ]

    out2 = [
        40 * "-", f"Anonymous unique claims: {t_n_anon_claims}",
        f"Total download size of anonymous claims: {t_anon_GB:.4f} GiB",
        "Total duration of anonymous claims: "
        f"{t_anon_hr} h {t_anon_mi} min {t_anon_sec} s, "
        f"or {t_anon_days:.4f} days"
    ]

    out = []

    if t_n_claims > 0:
        out += out1

    if t_n_anon_claims > 0:
        out += out2

    funcs.print_content(out, file=file, fdate=fdate)

    return {
        "n_channels": n_chs,
        "n_ch_claims": t_n_claims,
        "chs_size": t_GB,
        "chs_hr": t_hr,
        "chs_min": t_mi,
        "chs_sec": t_sec,
        "chs_days": t_days,
        "n_anon_claims": t_n_anon_claims,
        "anon_size": t_anon_GB,
        "anon_hr": t_anon_hr,
        "anon_min": t_anon_mi,
        "anon_sec": t_anon_sec,
        "anon_days": t_anon_days
    }
Example #12
def print_claims(ch_claims,
                 updates=False,
                 claim_id=False,
                 addresses=False,
                 typ=False,
                 amounts=True,
                 ch_name=False,
                 title=False,
                 sanitize=False,
                 file=None,
                 fdate=False,
                 sep=";"):
    """Print the list of channels and claims."""
    n_chs = len(ch_claims)

    out = []
    t_n_claims = 0
    t_size = 0
    t_duration = 0

    t_n_an_claims = 0
    t_an_size = 0
    t_an_duration = 0
    anon_exists = False
    is_anon = False

    for ch_claim in ch_claims:
        if ch_claim["name"] in "_Unknown_":
            anon_exists = True
            n_chs = n_chs - 1

    for n_ch, ch_claim in enumerate(ch_claims, start=1):
        chan_name = ch_claim["name"]

        if chan_name in "_Unknown_":
            is_anon = True

        if sanitize:
            chan_name = funcs.sanitize_text(chan_name)

        chan_name = '"' + chan_name + '"'

        chan_id = ch_claim["id"]
        chan_add = ch_claim["address"]
        chan_size = ch_claim["size"]
        chan_duration = ch_claim["duration"]

        claims = ch_claim["claims"]

        if is_anon:
            t_n_an_claims += len(claims)
            t_an_size += chan_size
            t_an_duration += chan_duration
        else:
            t_n_claims += len(claims)
            t_size += chan_size
            t_duration += chan_duration

        GB = chan_size / (1024**3)  # to GiB
        hrs = chan_duration / 3600
        days = hrs / 24

        hr = chan_duration // 3600
        mi = (chan_duration % 3600) // 60
        sec = (chan_duration % 3600) % 60

        if is_anon:
            line = ""
        else:
            line = f"{n_ch:2d}/{n_chs:2d}" + f"{sep} "

        line += f"{chan_name}" + f"{sep} "
        line += f"{chan_id}" + f"{sep} "
        line += f"{chan_add}" + f"{sep} "
        line += f"{GB:.4f} GiB" + f"{sep} "
        line += f"{hr} h {mi} min {sec} s, or {days:.4f} days"
        out.append(line)

        out = print_s_claims(claims,
                             output=out,
                             updates=updates,
                             claim_id=claim_id,
                             addresses=addresses,
                             typ=typ,
                             amounts=amounts,
                             ch_name=ch_name,
                             title=title,
                             sanitize=sanitize,
                             sep=sep)

        if not is_anon:
            if n_ch < n_chs or anon_exists:
                out.append("")

    funcs.print_content(out, file=file, fdate=fdate)
Example #13
def print_channels(channels,
                   updates=False,
                   claim_id=False,
                   addresses=True,
                   accounts=False,
                   amounts=True,
                   sanitize=False,
                   file=None,
                   fdate=False,
                   sep=";"):
    """Print the list of channels obtained from get_channels."""
    n_channels = len(channels)

    out = []

    for num, ch in enumerate(channels, start=1):
        meta = ch["meta"]
        value = ch["value"]

        cid = ch["claim_id"]
        address = ch["address"]

        # name = ch["name"]
        name = ch["canonical_url"].split("lbry://")[1]

        if sanitize:
            name = funcs.sanitize_text(name)
        name = '"' + name + '"'

        timestamp = ch["timestamp"]
        timestamp = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                  time.localtime(timestamp))

        title = value.get("title", 10 * "_")
        if sanitize:
            title = funcs.sanitize_text(title)
        title = '"' + title + '"'

        claim_op = ch["claim_op"]
        amount = float(ch["amount"])

        c_timestamp = meta.get("creation_timestamp", 0)
        c_timestamp = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                    time.localtime(c_timestamp))
        if "error" in meta:
            c_timestamp = 24 * "_"

        n_claims = meta.get("claims_in_channel", 0)
        e_amount = float(meta.get("effective_amount", 0))

        ch_acc = ch["account"]

        line = f"{num:2d}/{n_channels:2d}" + f"{sep} "
        line += f"{c_timestamp}" + f"{sep} "

        if updates:
            line += f"{claim_op}" + f"{sep} "
            line += f"{timestamp}" + f"{sep} "

        if claim_id:
            line += f"{cid}" + f"{sep} "

        if addresses:
            line += f"{address}" + f"{sep} "

        if accounts:
            line += f"in {ch_acc}" + f"{sep} "

        if amounts:
            line += f"{amount:14.8f}" + f"{sep} "
            line += f"{e_amount:14.8f}" + f"{sep} "

        line += f"c.{n_claims:4d}" + f"{sep} "
        line += f"{name:48s}" + f"{sep} "
        line += f"{title}"

        out.append(line)

    funcs.print_content(out, file=file, fdate=fdate)
Example #14
def print_accounts(wallet_info,
                   addresses=False,
                   file=None,
                   fdate=False,
                   sep=";"):
    """Print the account information including sub-addresses optionally."""
    accounts = wallet_info["accounts"]

    n_accounts = len(accounts)

    if n_accounts < 1:
        return False

    acc_addresses = []
    acc_values = []
    acc_subaddresses = []

    for num, account in enumerate(accounts, start=1):
        ID = account["id"]
        name = account["name"]
        gen = account["generator"]

        line = (f"{num:2d}/{n_accounts:2d}" + f"{sep} "
                f"{ID}" + f"{sep} "
                f"{gen:19s}" + f"{sep} "
                f'"{name}"')
        acc_addresses.append(line)

        total = account["total"]
        available = account["available"]
        reserved = account["reserved"]

        claims = account["claims"]
        supports = account["supports"]
        tips = account["tips"]

        line2 = (f"{num:2d}/{n_accounts:2d}" + f"{sep} "
                 f"total: {total:14.8f}" + f"{sep} "
                 f"available: {available:14.8f}" + f"{sep} "
                 f"reserved: {reserved:14.8f}" + f"{sep} "
                 f"claims: {claims:14.8f}" + f"{sep} "
                 f"supports: {supports:14.8f}" + f"{sep} "
                 f"tips: {tips:14.8f}")

        acc_values.append(line2)

        sub_addresses = []
        for sub_address in account["addresses"]:
            n_add = sub_address["n"]
            address = sub_address["address"]
            used = sub_address["used_times"]

            sub_addresses.append(f"{n_add:4d}: "
                                 f"{address}" + f"{sep} "
                                 f"uses: {used}")

        g_addresses = "\n".join(sub_addresses)
        group = (f"{num}/{n_accounts}" + f"{sep} "
                 f"{ID}" + f"{sep} "
                 f'"{name}"' + "\n" + g_addresses)

        if num < n_accounts:
            group += "\n"

        acc_subaddresses.append(group)

    wallet = wallet_info["wallet"]
    w_total = wallet["total"]
    w_available = wallet["available"]
    w_reserved = wallet["reserved"]

    w_claims = wallet["claims"]
    w_supports = wallet["supports"]
    w_tips = wallet["tips"]

    space0 = 29 * "-"
    space1 = " " + 26 * "-"
    space2 = " " + 25 * "-"
    space3 = " " + 23 * "-"
    space4 = " " + 25 * "-"
    space5 = " " + 20 * "-"

    w_summary = [
        space0 + space1 + space2 + space3 + space4 + space5,
        7 * " " + f"total: {w_total:14.8f}" + f"{sep} "
        f"available: {w_available:14.8f}" + f"{sep} "
        f"reserved: {w_reserved:14.8f}" + f"{sep} "
        f"claims: {w_claims:14.8f}" + f"{sep} "
        f"supports: {w_supports:14.8f}" + f"{sep} "
        f"tips: {w_tips:14.8f}"
    ]

    wid = wallet["id"]
    wname = wallet["name"]
    w_info = [f'id: "{wid}"' + f"{sep} " + f'"{wname}"']

    out = w_info
    out += acc_addresses + [""]
    out += acc_values + w_summary

    if addresses:
        out += [""] + acc_subaddresses

    funcs.print_content(out, file=file, fdate=fdate)
Example #15
def print_ch_p_summary(ch_peers_info,
                       file=None, fdate=False):
    """Print a summary of the results for all peers searched in channels."""
    if not ch_peers_info:
        return False

    n_channels = len(ch_peers_info)

    n_streams_t = 0
    total_size_t = 0
    total_seconds_t = 0
    streams_with_hosts_t = 0
    total_peers_t = 0
    unique_peers_t = []
    peer_ratio_sum = 0
    hosting_coverage_sum = 0
    local_node_t = False

    for peers_info in ch_peers_info:
        if not peers_info:
            continue

        channel = peers_info["channel"]
        channel = f'"{channel}"'
        n_streams = peers_info["n_streams"]
        total_size = peers_info["total_size"]
        total_seconds = peers_info["total_duration"]
        streams_with_hosts = peers_info["streams_with_hosts"]
        total_peers = peers_info["total_peers"]
        unique_nodes = peers_info["unique_nodes"]
        peer_ratio = peers_info["peer_ratio"]
        hosting = peers_info["hosting_coverage"]

        n_streams_t += n_streams
        total_size_t += total_size
        total_seconds_t += total_seconds
        streams_with_hosts_t += streams_with_hosts
        total_peers_t += total_peers

        for p in unique_nodes:
            if p not in unique_peers_t:
                unique_peers_t.append(p)

        peer_ratio_sum += peer_ratio
        hosting_coverage_sum += hosting
        local_node_t = local_node_t or peers_info["local_node"]

    total_size_gb_t = total_size_t / (1024**3)
    hr_t = total_seconds_t // 3600
    mi_t = (total_seconds_t % 3600) // 60
    sec_t = (total_seconds_t % 3600) % 60
    duration_t = f"{hr_t} h {mi_t} min {sec_t} s"

    n_nodes_t = len(unique_peers_t)

    if local_node_t:
        n_nodes_t = f"{n_nodes_t} + 1"

    peer_ratio_t = peer_ratio_sum/n_channels
    hosting_coverage_t = hosting_coverage_sum/n_channels * 100

    out = [f"Channels: {n_channels}",
           f"Total streams: {n_streams_t}",
           "- Total streams that have at least one host: "
           f"{streams_with_hosts_t}",
           f"- Total size of streams: {total_size_gb_t:.4f} GiB",
           f"- Total duration of streams: {duration_t}",
           "",
           f"Total peers in all searched claims: {total_peers_t}",
           f"Total unique peers (nodes) hosting streams: {n_nodes_t}",
           "Total average number of peers per stream: "
           f"{peer_ratio_t:.4f}",
           f"Total hosting coverage: {hosting_coverage_t:.2f}%"]

    funcs.print_content(out, file=file, fdate=fdate)
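The unique-peer accumulation above performs a list membership test per peer; if the node entries are hashable (address strings are assumed here; the original may carry richer, unhashable structures), a set yields the same count:

ch_peers_info = [
    {"unique_nodes": ["1.2.3.4", "5.6.7.8"]},
    {"unique_nodes": ["5.6.7.8", "9.9.9.9"]},
]

unique_peers_t = set()
for peers_info in ch_peers_info:
    unique_peers_t.update(peers_info["unique_nodes"])

print(len(unique_peers_t))  # 3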
Example #16
def print_sch_claims(claims,
                     blocks=False,
                     claim_id=False,
                     typ=False,
                     ch_name=False,
                     title=False,
                     sanitize=False,
                     start=1,
                     end=0,
                     reverse=False,
                     file=None,
                     fdate=None,
                     sep=";"):
    """Print the provided list of claims, particularly those from a channel."""
    n_claims = len(claims)

    if reverse:
        claims.reverse()

    out = []
    for num, claim in enumerate(claims, start=1):
        if num < start:
            continue
        if end != 0 and num > end:
            break

        creation = claim["meta"]["creation_height"]
        height = claim["height"]
        res_time = int(claim["value"].get("release_time", 0))
        res_time = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                 time.localtime(res_time))

        vtype = claim["value_type"]

        if "stream_type" in claim["value"]:
            stream_type = claim["value"].get("stream_type")
        else:
            stream_type = 8 * "_"

        if "source" in claim["value"]:
            mtype = claim["value"]["source"].get("media_type", 14 * "_")
        else:
            mtype = 14 * "_"

        if "signing_channel" in claim:
            # channel = claim["signing_channel"].get("name", 14 * "_")
            channel = claim["signing_channel"]["canonical_url"]
            channel = channel.lstrip("lbry://")
            if sanitize:
                channel = funcs.sanitize_text(channel)
        else:
            channel = 14 * "_"

        if sanitize:
            channel = funcs.sanitize_text(channel)

        name = claim["name"]

        if title and "title" in claim["value"]:
            name = claim["value"]["title"]

        if sanitize:
            name = funcs.sanitize_text(name)

        length_s = 0
        rem_s = 0
        rem_min = 0

        if "video" in claim["value"] and "duration" in claim["value"]["video"]:
            length_s = claim["value"]["video"]["duration"]
        if "audio" in claim["value"] and "duration" in claim["value"]["audio"]:
            length_s = claim["value"]["audio"]["duration"]

        rem_s = length_s % 60
        rem_min = length_s // 60

        size = 0
        if "source" in claim["value"] and "size" in claim["value"]["source"]:
            size = float(claim["value"]["source"]["size"])
            size = size / (1024**2)  # to MB

        line = f"{num:4d}/{n_claims:4d}" + f"{sep} "

        if blocks:
            line += f"{creation:8d}" + f"{sep}"
            line += f"{height:8d}" + f"{sep} "

        line += res_time + f"{sep} "

        if claim_id:
            line += claim["claim_id"] + f"{sep} "

        if typ:
            line += f"{vtype:10s}" + f"{sep} "
            line += f"{stream_type:9s}" + f"{sep} "
            line += f"{mtype:17s}" + f"{sep} "

        if ch_name:
            line += f"{channel}" + f"{sep} "

        line += f"{rem_min:3d}:{rem_s:02d}" + f"{sep} "
        line += f"{size:9.4f} MB" + f"{sep} "
        line += f'"{name}"'

        out.append(line)

    content = funcs.print_content(out, file=file, fdate=fdate)

    return content
Example #17
def list_supports(claim_id=False,
                  invalid=False,
                  combine=True,
                  claims=True,
                  channels=True,
                  file=None,
                  fdate=False,
                  sep=";",
                  server="http://localhost:5279"):
    """Print supported claims, the amount, and the trending score.

    Parameters
    ----------
    claim_id: bool, optional
        It defaults to `False`, in which case only the name of the claim
        is shown.
        If it is `True` the `'claim_id'` will be shown as well.
    invalid: bool, optional
        It defaults to `False`, in which case it will show all supported
        claims, even those that are invalid.
        If it is `True` it will only show invalid claims. Invalid claims
        are those that were deleted by their authors, so the claim (channel
        or content) is no longer available in the blockchain.
    combine: bool, optional
        It defaults to `True`, in which case the `global`, `group`, `local`,
        and `mixed` trending scores are added into one combined score.
        If it is `False` it will show the four values separately.
    claims: bool, optional
        It defaults to `True`, in which case supported claims will be shown.
        If it is `False` simple claims won't be shown.
    channels: bool, optional
        It defaults to `True`, in which case supported channels will be shown.
        If it is `False` channel claims (which start with the `@` symbol)
        won't be shown.
    file: str, optional
        It defaults to `None`.
        It must be a user writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        on your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.
    """
    # The body of this function is truncated in the source listing; the
    # elided part fetches the supports, resolves each supported claim
    # online, and defines `num`, `n_items`, `item`, `s` (the resolved
    # claim), `name`, `cid`, `out_list`, and `resolved` used below.

        is_channel = True if name.startswith("@") else False

        if is_channel and not channels:
            continue
        if not is_channel and not claims:
            continue

        obj = ""
        if claim_id:
            obj += f'"{cid}"' + f"{sep} "

        _name = f'"{name}"'

        if not s:
            _name = "[" + _name + "]"

        obj += f'{_name:58s}'

        _amount = float(item["amount"])
        amount = f"{_amount:14.8f}"

        if not s:
            m = {"support_amount": "0.0"}
            s = {"amount": item["amount"]}
        else:
            if invalid:
                continue
            m = s["meta"]

        existing_support = float(s["amount"]) + float(m["support_amount"])

        trend_gl = m.get("trending_global", 0)
        trend_gr = m.get("trending_group", 0)
        trend_loc = m.get("trending_local", 0)
        trend_mix = m.get("trending_mixed", 0)

        combined = (trend_gl + trend_gr + trend_loc + trend_mix)

        tr_gl = f'{trend_gl:7.2f}'
        tr_gr = f'{trend_gr:7.2f}'
        tr_loc = f'{trend_loc:7.2f}'
        tr_mix = f'{trend_mix:7.2f}'
        tr_combined = f'{combined:7.2f}'
        is_spent = item["is_spent"]

        out = f"{num:3d}/{n_items:3d}" + f"{sep} "
        out += f"{obj}" + f"{sep} " + f"{amount}" + f"{sep} "
        out += f"{existing_support:15.8f}" + f"{sep} "

        if not is_spent:
            if combine:
                out += f"combined: {tr_combined}"
            else:
                out += f"mix: {tr_mix}" + f"{sep} "
                out += f"glob: {tr_gl}" + f"{sep} "
                out += f"grp: {tr_gr}" + f"{sep} "
                out += f"loc: {tr_loc}"
        else:
            continue
        out_list.append(out)

    funcs.print_content(out_list, file=file, fdate=fdate)

    return resolved
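When `combine=True`, the four trending values from the claim's `meta` are summed into one score; a standalone sketch with hypothetical numbers:

m = {"trending_global": 1.5, "trending_group": 0.2,
     "trending_local": 0.1, "trending_mixed": 3.4}  # hypothetical meta

combined = (m.get("trending_global", 0) + m.get("trending_group", 0)
            + m.get("trending_local", 0) + m.get("trending_mixed", 0))
print(f"combined: {combined:7.2f}")  # combined:    5.20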
Example #18
def print_multi_list(multi_ch_info=None, sep=";"):
    """Print the summary of downloaded claims from multiple channels.

    This is meant to be used with the returned list from
    `ch_download_latest_multi`.

    Parameters
    ----------
    multi_ch_info: list of lists of dicts
        A list of lists, where each internal list represents one channel,
        and this internal list has as many dictionaries as downloaded claims.
        The information in each dictionary represents the standard output
        of the `lbrynet_get` command for each downloaded claim.

        If the download fails, then the corresponding item in the list
        may be `False`, in which case no claim information is printed.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.

    Returns
    -------
    bool
        It returns `True` if the information was read and printed
        without problems.
        If there is a problem or no list of items, it will return `False`.
    """
    if not multi_ch_info or not isinstance(multi_ch_info, (list, tuple)):
        print("Print information from a list of lists from multiple "
              "channels obtained from `ch_download_latest_multi`.")
        return False

    if len(multi_ch_info) < 1:
        print("Empty list.")
        return False

    # flat_list = [item for sublist in multi_ch_info for item in sublist]
    flat_list = []
    for sublist in multi_ch_info:
        if not sublist:
            flat_list.append(None)
            continue

        for item in sublist:
            if not item:
                flat_list.append(None)
                continue
            flat_list.append(item)

    n_items = len(flat_list)

    print("Summary of downloads")
    out = []

    for it, item in enumerate(flat_list, start=1):
        line = "{:2d}/{:2d}".format(it, n_items) + f"{sep} "

        if not item:
            line += "empty item. Failure establishing server connection?"
            out.append(line)
            continue

        if "claim_id" in item:
            line += "{}".format(item["claim_id"]) + f"{sep} "
            line += "{:3d}/{:3d}".format(item["blobs_completed"],
                                         item["blobs_in_stream"]) + f"{sep} "
            line += '"{}"'.format(item["channel_name"])
            line += f"{sep} "
            line += '"{}"'.format(item["claim_name"])
            out.append(line)
        elif "error" in item:
            out.append(line + '"{}"'.format(item["error"]))
        else:
            out.append(line + "not downloaded")

    funcs.print_content(out, file=None, fdate=False)

    return True
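
A minimal usage sketch follows; `ch_download_latest_multi` and its
parameters are assumptions based on the docstring above, and the
channel names are made up.

# Hypothetical usage: download the latest claims from two channels
# and print the combined summary.
channels = ["@channel-one", "@channel-two"]  # hypothetical names
multi_ch_info = ch_download_latest_multi(channels=channels)

print_multi_list(multi_ch_info, sep=";")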
Beispiel #19
0
def print_blobs_ratio(data_dir=None, plot_hst=False,
                      file=None, fdate=False, sep=";", tk_frame=None,
                      server="http://localhost:5279"):
    """Estimate the number of blobs uploaded and downloaded from the logs.

    Parse the log files in `data_dir` to find references
    to `lbry.blob.exchange` to see when blobs have been `sent` (uploaded)
    or `downloaded`.

    Parameters
    ----------
    data_dir: str, optional
        It defaults to `None`, in which case the `data_dir` is taken
        from the saved `lbrynet` settings.
        If it is given it must be the parent directory where the `lbrynet.log`
        files are located.
    plot_hst: bool, optional
        It defaults to `False`, in which case it won't create any plot.
        If it is `True` it will try plotting histograms of the blob activity
        in the past days.
        It assumes Numpy and Matplotlib are available; if they are not,
        no plot is generated.
    file: str, optional
        It defaults to `None`.
        It must be a writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
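    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the per-log-file summary lines.
    tk_frame: object, optional
        It defaults to `None`. If given, it is assumed to be a `tkinter`
        frame into which the histogram plot is drawn.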
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        It returns the list of dictionaries representing the information
        read from each log file.
        The keys are:
            - 'log_file': log file successfully read
            - 'blobs_up': number of blobs uploaded
            - 'blobs_down': number of blobs downloaded
            - 'up_times': list of `datetime.datetime` objects with all
              times the blobs were uploaded
            - 'down_times': list of `datetime.datetime` objects with all
              times the blobs were downloaded
            - 'now': reference time for 'up_times_days'
              and 'down_times_days'
            - 'up_times_days': list of floats representing the days
              from the current time to the day a blob was uploaded
            - 'down_times_days': list of floats representing the days
              from the current time to the day a blob was downloaded
    False
        If there is a problem it will return `False`.
    """
    # if not server_exists(server=server):
    #     return 1
    data_dir = data_dir or funcs.get_data_dir(server=server)

    if not data_dir:
        return False

    print("Estimate the blob upload/download ratio from the logs")
    print(80 * "-")
    print(f"data_dir: {data_dir}")

    blobs_up = 0
    blobs_down = 0

    up_times = []
    down_times = []
    up_times_days = []
    down_times_days = []

    success = False

    estimations = []
    log_files = ["lbrynet.log"]
    log_files += [f"lbrynet.log.{i}" for i in range(1, 10)]

    for log_file in log_files:
        filename = os.path.join(data_dir, log_file)
        if os.path.exists(filename):
            print(filename)
        else:
            print(f"{filename}, does not exist")
            continue

        estimation = count_updown_blobs(filename)
        blobs_up += estimation["blobs_up"]
        blobs_down += estimation["blobs_down"]
        up_times.extend(estimation["up_times"])
        down_times.extend(estimation["down_times"])
        up_times_days.extend(estimation["up_times_days"])
        down_times_days.extend(estimation["down_times_days"])
        success = True
        estimations.append(estimation)

    out = f"Success: {success}"

    if not success:
        print(out)
        return False

    out = []
    out.append(f"Uploaded blobs: {blobs_up}")
    out.append(f"Downloaded blobs: {blobs_down}")

    ratio = 0.0
    try:
        ratio = float(blobs_up)/blobs_down
    except ZeroDivisionError:
        pass

    out.append(f"Up/down ratio: {ratio:8.4f}")
    now = time.time()
    out.append("Now: " + time.strftime("%Y-%m-%d_%H:%M:%S%z %A",
                                       time.localtime(now)))

    max_utime = 0
    min_utime = 0
    max_dtime = 0
    min_dtime = 0

    if len(up_times_days) > 0:
        max_utime = max(up_times_days)
        min_utime = min(up_times_days)

    if len(down_times_days) > 0:
        max_dtime = max(down_times_days)
        min_dtime = min(down_times_days)

    out.append(f"Newest uploaded blob: {max_utime:7.2f} days ago")
    out.append(f"Oldest uploaded blob: {min_utime:7.2f} days ago")
    out.append(f"Newest downloaded blob: {max_dtime:7.2f} days ago")
    out.append(f"Oldest downloaded blob: {min_dtime:7.2f} days ago")

    out.append(40 * "-")
    for estimation in estimations:
        _name = os.path.basename(estimation["log_file"]) + sep
        _blobs_up = estimation["blobs_up"]
        _blobs_down = estimation["blobs_down"]
        ratio = 0.0
        try:
            ratio = float(_blobs_up)/_blobs_down
        except ZeroDivisionError:
            pass

        _now = time.strftime("%Y-%m-%d_%H:%M:%S%z %A",
                             time.localtime(estimation["now"]))
        _max_utime = 0
        _min_utime = 0
        _max_dtime = 0
        _min_dtime = 0

        if len(estimation["up_times_days"]) > 0:
            _max_utime = max(estimation["up_times_days"])
            _min_utime = min(estimation["up_times_days"])

        if len(estimation["down_times_days"]) > 0:
            _max_dtime = max(estimation["down_times_days"])
            _min_dtime = min(estimation["down_times_days"])

        out.append(f"{_name:15s} "
                   f"up: {_blobs_up:5d}" + f"{sep} "
                   f"down: {_blobs_down:5d}" + f"{sep} "
                   f"ratio: {ratio:8.4f}" + f"{sep} "
                   f"up new: {_max_utime:7.2f}" + f"{sep} "
                   f"up old: {_min_utime:7.2f}" + f"{sep} "
                   f"down new: {_max_dtime:7.2f}" + f"{sep} "
                   f"down old: {_min_dtime:7.2f}" + f"{sep} "
                   f"{_now}")

    if not PLOTTING:
        print("Numpy and Matplotlib not available; no plot generated")

    funcs.print_content(out, file=file, fdate=fdate)

    if plot_hst and PLOTTING:
        plot_histogram(up_times_days, down_times_days, now=now,
                       tk_frame=tk_frame)

    return estimations
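
`count_updown_blobs` is not shown in this listing; a minimal sketch of
the kind of parsing it presumably performs is below. The exact format
of the `lbry.blob.exchange` log lines is an assumption, not taken from
the real function.

# Sketch of a per-file blob counter; the matching of 'sent' and
# 'downloaded' inside `lbry.blob.exchange` lines is an assumption.
def count_updown_blobs_sketch(filename):
    blobs_up = 0
    blobs_down = 0

    with open(filename, "r", encoding="utf-8", errors="replace") as fd:
        for line in fd:
            if "lbry.blob.exchange" not in line:
                continue
            if "sent" in line:
                blobs_up += 1      # blob uploaded to a peer
            elif "downloaded" in line:
                blobs_down += 1    # blob downloaded from a peer

    return {"blobs_up": blobs_up, "blobs_down": blobs_down}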
Beispiel #20
0
def print_p_lines(peers_info,
                  cid=False,
                  typ=True,
                  title=False,
                  sanitize=False,
                  file=None,
                  fdate=False,
                  sep=";"):
    """Print a summary for each claim of the peer search."""
    n_claims = peers_info["n_claims"]
    streams_info = peers_info["streams_info"]

    out = []

    for num, info in enumerate(streams_info, start=1):
        stream = info["stream"]
        peers = info["peers"]
        size = info["size"]
        seconds = info["duration"]
        local_node = str(info["local_node"])

        name = stream["name"]
        rels_time = int(stream["value"].get("release_time", 0))
        rels_time = time.strftime("%Y-%m-%d_%H:%M:%S%z",
                                  time.localtime(rels_time))

        if title and "title" in stream["value"]:
            name = stream["value"]["title"]

        if sanitize:
            name = funcs.sanitize_text(name)

        vtype = stream["value_type"]

        if "stream_type" in stream["value"]:
            stream_type = stream["value"].get("stream_type")
        else:
            stream_type = 8 * "_"

        claim_id = stream["claim_id"]
        n_peers = len(peers)

        name = f'"{name}"'
        mi = seconds // 60
        sec = seconds % 60
        duration = f"{mi:3d}:{sec:02d}"
        size_mb = size / (1024**2)

        line = f"{num:4d}/{n_claims:4d}" + f"{sep} "
        line += rels_time + f"{sep} "

        if cid:
            line += f"{claim_id}" + f"{sep} "

        if typ:
            line += f"{vtype:10s}" + f"{sep} "
            line += f"{stream_type:9s}" + f"{sep} "

        line += f"{duration}" + f"{sep} "
        line += f"{size_mb:9.4f} MB" + f"{sep} "

        line += f"peers: {n_peers:2d}" + f"{sep} "
        line += f"hosted: {local_node:5s}" + f"{sep} "
        line += f"{name}"
        out.append(line)

    funcs.print_content(out, file=file, fdate=fdate)
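
A minimal usage sketch; the `peers_info` dictionary only needs the keys
read by `print_p_lines`, and all values below are made up.

# Hypothetical input: one stream with no known peers, hosted locally.
peers_info = {
    "n_claims": 1,
    "streams_info": [
        {"stream": {"name": "some-stream",
                    "claim_id": 40 * "0",
                    "value_type": "stream",
                    "value": {"release_time": "1600000000",
                              "stream_type": "video"}},
         "peers": [],
         "size": 123_456_789,
         "duration": 125,
         "local_node": True}
    ]
}

print_p_lines(peers_info, cid=True, typ=True)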