Example #1
def redownload_latest(number=2, ddir=None, own_dir=True, save_file=True,
                      shuffle=False,
                      server="http://localhost:5279"):
    """Attempt to redownload the latest claims that were already downloaded.

    This function is useful to resume the download of partially
    downloaded claims, that is, those that for any reason didn't complete
    the first time.
    It will download the missing blobs in order to produce
    the full media file (mp4, mp3, mkv, etc.).

    If all blobs are already available, then the media files
    will be recreated (if they don't already exist) in the download directory.

    Parameters
    ----------
    number: int, optional
        It defaults to 2.
        The number of items that will be re-downloaded from the list of claims
        which were already downloaded.
        For example, `number=10` will attempt to re-download
        the 10 newest items.
    ddir: str, optional
        It defaults to `$HOME`.
        The path to the download directory.
    own_dir: bool, optional
        It defaults to `True`, in which case it places the downloaded
        content inside a subdirectory named after the channel in `ddir`.
    save_file: bool, optional
        It defaults to `True`, in which case all blobs of the stream
        will be downloaded, and the media file (mp4, mp3, mkv, etc.)
        will be placed in the download directory.
        If it is `False` it will only download the blobs.
    shuffle: bool, optional
        It defaults to `False`.
        If it is `True` it will shuffle the list of claims
        so that `number` indicates a random number of claims,
        not only the newest ones.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        A list of dictionaries, where each dictionary represents
        the standard output of the `lbrynet_get` command for each
        re-downloaded claim.
    """
    if not funcs.server_exists(server=server):
        return False

    if not number or not isinstance(number, int) or number < 0:
        number = 2
        print("Number must be a positive integer, "
              f"set to default value, number={number}")

    if (not ddir or not isinstance(ddir, str)
            or ddir == "~" or not os.path.exists(ddir)):
        ddir = os.path.expanduser("~")
        print(f"Download directory should exist; set to ddir='{ddir}'")

    sorted_items = sort.sort_items(server=server)
    sorted_items.reverse()

    if shuffle:
        random.shuffle(sorted_items)
        random.shuffle(sorted_items)
        random.shuffle(sorted_items)

    list_info_get = []

    print(80 * "-")

    for it, item in enumerate(sorted_items, start=1):
        if it > number:
            break

        print(f"Re-download claim {it}/{number}")
        d = dld.download_single(cid=item["claim_id"],
                                ddir=ddir, own_dir=own_dir,
                                save_file=save_file,
                                server=server)
        list_info_get.append(d)
        print()

    return list_info_get
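
A minimal usage sketch, assuming `redownload_latest` is exposed at the top
level of the `lbrytools` package (as in the project README), that a `lbrynet`
daemon is already running, and using a placeholder download directory.

import lbrytools as lbryt  # assumed package-level import

# Attempt to re-download the five newest claims that were downloaded before.
downloads = lbryt.redownload_latest(number=5, ddir="/home/user/Downloads",
                                    shuffle=False)

if downloads:
    for info in downloads:
        if isinstance(info, dict):
            # Each entry mirrors the output of `lbrynet get`.
            print(info.get("claim_name"))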
Example #2
def count_blobs_all(blobfiles=None,
                    channel=None,
                    start=1,
                    end=0,
                    print_msg=False,
                    print_each=False,
                    server="http://*****:*****@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`

        If a simplified name is used, and there are various channels
        with the same name, the one with the highest LBC bid will be selected.
        Enter the full name to choose the right one.
    print_msg: bool, optional
        It defaults to `False`.
        If it is `True` it will print information on each processed claim.
        If `print_msg=False`, it also implies `print_each=False`.
    print_each: bool, optional
        It defaults to `False`.
        If it is `True` it will print all blobs
        that belong to the claim, and whether each of them is already
        in `blobfiles`.
    start: int, optional
        It defaults to 1.
        Count the blobs from claims starting from this index
        in the list of items.
    end: int, optional
        It defaults to 0.
        Count the blobs from claims until and including this index
        in the list of items.
        If it is 0, it is the same as the last index in the list.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        A list of dictionaries, one per processed claim, each with the keys
        'num' (the index of the claim) and 'blob_info'
        (the output of `count_blobs` for that claim).
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if channel:
        if not isinstance(channel, str):
            print("Channel must be a string.")
            print(f"channel={channel}")
            return False
        if not channel.startswith("@"):
            channel = "@" + channel

    items = sort.sort_items(channel=channel, server=server)
    if not items:
        return False

    n_items = len(items)
    print()

    if channel:
        print(f"Count all blob files for: {channel}")
    else:
        print("Count all blob files")
    print(80 * "-")
    print(f"Blobfiles: {blobfiles}")

    blob_all_info = []
    claims_blobs_complete = 0
    claims_blobs_incomplete = 0
    claims_no_sd_hash = 0
    claims_not_found = 0
    claims_other_error = 0
    n_blobs = 0

    for it, item in enumerate(items, start=1):
        if it < start:
            continue
        if end != 0 and it > end:
            break

        if print_msg:
            print(f"Claim {it}/{n_items}, {item['claim_name']}")
        blob_info = count_blobs(cid=item["claim_id"],
                                blobfiles=blobfiles,
                                print_msg=print_msg,
                                print_each=print_each,
                                server=server)
        if print_msg or "error_not_found" in blob_info:
            print()

        info = {"num": it, "blob_info": blob_info}
        blob_all_info.append(info)

        if blob_info:
            if "all_present" in blob_info and blob_info["all_present"]:
                claims_blobs_complete += 1
                n_blobs += 1  # for the 'sd_hash'
                n_blobs += len(blob_info["blobs"])
            elif "all_present" in blob_info and not blob_info["all_present"]:
                claims_blobs_incomplete += 1
                n_blobs += 1  # for the 'sd_hash'
                n_blobs += len(blob_info["blobs"])

            if "error_no_sd_hash" in blob_info:
                claims_no_sd_hash += 1
            if "error_not_found" in blob_info:
                claims_not_found += 1
        else:
            claims_other_error += 1

    print(f"claims with complete blobs: {claims_blobs_complete}")
    print(f"claims with incomplete blobs: {claims_blobs_incomplete} "
          "(continue download)")
    print(f"claims with no 'sd_hash' present: {claims_no_sd_hash} "
          "(restart download)")
    print(f"invalid claims: {claims_not_found} "
          "(no valid URI or claim ID; possibly removed from the network)")
    print(f"claims with other errors: {claims_other_error}")
    print(8 * "-")
    total = (claims_blobs_complete + claims_blobs_incomplete +
             claims_no_sd_hash + claims_not_found + claims_other_error)
    total_valid = (claims_blobs_complete + claims_blobs_incomplete +
                   claims_no_sd_hash)
    total_invalid = claims_not_found + claims_other_error
    print(f"total claims processed: {total}")
    print(f"total valid claims: {total_valid} "
          "(minimum number of 'sd_hash' blobs that must exist)")
    print(f"invalid claims: {total_invalid} "
          "(should be deleted including all their blobs)")
    print(f"blobs that should exist for these claims: {n_blobs}")

    return blob_all_info
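
A short sketch of how the returned list can be inspected; the `'num'`,
`'blob_info'`, and `'all_present'` keys come from the code above, while the
package-level import and the channel name are assumptions.

import lbrytools as lbryt  # assumed package-level import

blob_info_list = lbryt.count_blobs_all(channel="@MyChannel", print_msg=False)

if blob_info_list:
    # Collect the indices of claims whose blobs are not all present.
    incomplete = [entry["num"] for entry in blob_info_list
                  if entry["blob_info"]
                  and not entry["blob_info"].get("all_present", False)]
    print("Claims with missing blobs:", incomplete)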
Example #3
def cleanup_space(main_dir=None,
                  size=1000,
                  percent=90,
                  never_delete=None,
                  what="media",
                  server="http://localhost:5279"):
    """Clean up space in the download drive when it is sufficiently full.

    Parameters
    ----------
    main_dir: str
        It defaults to `$HOME`.
        This is the main or root directory that holds both
        the downloaded media files (mp4, mp3, mkv, etc.)
        as well as the downloaded binary blobs.

        On Linux, media files may go to `'$HOME/Downloads'`
        and blobs are normally found in
        `'$HOME/.local/share/lbry/lbrynet/blobfiles'`,
        so `main_dir` would be `$HOME`, or `'/home/user'`.
    size: int, optional
        It defaults to 1000.
        Maximum size in GB of `main_dir`.
        Ideally the downloaded media files and blobs never cross this limit.
    percent: float, optional
        It defaults to 90.
        Percentage of `size` that indicates a soft limit
        for the downloaded files.
        After this limit is crossed it will try to free space in `main_dir`
        by deleting older files and blobs, depending on the value
        of `what`.
    never_delete: list of str, optional
        It defaults to `None`.
        If it exists it is a list with channel names.
        The content produced by these channels will not be deleted
        so the media files and blobs will remain in `main_dir`.

        This is slow as it needs to perform an additional search
        for the channel.
    what: str, optional
        It defaults to `'media'`, in which case only the full media file
        (mp4, mp3, mkv, etc.) is deleted.
        If it is `'blobs'` it will delete only the binary blobs.
        If it is `'both'` it will delete both the media file
        and the blobs.

        As long as the blobs are present, the content can be seeded
        to the network, and the full file can be restored.
        That is, while the blobs exist the file is not completely deleted.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    bool
        It returns `True` if the limit indicated by `size` and `percent`
        was crossed by the downloaded files, and some of the older files
        were successfully deleted to bring usage of `main_dir` within limits.

        It returns `False` if there is a problem, or if the limit
        was not crossed and thus there is nothing to clean up,
        or if after going through all claims, it failed to clear
        enough space to bring usage within limits.
    """
    if not funcs.server_exists(server=server):
        return False

    if (not main_dir or not isinstance(main_dir, str) or main_dir == "~"
            or not os.path.exists(main_dir)):
        main_dir = os.path.expanduser("~")
        print("Download directory should exist; "
              f"set to main_dir='{main_dir}'")

    if not isinstance(size, (int, float)) or size <= 0:
        size = 1000
        print("Max disk usage should be a positive number; "
              f"set to size={size} GB")

    if (not isinstance(percent, (int, float)) or percent <= 0
            or percent > 100):
        percent = 90
        print("Percentage should be a positive number from 0 to 100; "
              f"set to percent={percent} %")

    if never_delete and not isinstance(never_delete, (list, tuple)):
        print("Must be a list of channels that should never be deleted.")
        print(f"never_delete={never_delete}")
        return False

    if (not isinstance(what, str) or what not in ("media", "blobs", "both")):
        print(">>> Error: what can only be 'media', 'blobs', 'both'")
        print(f"what={what}")
        return False

    limit_crossed = measure_usage(main_dir=main_dir,
                                  size=size,
                                  percent=percent)
    if not limit_crossed:
        print("Nothing to clean up.")
        return False

    sorted_items = sort.sort_items(server=server)
    n_items = len(sorted_items)

    for it, item in enumerate(sorted_items, start=1):
        print(80 * "-")
        out = "{:4d}/{:4d}, {}, ".format(it, n_items, item["claim_name"])

        if never_delete:
            channel = srch_ch.find_channel(cid=item["claim_id"],
                                           full=False,
                                           server=server)
            if channel in never_delete:
                print(out + f"item from {channel} will not be deleted. "
                      "Skipping.")
                continue

        print(out + "item will be deleted.")
        clean.delete_single(cid=item["claim_id"], what=what, server=server)

        limit_crossed = measure_usage(main_dir=main_dir,
                                      size=size,
                                      percent=percent)
        if not limit_crossed:
            print("Usage below limit. Stop deleting.")
            print()
            break
        print()

    if limit_crossed:
        print(">>> Went through all downloaded claims, "
              "and failed to clear enough space.")
        print("Terminating.")
        return False

    return True
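
A usage sketch under the same assumptions (package-level import, hypothetical
paths and channel names).

import lbrytools as lbryt  # assumed package-level import

# Keep usage of the download drive below 90 % of 1000 GB,
# deleting only media files and never touching two favorite channels.
cleaned = lbryt.cleanup_space(main_dir="/home/user",
                              size=1000, percent=90,
                              never_delete=["@MyFavorites", "@Tutorials"],
                              what="media")
print("Enough space was freed:", cleaned)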
Example #4
def blobs_action(blobfiles=None, action="get",
                 start=1, end=0,
                 server="http://localhost:5279"):
    """Get or announce all binary blobs from the blobfiles directory.

    Parameters
    ----------
    blobfiles: str
        It defaults to `'$HOME/.local/share/lbry/lbrynet/blobfiles'`.
        The path to the directory where the blobs were downloaded.
        This is normally seen with `lbrynet settings get`, under `'data_dir'`.
        It can be any other directory if it is symbolically linked
        to it, such as `'/opt/lbryblobfiles'`
    action: str, optional
        It defaults to `'get'`, in which case it re-downloads all blobs
        in the `blobfiles` directory.
        It can be `'get'`, `'announce'`, or `'both'`.
    start: int, optional
        It defaults to 1.
        Operate on the blob starting from this index in the
        directory of blobs `blobfiles`.
    end: int, optional
        It defaults to 0.
        Operate until and including this index in the list of blobs
        in the directory of blobs `blobfiles`.
        If it is 0, it is the same as the last index in the list.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    bool
        It returns `True` if it finished refreshing all blobs successfully,
        although this may take some time if all blobs are considered.
        If there is a problem or the blobfiles directory does not exist,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if (not blobfiles or not isinstance(blobfiles, str)
            or not os.path.exists(blobfiles)):
        print("Perform an action with the blobs from the blobfiles directory")
        print(f"blobfiles={blobfiles}, action={action}")
        print("This is typically '$HOME/.local/share/lbry/lbrynet/blobfiles'")

        home = os.path.expanduser("~")
        blobfiles = os.path.join(home,
                                 ".local", "share",
                                 "lbry", "lbrynet", "blobfiles")

        if not os.path.exists(blobfiles):
            print(f"Blobfiles directory does not exist: {blobfiles}")
            return False

    if (not isinstance(action, str)
            or action not in ("get", "announce", "both")):
        print(">>> Error: action can only be 'announce', 'get', 'both'")
        print(f"action={action}")
        return False

    list_blobs = os.listdir(blobfiles)
    n_blobs = len(list_blobs)

    for it, blob in enumerate(list_blobs, start=1):
        if it < start:
            continue
        if end != 0 and it > end:
            break

        out = "{:6d}/{:6d}, ".format(it, n_blobs)

        blob_get(blob=blob, action=action, out=out,
                 server=server)

    return True
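
A usage sketch, assuming the package-level import; the blobfiles path is the
symlinked example mentioned in the docstring.

import lbrytools as lbryt  # assumed package-level import

# Re-announce the first 100 blobs found in the blobfiles directory.
done = lbryt.blobs_action(blobfiles="/opt/lbryblobfiles",
                          action="announce",
                          start=1, end=100)
print("Finished:", done)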
Example #5
def blob_get(blob=None, action="get", out="",
             server="http://localhost:5279"):
    """Get or announce one blob from the LBRY network.

    At the moment it cannot be used with missing blobs;
    the command hangs and never times out.
    It can only be used with a blob that is already downloaded.

    This bug is reported in lbryio/lbry-sdk, issue #2070.

    Therefore, at this moment this function is not very useful.

    Parameters
    ----------
    blob: str
        The 96-character alphanumeric hash that identifies a blob.
        This will be downloaded to the `blobfiles` directory,
        which on Linux is normally
        `'$HOME/.local/share/lbry/lbrynet/blobfiles'`
    action: str, optional
        It defaults to `'get'`, in which case it downloads
        the specified `blob`.
        It can be `'get'`, `'announce'`, or `'both'`.
    out: str, optional
        It defaults to an empty string `""`.
        It is an arbitrary string that will be printed before the string
        `'lbrynet blob get <blob>'`.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    bool
        It returns `True` if it finished downloading or announcing
        the indicated blob successfully.
        If there is a problem or the blob hash does not exist,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not isinstance(blob, str) or len(blob) < 96:
        print(">>> Error: blob must be a 96-character alphanumeric string")
        print(f"blob={blob}")
        return False

    if (not isinstance(action, str)
            or action not in ("get", "announce", "both")):
        print(">>> Error: action can only be 'announce', 'get', 'both'")
        print(f"action={action}")
        return False

    cmd = ["lbrynet",
           "blob",
           "get",
           blob]

    if action in "announce":
        cmd[2] = "announce"

    print(out + " ".join(cmd))

    msg = {"method": cmd[1] + "_" + cmd[2],
           "params": {"blob_hash": blob}}
    output = requests.post(server, json=msg).json()

    if "error" in output:
        print(output["error"]["data"]["name"])

    if action in "both":
        cmd[2] = "announce"
        print(out + " ".join(cmd))

        msg = {"method": cmd[1] + "_" + cmd[2],
               "params": {"blob_hash": blob}}
        output = requests.post(server, json=msg).json()

        if "error" in output:
            print(output["error"]["data"]["name"])

    return True
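
The function above is a thin wrapper over the daemon's JSON-RPC interface;
the following standalone sketch issues the same `blob_get` call directly with
`requests`, assuming a local daemon is listening on the default port
(the blob hash is a placeholder).

import requests

server = "http://localhost:5279"
blob_hash = 96 * "a"  # placeholder; use a real 96-character blob hash

msg = {"method": "blob_get",
       "params": {"blob_hash": blob_hash}}
output = requests.post(server, json=msg).json()

if "error" in output:
    print(output["error"]["data"]["name"])
else:
    print(output["result"])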
Example #6
def abandon_support(uri=None,
                    cid=None,
                    name=None,
                    keep=0.0,
                    server="http://*****:*****@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'

        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI.
        ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    keep: float, optional
        It defaults to `0.0`.
        It is the amount of LBC support that should remain in the claim
        after we remove our previous support. That is, we can use
        this parameter to assign a new support value.
        If it is `0.0` all support is removed.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    dict
        A dictionary with information on the result of the support.
        The keys are the following:
        - 'canonical_url': canonical URI of the claim.
        - 'claim_id': unique 40 character alphanumeric string.
        - 'existing_support': existing support before we add or remove ours;
          this is the sum of `base_support` and `old_support`.
        - 'base_support': existing minimum support that we do not control;
          all published claims must have a positive `base_support`.
        - 'old_support': support that we have added to this claim in the past;
          it may be zero.
        - 'new_support': new support that was successfully deposited
          in the claim, equal to `keep`.
        - 'txid': transaction ID in the blockchain that records the operation.
    False
        If there is a problem, a non-existent claim, or lack of funds,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    supports = get_base_support(uri=uri, cid=cid, name=name)
    if not supports:
        return False

    uri = supports["canonical_url"]
    claim_id = supports["claim_id"]
    existing = supports["existing_support"]
    base_support = supports["base_support"]
    old_support = supports["old_support"]

    calc, text = calculate_abandon(claim_id=claim_id, keep=keep, server=server)
    if not calc:
        return False

    new_support = calc["new_support"]
    txid = calc["txid"]

    out = [
        f"canonical_url: {uri}", f"claim_id: {claim_id}",
        f"Existing support: {existing:14.8f}",
        f"Base support:     {base_support:14.8f}",
        f"Old support:      {old_support:14.8f}",
        f"New support:      {keep:14.8f}", ""
    ]
    out += text

    print("\n".join(out))

    return {
        "canonical_url": uri,
        "claim_id": claim_id,
        "existing_support": existing,
        "base_support": base_support,
        "old_support": old_support,
        "new_support": new_support,
        "txid": txid
    }
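
A usage sketch, assuming the package-level import; the claim name is a
placeholder.

import lbrytools as lbryt  # assumed package-level import

# Remove our support from a claim, leaving 10 LBC of our support in place.
result = lbryt.abandon_support(name="some-video-name", keep=10.0)

if result:
    print("Transaction:", result["txid"])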
Example #7
def target_support(uri=None,
                   cid=None,
                   name=None,
                   target=0.0,
                   server="http://*****:*****@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'

        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI.
        ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    target: float, optional
        It defaults to `0.0`.
        It is the amount of LBC support that we want the claim to have
        at the end of our support.
        For example, if the current support is `100`, and we specify a target
        of `500`, we will be supporting the claim with `400`
        in order to reach the target.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    dict
        A dictionary with information on the result of the support.
        The keys are the following:
        - 'canonical_url': canonical URI of the claim.
        - 'claim_id': unique 40 character alphanumeric string.
        - 'existing_support': existing support before we add or remove ours;
          this is the sum of `base_support` and `old_support`.
        - 'base_support': existing minimum support that we do not control;
          all published claims must have a positive `base_support`.
        - 'old_support': support that we have added to this claim in the past;
          it may be zero.
        - 'target': target support that we want after running this method.
          It must be a positive number.
        - 'must_add': amount of support that we must add or remove (negative)
          to reach the `target`; it may be zero if `target`
          is already below the `base_support`.
        - 'new_support': new support that was successfully deposited
          in the claim; it may be zero if `target` is already below
          the `base_support`, or if `old_support` already satisfies
          our `target`.
        - 'txid': transaction ID in the blockchain that records the operation;
          it may be `None` if the transaction was not made because the `target`
          was already achieved before applying additional support.
    False
        If there is a problem, a non-existent claim, or lack of funds,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    supports = get_base_support(uri=uri, cid=cid, name=name)
    if not supports:
        return False

    uri = supports["canonical_url"]
    claim_id = supports["claim_id"]
    existing = supports["existing_support"]
    base_support = supports["base_support"]
    old_support = supports["old_support"]

    target = abs(target)
    out = [
        f"canonical_url: {uri}", f"claim_id: {claim_id}",
        f"Existing support: {existing:14.8f}",
        f"Base support:     {base_support:14.8f}",
        f"Old support:      {old_support:14.8f}", "",
        f"Target:           {target:14.8f}"
    ]

    new_support = 0.0
    must_add = 0.0

    if target > base_support:
        # Target above base, calculate addition
        must_add = target - existing
        new_support = old_support + must_add
    elif target < base_support:
        if not old_support:
            # Target below base support, and no old support, nothing to add,
            # reset support to 0
            pass
        else:
            # Target below base support, and old support, remove it
            must_add = -old_support
    else:
        # Same target as base support, nothing to add, reset support to 0
        pass

    out.append(f"Must add:         {must_add:14.8f}")
    out.append(f"New support:      {new_support:14.8f}")

    applied = 0.0
    t_input = 0.0
    t_output = 0.0
    t_fee = 0.0
    txid = None

    # The SDK accepts the amount as a string, not directly as a number.
    # The minimum amount is 0.00000001, so we format all quantities
    # with 8 decimal places.
    #
    # Only perform the transaction if the new support is different
    # from the old support
    if new_support != old_support:
        if not old_support and new_support > 0:
            # No existing support, so we create it
            msg = {
                "method": "support_create",
                "params": {
                    "claim_id": claim_id,
                    "amount": f"{new_support:.8f}"
                }
            }
            output = requests.post(server, json=msg).json()
        else:
            # Existing support, so we update it with the new value
            msg = {
                "method": "support_abandon",
                "params": {
                    "claim_id": claim_id,
                    "keep": f"{new_support:.8f}"
                }
            }
            output = requests.post(server, json=msg).json()

        if "error" in output:
            error = output["error"]
            if "data" in error:
                print(">>> Error: {}, {}".format(error["data"]["name"],
                                                 error["message"]))
            else:
                print(f">>> Error: {error}")
            print(f">>> Requested amount: {new_support:.8f}")
            return False

        applied = new_support
        t_input = float(output["result"]["total_input"])
        t_output = float(output["result"]["total_output"])
        t_fee = float(output["result"]["total_fee"])
        txid = output["result"]["txid"]

    out += [
        "", f"Applied:          {applied:14.8f}",
        f"total_input:      {t_input:14.8f}",
        f"total_output:     {t_output:14.8f}",
        f"total_fee:        {t_fee:14.8f}", f"txid: {txid}"
    ]

    print("\n".join(out))

    return {
        "canonical_url": uri,
        "claim_id": cid,
        "existing_support": existing,
        "base_support": base_support,
        "old_support": old_support,
        "target": target,
        "must_add": must_add,
        "new_support": new_support,
        "txid": txid
    }
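
A worked numeric sketch of the docstring example (existing support of 100 and
a target of 500), mirroring the `must_add` arithmetic above; the split between
base and old support is hypothetical.

base_support = 10.0                    # support not controlled by us
old_support = 90.0                     # support we added in the past
existing = base_support + old_support  # 100.0
target = 500.0

must_add = 0.0
new_support = 0.0
if target > base_support:
    must_add = target - existing          # 400.0 must be added
    new_support = old_support + must_add  # our support becomes 490.0

print(f"must_add={must_add}, new_support={new_support}")
# The claim then holds base_support + new_support = 500.0 in total.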
Example #8
def resolve_channel(channel=None, server="http://localhost:5279"):
    """Resolve a channel name to see if it exists on the LBRY network.

    Parameters
    ----------
    channel: str
        A channel's name, full or partial:
        `'@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`

        If a simplified name is used, and there are various channels
        with the same name, the one with the highest LBC bid will be selected.
        Enter the full name to choose the right one.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    dict
        Returns the dictionary that represents the channel that was found
        matching the `channel` address.
    False
        If the dictionary has the `'error'` key, it will print the contents
        of this key, and return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not channel or not isinstance(channel, str):
        print("Channel must be a string.")
        print(f"channel={channel}")
        return False

    # The channel must start with @, otherwise we may resolve a claim
    if not channel.startswith("@"):
        channel = "@" + channel

    cmd = ["lbrynet", "resolve", channel]

    msg = {"method": cmd[1], "params": {"urls": channel}}

    output = requests.post(server, json=msg).json()

    if "error" in output:
        print(">>> No 'result' in the JSON-RPC server output")
        return False

    ch_item = output["result"][channel]

    if "error" in ch_item:
        error = ch_item["error"]
        if "name" in error:
            print(">>> Error: {}, {}".format(error["name"], error["text"]))
        else:
            print(">>> Error: {}".format(error))
        print(f">>> Check that the name is correct, channel={channel}")
        return False

    return ch_item
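
The same resolution can be performed with a direct `resolve` call to the
daemon, as the function does internally; a running local daemon is assumed
and the channel name is a placeholder.

import requests

server = "http://localhost:5279"
channel = "@MyChannel"

msg = {"method": "resolve", "params": {"urls": channel}}
output = requests.post(server, json=msg).json()

if "error" in output:
    print(">>> No 'result' in the JSON-RPC server output")
else:
    ch_item = output["result"][channel]
    if "error" in ch_item:
        print(ch_item["error"])
    else:
        print("claim_id:", ch_item.get("claim_id"))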
Example #9
def list_ch_subs(shared=True,
                 show_all=True,
                 filtering="valid",
                 valid=True,
                 notifications=False,
                 threads=32,
                 claim_id=False,
                 file=None,
                 fdate=False,
                 sep=";",
                 server="http://localhost:5279"):
    """Search and print the channels from our subscriptions.

    Parameters
    ----------
    shared: bool, optional
        It defaults to `True`, in which case it uses the shared database
        synchronized with Odysee online.
        If it is `False` it will use only the local database
        to `lbrynet`, for example, used by the LBRY Desktop application.
    show_all: bool, optional
        It defaults to `True`, in which case all followed channels
        will be printed, regardless of `filtering`, `valid`,
        or `notifications`.
        If it is `False` then we can control what channels to show
        with `filtering`, `valid`, or `notifications`.
    filtering: str, optional
        It defaults to `'valid'`. It is the type of filtering that
        will be done as long as `show_all=False`.
        It can be `'valid'` (depending on the value of `valid` parameter),
        `'notifications'` (depending on the value of `notifications`),
        or `'both'` (depending on the values of `valid` and `notifications`).
        If `'both'`, the list of channels will be filtered by `valid` first,
        and then by `notifications`.
    valid: bool, optional
        It defaults to `True` in which case only the channels that resolve
        online will be returned.
        If it is `False` it will return only those channels that no longer
        resolve online.
        This parameter only works when `show_all=False`
        and `filtering='valid'` or `'both'`.
    notifications: bool, optional
        It defaults to `False` in which case only the channels
        that have notifications disabled will be returned.
        If it is `True` it will return only those channels
        that have notifications enabled.
        This parameter only works when `show_all=False`
        and `filtering='notifications'` or `'both'`.
    threads: int, optional
        It defaults to 32.
        It is the number of threads that will be used to resolve channels
        online, meaning that many channels will be searched in parallel.
        This number shouldn't be large if the CPU doesn't have many cores.
    claim_id: bool, optional
        It defaults to `False`.
        If it is `True` it will print the `claim_id` of the channel.
    file: str, optional
        It defaults to `None`.
        It must be a writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        It returns the list of dictionaries representing
        the filtered channels depending on the values of `shared`, `show_all`,
        `filtering`, `valid`, and `notifications`.

        Each dictionary has three keys:
        - 'uri': the `'permanent_url'` of the channel.
        - 'notificationsDisabled': a boolean value indicating whether
          notifications are disabled for that channel.
        - 'valid': it's the dictionary of the resolved channel,
          or `False` if the channel is invalid and doesn't exist.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    ch_filtered = search_ch_subs(shared=shared,
                                 show_all=show_all,
                                 filtering=filtering,
                                 valid=valid,
                                 notifications=notifications,
                                 threads=threads,
                                 server=server)

    print_ch_subs(ch_filtered,
                  claim_id=claim_id,
                  file=file,
                  fdate=fdate,
                  sep=sep)

    return ch_filtered
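
A usage sketch, assuming the package-level import; the output file path is a
placeholder.

import lbrytools as lbryt  # assumed package-level import

# Print only the subscribed channels that no longer resolve online,
# including their claim IDs, and write the summary to a file.
channels = lbryt.list_ch_subs(show_all=False,
                              filtering="valid", valid=False,
                              claim_id=True,
                              file="/home/user/invalid_subs.txt")
if channels:
    print("Filtered subscriptions:", len(channels))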
Example #10
def ch_search_latest(channel=None, number=2, server="http://localhost:5279"):
    """Search the latest claims published by a single channel.

    Parameters
    ----------
    channel: str
        A channel's name, full or partial:
        `'@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`

        If a simplified name is used, and there are various channels
        with the same name, the one with the highest LBC bid will be selected.
        Enter the full name to choose the right one.
    number: int, optional
        It defaults to 2.
        The number of claims to search that were last posted by `channel`.
        If `number=0` it will search all claims ever published
        by this channel.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        A list of dictionaries, where each dictionary represents a claim
        published by `channel`.
    False
        If there is a problem, or no existing channel, it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not channel or not isinstance(channel, str):
        print("Channel must be a string.")
        print(f"channel={channel}")
        return False

    if not channel.startswith("@"):
        channel = "@" + channel

    if number is None or not isinstance(number, int) or number < 0:
        number = 2
        print("Number must be a non-negative integer, "
              f"set to default value, number={number}")

    if number == 0:
        output = srchall.ch_search_all_claims(channel,
                                              reverse=True,
                                              server=server)
        claims = output["claims"]
    elif 0 < number <= 50:
        claims = ch_search_fifty_claims(channel,
                                        number=number,
                                        reverse=True,
                                        server=server)
    elif number > 50:
        output = srchall.ch_search_n_claims(channel,
                                            number=number,
                                            reverse=True,
                                            server=server)
        claims = output["claims"]

    return claims
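
A usage sketch, assuming the package-level import; the channel name is a
placeholder.

import lbrytools as lbryt  # assumed package-level import

# Get the 20 latest claims from a channel and print their names.
claims = lbryt.ch_search_latest(channel="@MyChannel", number=20)

for claim in claims or []:
    print(claim.get("name"))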
Example #11
def find_channel(uri=None,
                 cid=None,
                 name=None,
                 full=True,
                 canonical=False,
                 offline=False,
                 server="http://*****:*****@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'

        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI.
        ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    full: bool, optional
        It defaults to `True`, in which case the returned
        name includes the digits after `'#'` or `':'` that uniquely identify
        that channel in the network.
        If it is `False` it will return just the base name.
        This parameter only works when `canonical=False`.
    canonical: bool, optional
        It defaults to `False`.
        If it is `True`, the `'canonical_url'` of the channel is returned
        regardless of the value of `full`.
    offline: bool, optional
        It defaults to `False`, in which case it will try to resolve
        the channel name from the online database (blockchain).

        If it is `True` it will try to resolve the channel name
        from the offline database. This will be faster but may not
        find a name if the channel was not resolved when the claim
        was initially downloaded.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    str
        The name of the channel.
        If `full=False` it returns the common name
        ::
            @MyChannel
        If `full=True` it returns the unique name
        ::
            @MyChannel#3
        If `canonical=True` it returns
        ::
            lbry://@MyChannel#3
    False
        If there is a problem or a non-existent claim, it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not (uri or cid or name):
        print("Find the channel's name from a claim's "
              "'URI', 'claim_id', or 'name'.")
        print(f"uri={uri}, cid={cid}, name={name}")
        return False

    item = srch.search_item(uri=uri,
                            cid=cid,
                            name=name,
                            offline=offline,
                            server=server)
    if not item:
        return False

    if offline:
        return item["channel_name"]

    if ("signing_channel" not in item
            or "canonical_url" not in item["signing_channel"]):
        name = "@_Unknown_"
        return name

    name = item["signing_channel"]["canonical_url"]

    if not canonical:
        name = name.lstrip("lbry://")

        if not full:
            name = name.split("#")[0]

    return name
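
A usage sketch of the three name forms described in the Returns section,
assuming the package-level import; the claim ID and the outputs shown in the
comments are placeholders.

import lbrytools as lbryt  # assumed package-level import

cid = 40 * "a"  # placeholder 40-character claim ID

print(lbryt.find_channel(cid=cid, full=False))      # e.g. '@MyChannel'
print(lbryt.find_channel(cid=cid, full=True))       # e.g. '@MyChannel#3'
print(lbryt.find_channel(cid=cid, canonical=True))  # e.g. 'lbry://@MyChannel#3'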
Example #12
File: print.py  Project: belikor/lbrytools
def print_items(items=None,
                show="all",
                blocks=False,
                cid=True,
                blobs=True,
                size=True,
                typ=False,
                ch=False,
                ch_online=True,
                name=True,
                title=False,
                path=False,
                sanitize=False,
                start=1,
                end=0,
                channel=None,
                reverse=False,
                file=None,
                fdate=False,
                sep=";",
                server="http://localhost:5279"):
    """Print information on each claim in the given list of claims.

    Parameters
    ----------
    items: list of dict
        List of items to print information about.
        Each item should be a dictionary filled with information
        from the standard output of the `lbrynet file list` command.
    show: str, optional
        It defaults to `'all'`, in which case it shows all items.
        If it is `'incomplete'` it will show claims that are missing blobs.
        If it is `'full'` it will show claims that have all blobs.
        If it is `'media'` it will show claims that have the media file
        (mp4, mp3, mkv, etc.).
        Normally only items that have all blobs also have a media file;
        however, if the claim is currently being downloaded
        a partial media file may be present.
        If it is `'missing'` it will show claims that don't have
        the media file, whether the full blobs are present or not.
    blocks: bool, optional
        It defaults to `False`, in which case it won't print
        the `height` block of the claims.
        If it is `True` it will print this value, which gives some idea
        of when the claim was registered in the blockchain.
    cid: bool, optional
        It defaults to `True`.
        Show the `'claim_id'` of the claim.
        It is a 40 character alphanumeric string.
    blobs: bool, optional
        It defaults to `True`.
        Show the number of blobs in the file, and how many are complete.
    size: bool, optional
        It defaults to `True`.
        Show the length of the stream in minutes and seconds, like `14:12`,
        when possible (audio and video), and also the size in mebibytes (MB).
    typ: bool, optional
        It defaults to `False`.
        Show the type of stream (video, audio, document, etc.).
    ch: bool, optional
        It defaults to `False`.
        Show the name of the channel that published the claim.

        This is slow if `ch_online=True`.
    ch_online: bool, optional
        It defaults to `True`, in which case it searches for the channel name
        by doing a reverse search of the item online. This makes the search
        slow.

        By setting it to `False` it will consider the channel name
        stored in the input dictionary itself, which will be faster
        but it won't be the full name of the channel. If no channel is found
        offline, it will use the default value `'_Unknown_'` just so
        it can be printed without error.

        This parameter only has effect if `ch=True`, or if `channel`
        is used, as it internally sets `ch=True`.
    name: bool, optional
        It defaults to `True`.
        Show the name of the claim.
    title: bool, optional
        It defaults to `False`.
        Show the title of the claim.
    path: bool, optional
        It defaults to `False`.
        Show the full path of the saved media file.
    sanitize: bool, optional
        It defaults to `False`, in which case it will not remove the emojis
        from the name of the claim and channel.
        If it is `True` it will remove these unicode characters.
        This option requires the `emoji` package to be installed.
    start: int, optional
        It defaults to 1.
        Show claims starting from this index in the list of items.
    end: int, optional
        It defaults to 0.
        Show claims until and including this index in the list of items.
        If it is 0, it is the same as the last index in the list.
    channel: str, optional
        It defaults to `None`.
        It must be a channel's name, in which case it shows
        only the claims published by this channel.

        Using this parameter sets `ch=True`, and is slow because
        it needs to perform an additional search for the channel.
    reverse: bool, optional
        It defaults to `False`, in which case older items come first
        in the output list.
        If it is `True` newer claims are at the beginning of the list.
    file: str, optional
        It defaults to `None`.
        It must be a user writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    bool
        It returns `True` if it printed the summary successfully.
        If there is any error it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not items or not isinstance(items, (list, tuple)):
        print("No input item list. "
              "A list of items must be obtained from `lbrynet file list`.")
        print(f"items={items}, "
              f"show={show}, "
              f"blocks={blocks}, cid={cid}, blobs={blobs}, size={size}, "
              f"typ={typ}, ch={ch}, ch_online={ch_online}, "
              f"name={name}, title={title}, path={path}, "
              f"sanitize={sanitize}, reverse={reverse}, "
              f"start={start}, end={end}, channel={channel}, "
              f"file={file}, fdate={fdate}, sep={sep}")
        if file:
            print("No file written.")
        return False

    n_items = len(items)

    if reverse:
        items.reverse()

    if (not isinstance(show, str)
            or show not in ("all", "media", "missing", "incomplete", "full")):
        print(">>> Error: show can only be 'all', 'media', 'missing', "
              "'incomplete', or 'full'")
        print(f"show={show}")
        return False

    if channel:
        if not isinstance(channel, str):
            print(">>> Error: channel must be a string")
            return False
        ch = True

    if file and not isinstance(file, str):
        print("The file must be a string.")
        print(f"file={file}")
        return False

    out = []

    for num, item in enumerate(items, start=1):
        if num < start:
            continue
        if end != 0 and num > end:
            break

        st_path = item["download_path"]
        st_blobs = item["blobs_completed"]
        st_blobs_in_stream = item["blobs_in_stream"]
        # st_completed = item["completed"]

        # Skip printing an item depending on the value of `show`,
        # and whether the blobs or media files exist or not
        if show in "media" and not st_path:
            continue
        elif show in "missing" and st_path:
            continue
        elif show in "incomplete" and st_blobs == st_blobs_in_stream:
            continue
        elif show in "full" and st_blobs < st_blobs_in_stream:
            continue

        meta = item["metadata"]

        st_height = item["height"]
        st_time = int(meta["release_time"])
        st_time = time.strftime("%Y%m%d_%H:%M:%S%z", time.localtime(st_time))

        st_claim_id = item["claim_id"]
        st_type = meta.get("stream_type", 8 * "_")
        st_claim_name = item["claim_name"]
        st_title = meta["title"]

        length_s = 0

        if ("video" in meta and "duration" in meta["video"]):
            length_s = meta["video"]["duration"]
        if ("audio" in meta and "duration" in meta["audio"]):
            length_s = meta["audio"]["duration"]

        rem_s = length_s % 60
        rem_min = length_s // 60

        st_size = 0
        if ("source" in meta and "size" in meta["source"]):
            st_size = float(meta["source"]["size"])
            st_size = st_size / (1024**2)  # to MB

        if ch:
            if ch_online:
                # Searching online is slower but it gets the full channel name
                st_channel = srch_ch.find_channel(cid=item["claim_id"],
                                                  full=True,
                                                  server=server)
                if not st_channel:
                    print(st_claim_name)
                    print()
                    continue
            else:
                # Searching offline is necessary for "invalid" claims
                # that no longer exist as active claims online.
                # We don't want to skip this item so we force a channel name.
                st_channel = item["channel_name"]
                if not st_channel:
                    st_channel = "_Unknown_"

            # Skip if the item is not published by the specified channel
            if channel and channel not in st_channel:
                continue

            if sanitize:
                st_channel = funcs.sanitize_text(st_channel)

        if sanitize:
            st_claim_name = funcs.sanitize_text(st_claim_name)
            st_title = funcs.sanitize_text(st_title)

        line = f"{num:4d}/{n_items:4d}"

        if blocks:
            line += f"{sep} " + f"{st_height:8d}"

        line += f"{sep} " + f"{st_time}"

        if cid:
            line += f"{sep} " + f"{st_claim_id}"

        if blobs:
            line += f"{sep} " + f"{st_blobs:3d}/{st_blobs_in_stream:3d}"

        if size:
            line += f"{sep} " + f"{rem_min:3d}:{rem_s:02d}"
            line += f"{sep} " + f"{st_size:9.4f} MB"

        if typ:
            line += f"{sep} " + f"{st_type:9s}"

        if st_path:
            line += f"{sep} " + "media   "
        else:
            line += f"{sep} " + "no-media"

        if ch:
            line += f"{sep} " + f"{st_channel}"

        if name:
            line += f"{sep} " + f'"{st_claim_name}"'

        if title:
            line += f"{sep} " + f'"{st_title}"'

        if path:
            line += f"{sep} " + f'"{st_path}"'

        out.append(line)

    print(f"Number of shown items: {len(out)}")

    funcs.print_content(out, file=file, fdate=fdate)

    return True
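
A usage sketch, assuming both `sort_items` and `print_items` are exposed at
the package level; the output file path is a placeholder.

import lbrytools as lbryt  # assumed package-level import

# Summarize incomplete downloads, newest first, with channel names
# taken from the local database only.
items = lbryt.sort_items()  # a list built from `lbrynet file list`

if items:
    lbryt.print_items(items, show="incomplete",
                      ch=True, ch_online=False,
                      title=True, reverse=True,
                      file="/home/user/incomplete_claims.txt")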
Example #13
def ch_download_latest(channel=None, number=2,
                       repost=True,
                       ddir=None, own_dir=True, save_file=True,
                       server="http://*****:*****@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`

        If a simplified name is used, and there are various channels
        with the same name, the one with the highest LBC bid will be selected.
        Enter the full name to choose the right one.
    number: int, optional
        It defaults to 2.
        The number of items to download that were last posted by `channel`.
    repost: bool, optional
        It defaults to `True`, in which case it will check if the claims
        are reposts, and if they are, the original claims will be downloaded.
        If it is `False`, it won't check the claims for reposts,
        so if they are reposts they won't be downloaded
        as reposts can't be directly downloaded.
    ddir: str, optional
        It defaults to `$HOME`.
        The path to the download directory.
    own_dir: bool, optional
        It defaults to `True`, in which case it places the downloaded
        content inside a subdirectory named after the channel in `ddir`.
    save_file: bool, optional
        It defaults to `True`, in which case all blobs of the stream
        will be downloaded, and the media file (mp4, mp3, mkv, etc.)
        will be placed in the download directory.
        If it is `False` it will only download the blobs.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        A list of dictionaries, where each dictionary represents
        the standard output of the `lbrynet_get` command for each
        downloaded claim.
    False
        If there is a problem, or no existing channel,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not channel or not isinstance(channel, str):
        print("Download items from a single channel.")
        print(f"channel={channel}")
        return False

    if not number or not isinstance(number, int) or number < 0:
        number = 2
        print("Number must be a positive integer, "
              f"set to default value, number={number}")

    if (not ddir or not isinstance(ddir, str)
            or ddir == "~" or not os.path.exists(ddir)):
        ddir = os.path.expanduser("~")
        print(f"Download directory should exist; set to ddir='{ddir}'")

    list_info_get = []
    items = srch_ch.ch_search_latest(channel=channel, number=number,
                                     server=server)
    if not items:
        print()
        return False

    n_items = len(items)

    for it, item in enumerate(items, start=1):
        print(f"Claim {it}/{n_items}")
        info_get = dld.download_single(cid=item["claim_id"],
                                       repost=repost,
                                       ddir=ddir, own_dir=own_dir,
                                       save_file=save_file,
                                       server=server)
        list_info_get.append(info_get)
        print()

    return list_info_get
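
A usage sketch, assuming the package-level import; the channel name and the
download directory are placeholders.

import lbrytools as lbryt  # assumed package-level import

# Download the three latest claims from a channel into its own subdirectory.
results = lbryt.ch_download_latest(channel="@MyChannel", number=3,
                                   ddir="/home/user/Downloads",
                                   own_dir=True, save_file=True)
print("Claims processed:", len(results) if results else 0)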
Example #14
def download_claims(ddir=None, own_dir=True, save_file=True,
                    start=1, end=0, file=None, sep=";", invalid=False,
                    server="http://localhost:5279"):
    """Download claims from a file, or redownload the ones already present.

    Parameters
    ----------
    ddir: str, optional
        It defaults to `$HOME`.
        The path to the download directory.
    own_dir: bool, optional
        It defaults to `True`, in which case it places the downloaded
        content inside a subdirectory named after the channel in `ddir`.
    save_file: bool, optional
        It defaults to `True`, in which case all blobs of the stream
        will be downloaded, and the media file (mp4, mp3, mkv, etc.)
        will be placed in the download directory.
        If it is `False` it will only download the blobs.
    start: int, optional
        It defaults to 1.
        Operate on the item starting from this index in the internal list
        of claims or in the claims provided by `file`.
    end: int, optional
        It defaults to 0.
        Operate until and including this index in the internal list of claims
        or in the claims provided by `file`.
        If it is 0, it is the same as the last index.
    file: str, optional
        It defaults to `None`.
        The file to read claims from. It is a comma-separated value (CSV)
        list of claims, in which each row represents a claim,
        and one data field is the `'claim_id'` which can be used
        with `download_single` to get that claim. The value of `sep`
        indicates the separator between the fields.

        If `file=None` it will re-download the claims obtained
        from `sort_items` which should already be present
        in the system fully or partially.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the read file. Since the claim name
        can have commas, a semicolon `;` is used by default.
    invalid: bool, optional
        It defaults to `False`, in which case it will assume
        the processed claims are still valid in the online database.
        It will use `lbrynet claim search` to resolve the `claim_id`.

        If it is `True` it will assume the claims are no longer valid,
        that is, that the claims have been removed from the online database
        and only exist locally.
        In this case, it will use `lbrynet file list` to resolve
        the `claim_id`.

        Therefore this parameter is required if `file` is a document
        containing 'invalid' claims, otherwise the claims won't be found.
        'Invalid' claims cannot be downloaded anymore from the online
        database; if their binary blobs are complete, the media files
        (mp4, mp3, mkv, etc.) will simply be recreated in `ddir`.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        A list of dictionaries, where each dictionary represents
        the standard output of the `lbrynet_get` command for each
        downloaded claim.
    False
        If there is a problem, no valid claims, or a non-existent file,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    print(80 * "-")

    if not file:
        print("Redownload from existing claims")
        sorted_items = sort.sort_items(server=server)

        if not sorted_items:
            print(">>> Error: no claims previously downloaded.")
            return False
    else:
        if not isinstance(file, str) or not os.path.exists(file):
            print("The file path must exist.")
            print(f"file={file}")
            return False

        print("Download from existing file")
        sorted_items = srch.parse_claim_file(file=file, sep=sep)
        print()

        if not sorted_items:
            print(">>> Error: the file must have a 'claim_id' "
                  "(40-character alphanumeric string); "
                  "could not parse the file.")
            print(f"file={file}")
            return False

    n_items = len(sorted_items)

    list_info_get = []

    for it, item in enumerate(sorted_items, start=1):
        if it < start:
            continue
        if end != 0 and it > end:
            break

        print(f"Claim {it}/{n_items}")
        info_get = dld.download_single(cid=item["claim_id"],
                                       invalid=invalid,
                                       ddir=ddir, own_dir=own_dir,
                                       save_file=save_file,
                                       server=server)
        list_info_get.append(info_get)
        print()

    return list_info_get
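
For illustration only (not part of the original source), and assuming
a running `lbrynet` daemon plus these functions in the same namespace,
claims listed in a semicolon-separated file could be downloaded like this,
with hypothetical file and directory names::

    download_claims(file="claims_list.csv", sep=";",
                    ddir="/home/user/Downloads",
                    start=1, end=10)
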
Example #15
def create_support(uri=None,
                   cid=None,
                   name=None,
                   amount=0.0,
                   server="http://*****:*****@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'

        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI.
        ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    amount: float, optional
        It defaults to `0.0`.
        It is the amount of LBC support that will be deposited,
        whether there is a previous support or not.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    dict
        A dictionary with information on the result of the support.
        The keys are the following:
        - 'canonical_url': canonical URI of the claim.
        - 'claim_id': unique 40 character alphanumeric string.
        - 'existing_support': existing support before we add or remove ours;
          this is the sum of `base_support` and `old_support`.
        - 'base_support': existing minimum support that we do not control;
          all published claims must have a positive `base_support`.
        - 'old_support': support that we have added to this claim in the past;
          it may be zero.
        - 'new_support': new support that was successfully deposited
          in the claim, equal to `amount`.
        - 'txid': transaction ID in the blockchain that records the operation.
    False
        If there is a problem, a non-existent claim, or lack of funds,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    supports = get_base_support(uri=uri, cid=cid, name=name)
    if not supports:
        return False

    uri = supports["canonical_url"]
    claim_id = supports["claim_id"]
    existing = supports["existing_support"]
    base_support = supports["base_support"]
    old_support = supports["old_support"]

    new_support = 0.0
    t_input = 0.0
    t_output = 0.0
    t_fee = 0.0
    txid = None

    amount = abs(amount)
    msg = {
        "method": "support_create",
        "params": {
            "claim_id": claim_id,
            "amount": f"{amount:.8f}"
        }
    }

    output = requests.post(server, json=msg).json()

    if "error" in output:
        error = output["error"]
        if "data" in error:
            print(">>> Error: {}, {}".format(error["data"]["name"],
                                             error["message"]))
        else:
            print(f">>> Error: {error}")
        print(f">>> Requested amount: {amount:.8f}")
        return False

    new_support = amount
    t_input = float(output["result"]["total_input"])
    t_output = float(output["result"]["total_output"])
    t_fee = float(output["result"]["total_fee"])
    txid = output["result"]["txid"]

    out = [
        f"canonical_url: {uri}", f"claim_id: {claim_id}",
        f"Existing support: {existing:14.8f}",
        f"Base support:     {base_support:14.8f}",
        f"Old support:      {old_support:14.8f}",
        f"New support:      {new_support:14.8f}", "",
        f"Applied:          {new_support:14.8f}",
        f"total_input:      {t_input:14.8f}",
        f"total_output:     {t_output:14.8f}",
        f"total_fee:        {t_fee:14.8f}", f"txid: {txid}"
    ]
    print("\n".join(out))

    return {
        "canonical_url": uri,
        "claim_id": claim_id,
        "existing_support": existing,
        "base_support": base_support,
        "old_support": old_support,
        "new_support": new_support,
        "txid": txid
    }
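
A brief usage sketch with illustrative values (not from the original source);
it assumes a running `lbrynet` daemon with enough funds in the wallet::

    create_support(uri="lbry://@MyChannel#3/some-video-name#2",
                   amount=5.0)
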
Example #16
def search_ch_subs_latest(number=4,
                          override=False,
                          shared=True,
                          show_all=True,
                          filtering="valid",
                          valid=True,
                          notifications=True,
                          threads=32,
                          server="http://localhost:5279"):
    """Search the latest claims from our subscribed channels in the wallet.

    Parameters
    ----------
    number: int, optional
        It defaults to 4.
        Number of newest claims to get from each channel in our subscriptions.
        If it is 0 it will be set to 1, because a value of 0 would search
        all claims of each channel, which could take a very long time.
        Override this minimum of 1 by using `override=True`.
    override: bool, optional
        It defaults to `False`.
        If it is `True` then `number` can be set to 0.
    shared: bool, optional
        It defaults to `True`. See `search_ch_subs`.
    show_all: bool, optional
        It defaults to `True`. See `search_ch_subs`.
    filtering: str, optional
        It defaults to `'valid'`. See `search_ch_subs`.
    valid: bool, optional
        It defaults to `True`. See `search_ch_subs`.
    notifications: bool, optional
        It defaults to `True`. See `search_ch_subs`.
    threads: int, optional
        It defaults to 32. See `search_ch_subs`.
    server: str, optional
        It defaults to `'http://localhost:5279'`. See `search_ch_subs`.

    Returns
    -------
    list of dict
        It returns a list of dictionaries where each dictionary corresponds
        to the information of a subscribed channel in the wallet.
        The channels in the wallet are filtered depending
        on the values of `shared`, `show_all`, `filtering`, `valid`,
        and `notifications`.

        Each dictionary has three keys:
        - 'channel': the `'name'` of the channel with three characters
          from the claim ID, like `@channel#123`.
        - 'claim_id': the full 40-digit alphanumeric unique ID
          of the channel.
        - 'claims': list of dict; each dictionary corresponds
          to one of the newest claims of the channel.
          The list has a maximum length of `number`.
          If the channel is invalid, `'claims'` will be `False`,
          meaning that the channel was probably deleted and is no longer
          found online.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    s_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A", time.localtime())

    if number < 0:
        number = 1

    if number == 0 and not override:
        number = 1
        print("Number of claims set to: 1")

    ch_filtered = search_ch_subs(shared=shared,
                                 show_all=show_all,
                                 filtering=filtering,
                                 valid=valid,
                                 notifications=notifications,
                                 threads=threads,
                                 server=server)

    if not ch_filtered or len(ch_filtered) < 1:
        return False

    ch_latest_claims = []

    # Iterables to be passed to the ThreadPoolExecutor
    n_channels = len(ch_filtered)
    numbers = (number for n in range(n_channels))
    servers = (server for n in range(n_channels))

    if threads:
        with fts.ThreadPoolExecutor(max_workers=threads) as executor:
            # The input must be iterables
            results = executor.map(ch_search_latest_d, ch_filtered, numbers,
                                   servers)
            print(f"Channel claims search; max threads: {threads}")
            ch_latest_claims = list(results)  # generator to list
    else:
        for channel in ch_filtered:
            claims = ch_search_latest_d(channel=channel,
                                        number=number,
                                        server=server)
            ch_latest_claims.append(claims)

    e_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A", time.localtime())
    print()
    print(f"start: {s_time}")
    print(f"end:   {e_time}")

    return ch_latest_claims
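
An illustrative call (not from the original source) that fetches the two
newest claims from each subscribed channel that still resolves online::

    ch_latest = search_ch_subs_latest(number=2, show_all=False,
                                      filtering="valid", valid=True,
                                      threads=16)
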
Example #17
def get_all_supports(server="http://localhost:5279"):
    """Get all supports in a dictionary; all, valid, and invalid.

    Returns
    -------
    dict
        A dictionary with information on the supports.
        The keys are the following:
        - 'all_supports': list with dictionaries of all supports.
        - 'all_resolved': list with dictionaries of all resolved claims
          corresponding to all supports.
          Invalid claims will simply be `False`.
        - 'valid_supports': list with dictionaries of supports
          for valid claims only.
        - 'valid_resolved': list with dictionaries of resolved claims
          corresponding to `'valid_supports'` only.
        - 'invalid_supports': list with dictionaries of supports
          for invalid claims. The claim IDs in these dictionaries
          cannot be resolved anymore.
    False
        If there is a problem or no list of supports, it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    msg = {"method": "support_list", "params": {"page_size": 99000}}
    output = requests.post(server, json=msg).json()

    if "error" in output:
        return False

    items = output["result"]["items"]
    n_items = len(items)

    if n_items < 1:
        print(f"Supports found: {n_items}")
        return False

    valid = []
    valid_resolved = []
    invalid = []

    all_supports = []
    all_resolved = []

    for item in items:
        s = srch.search_item(cid=item["claim_id"], server=server)

        if not s:
            invalid.append(item)
        else:
            valid.append(item)
            valid_resolved.append(s)

        all_supports.append(item)
        all_resolved.append(s)

    return {
        "all_supports": all_supports,
        "all_resolved": all_resolved,
        "valid_supports": valid,
        "valid_resolved": valid_resolved,
        "invalid_supports": invalid
    }
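
A hypothetical way to use the returned dictionary (not from the original
source), for example to inspect the supports given to invalid claims::

    supports = get_all_supports()

    if supports:
        for support in supports["invalid_supports"]:
            print(support["claim_id"], support["amount"])
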
Example #18
def list_ch_subs_latest(number=4,
                        override=False,
                        claim_id=False,
                        typ=True,
                        title=False,
                        sanitize=False,
                        shared=True,
                        show_all=True,
                        filtering="valid",
                        valid=True,
                        notifications=False,
                        threads=32,
                        start=1,
                        end=0,
                        file=None,
                        fdate=False,
                        sep=";",
                        server="http://localhost:5279"):
    """List the latest claims from our subscribed channels.

    Parameters
    ----------
    number: int, optional
        It defaults to 4.
        Number of newest claims to get from each channel in our subscriptions.
        If it is 0 it will be set to 1, because a value of 0 would search
        all claims of each channel, which could take a very long time.
        Override this minimum of 1 by using `override=True`.
    override: bool, optional
        It defaults to `False`.
        If it is `True` then `number` can be set to 0.
    claim_id: bool, optional
        It defaults to `False`.
        If it is `True` it will print the `'claim_id'` of the claims
        of the channel.
    typ: bool, optional
        It defaults to `True` in which case it will print the claim type,
        and stream type (if any) of the claims of the channel.
    title: bool, optional
        It defaults to `False` in which case the claim `'name'`
        will be printed.
        If it is `True`, the claim `'title'` will be printed instead.
    sanitize: bool, optional
        It defaults to `False`, in which case it will not remove the emojis
        from the name or title of the claim.
        If it is `True` it will remove these unicode characters.
        This option requires the `emoji` package to be installed.
    shared: bool, optional
        It defaults to `True`, in which case it uses the shared database
        synchronized with Odysee online.
        If it is `False` it will use only the local database
        to `lbrynet`, for example, used by the LBRY Desktop application.
    show_all: bool, optional
        It defaults to `True`, in which case all followed channels
        will be printed, regardless of `filtering`, `valid`,
        or `notifications`.
        If it is `False` then we can control what channels to show
        with `filtering`, `valid`, or `notifications`.
    filtering: str, optional
        It defaults to `'valid'`. It is the type of filtering that
        will be done as long as `show_all=False`.
        It can be `'valid'` (depending on the value of `valid` parameter),
        `'notifications'` (depending on the value of `notifications`),
        or `'both'` (depending on the values of `valid` and `notifications`).
        If `'both'`, the list of channels will be filtered by `valid` first,
        and then by `notifications`.
    valid: bool, optional
        It defaults to `True` in which case only the channels that resolve
        online will be returned.
        If it is `False` it will return only those channels that no longer
        resolve online.
        This parameter only works when `show_all=False`
        and `filtering='valid'` or `'both'`.
    notifications: bool, optional
        It defaults to `False` in which case only the channels
        that have notifications disabled will be returned.
        If it is `True` it will return only those channels
        that have notifications enabled.
        This parameter only works when `show_all=False`
        and `filtering='notifications'` or `'both'`.
    threads: int, optional
        It defaults to 32.
        It is the number of threads that will be used to resolve channels
        online, meaning that many channels will be searched in parallel.
        This number shouldn't be large if the CPU doesn't have many cores.
    start: int, optional
        It defaults to 1.
        Show claims starting from this index in the list of items.
    end: int, optional
        It defaults to 0.
        Show claims until and including this index in the list of items.
        If it is 0, it is the same as the last index in the list.
    file: str, optional
        It defaults to `None`.
        It must be a writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        It returns a list of dictionaries where each dictionary corresponds
        to the information of a subscribed channel in the wallet.
        The channels in the wallet are filtered depending
        on the values of `shared`, `show_all`, `filtering`, `valid`,
        and `notifications`.

        Each dictionary has three keys:
        - 'channel': the `'name'` of the channel with three characters
          from the claim ID, like `@channel#123`.
        - 'claim_id': the full 40-digit alphanumeric unique ID
          of the channel.
        - 'claims': list of dict; each dictionary corresponds
          to one of the newest claims of the channel.
          The list has a maximum length of `number`.
          If the channel is invalid, `'claims'` will be `False`,
          meaning that the channel was probably deleted and is no longer
          found online.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    ch_latest_claims = search_ch_subs_latest(number=number,
                                             override=override,
                                             shared=shared,
                                             show_all=show_all,
                                             filtering=filtering,
                                             valid=valid,
                                             notifications=notifications,
                                             threads=threads,
                                             server=server)

    print_ch_subs_latest(ch_latest_claims,
                         claim_id=claim_id,
                         typ=typ,
                         title=title,
                         sanitize=sanitize,
                         start=start,
                         end=end,
                         file=file,
                         fdate=fdate,
                         sep=sep)

    return ch_latest_claims
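
An illustrative call (not from the original source) that prints the three
newest claims of each subscribed channel, with claim IDs, to a summary file
whose path is hypothetical::

    list_ch_subs_latest(number=3, claim_id=True,
                        file="/home/user/subs_latest.txt", fdate=True)
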
Example #19
def abandon_support_inv(invalids=None,
                        cid=None,
                        name=None,
                        keep=0.0,
                        server="http://*****:*****@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    keep: float, optional
        It defaults to `0.0`.
        It is the amount of LBC support that should remain in the claim
        after we remove our previous support. That is, we can use
        this parameter to assign a new support value.
        If it is `0.0` all support is removed.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    dict
        A dictionary with information on the result of the support.
        The keys are the following:
        - 'claim_name': name of the claim; the canonical URI is not available
          because the claim can't be resolved online any more.
        - 'claim_id': unique 40 character alphanumeric string.
        - 'existing_support': existing support before we add or remove ours;
          this should be the same as `old_support`.
        - 'base_support': since this claim does not resolve any more,
          it should be zero.
        - 'old_support': support that we have added to this claim in the past;
          it cannot be zero because we use this method only with claims
          that have been previously supported (and are now invalid).
        - 'new_support': new support that was successfully deposited
          in the claim, equal to `keep`.
        - 'txid': transaction ID in the blockchain that records the operation.
    False
        If there is a problem, a non-existent claim, or lack of funds,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not cid and not name:
        print(80 * "-")
        print(f'cid={cid}\n' f'name="{name}"')
        return False

    existing = 0
    base_support = 0
    old_support = 0
    found = False

    if not invalids:
        all_supports = get_all_supports(server=server)

        if not all_supports:
            return False

        invalids = all_supports["invalid_supports"]

    for supp in invalids:
        if ((cid and cid in supp["claim_id"])
                or (name and name in supp["name"])):
            existing = float(supp["amount"])
            old_support = float(supp["amount"])
            claim_id = supp["claim_id"]
            c_name = supp["name"]
            found = True

    if not found:
        print(80 * "-")
        print("Claim not found among the invalid claims")
        print(f'cid={cid}\n' f'name="{name}"')
        return False

    calc, text = calculate_abandon(claim_id=claim_id, keep=keep, server=server)
    if not calc:
        return False

    new_support = calc["new_support"]
    txid = calc["txid"]

    out = [
        f"claim_name: {c_name}", f"claim_id: {claim_id}",
        f"Existing support: {existing:14.8f}",
        f"Base support:     {base_support:14.8f}",
        f"Old support:      {old_support:14.8f}",
        f"New support:      {keep:14.8f}", ""
    ]
    out += text

    print("\n".join(out))

    return {
        "claim_name": c_name,
        "claim_id": claim_id,
        "existing_support": existing,
        "base_support": base_support,
        "old_support": old_support,
        "new_support": new_support,
        "txid": txid
    }
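
An illustrative call (not from the original source) that removes all of our
support from an invalid claim identified by a hypothetical name::

    abandon_support_inv(name="some-video-name", keep=0.0)
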
Example #20
def search_ch_subs(shared=True,
                   show_all=True,
                   filtering="valid",
                   valid=True,
                   notifications=False,
                   threads=32,
                   server="http://localhost:5279"):
    """Search the wallet for the channel subscriptions.

    Parameters
    ----------
    shared: bool, optional
        It defaults to `True`, in which case it uses the shared database
        synchronized with Odysee online.
        If it is `False` it will use only the local database
        to `lbrynet`, for example, used by the LBRY Desktop application.
    show_all: bool, optional
        It defaults to `True`, in which case all followed channels
        will be printed, regardless of `filtering`, `valid`,
        or `notifications`.
        If it is `False` then we can control what channels to show
        with `filtering`, `valid`, or `notifications`.
    filtering: str, optional
        It defaults to `'valid'`. It is the type of filtering that
        will be done as long as `show_all=False`.
        It can be `'valid'` (depending on the value of `valid` parameter),
        `'notifications'` (depending on the value of `notifications`),
        or `'both'` (depending on the values of `valid` and `notifications`).
        If `'both'`, the list of channels will be filtered by `valid` first,
        and then by `notifications`.
    valid: bool, optional
        It defaults to `True` in which case only the channels that resolve
        online will be returned.
        If it is `False` it will return only those channels that no longer
        resolve online.
        This parameter only works when `show_all=False`
        and `filtering='valid'` or `'both'`.
    notifications: bool, optional
        It defaults to `False` in which case only the channels
        that have notifications disabled will be returned.
        If it is `True` it will return only those channels
        that have notifications enabled.
        This parameter only works when `show_all=False`
        and `filtering='notifications'` or `'both'`.
    threads: int, optional
        It defaults to 32.
        It is the number of threads that will be used to resolve channels
        online, meaning that many channels will be searched in parallel.
        This number shouldn't be large if the CPU doesn't have many cores.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        It returns the list of dictionaries representing
        the filtered channels depending on the values of `shared`, `show_all`,
        `filtering`, `valid`, and `notifications`.

        Each dictionary has three keys:
        - 'uri': the `'permanent_url'` of the channel.
        - 'notificationsDisabled': a boolean value, indicating whether
          the notification is enabled for that channel or not.
        - 'valid': it's the dictionary of the resolved channel,
          or `False` if the channel is invalid and doesn't exist.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    msg = {"method": "preference_get", "params": {}}

    output = requests.post(server, json=msg).json()
    if "error" in output:
        print(">>> No 'result' in the JSON-RPC server output")
        return False

    result = output["result"]

    sync = result.get("enable-sync", False)

    r_local = result["local"]

    if "shared" in result:
        r_shared = result["shared"]

    print("Channel subscriptions")
    print(80 * "-")
    print(f"Synchronization: {sync}")
    print("Show all:", bool(show_all))
    if not show_all:
        print(f"Filtering: '{filtering}'")
        print("- Valid:", bool(valid))
        print("- Notifications:", bool(notifications))

    following_local = r_local["value"]["following"]

    if "shared" in result:
        following_shared = r_shared["value"]["following"]

    if shared and "shared" in result:
        print("Database: shared")
        channels = following_shared
        n_channels = len(following_shared)
    else:
        if shared:
            print("No shared database, will use local")
        else:
            print("Database: local")
        channels = following_local
        n_channels = len(following_local)

    res_channels = []

    # Iterables to be passed to the ThreadPoolExecutor
    servers = (server for n in range(n_channels))

    if threads:
        with fts.ThreadPoolExecutor(max_workers=threads) as executor:
            # The input must be iterables
            res_channels = executor.map(validate_ch, channels, servers)
            print(f"Resolving channels; max threads: {threads}")
            res_channels = list(res_channels)  # generator to list
    else:
        for channel in channels:
            result = validate_ch(channel, server=server)
            res_channels.append(result)

    if show_all:
        return res_channels

    ch_valid_filtered = filter_valid(res_channels, f_valid=valid)

    ch_notif_filtered = filter_notif(res_channels,
                                     f_notifications=notifications)

    ch_both = filter_notif(ch_valid_filtered, f_notifications=notifications)

    if filtering in "valid":
        ch_filtered = ch_valid_filtered
    elif filtering in "notifications":
        ch_filtered = ch_notif_filtered
    elif filtering in "both":
        ch_filtered = ch_both

    return ch_filtered
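
An illustrative call (not from the original source) that returns only
the subscribed channels which no longer resolve online::

    invalid_chs = search_ch_subs(show_all=False,
                                 filtering="valid", valid=False)
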
Example #21
def list_supports(claim_id=False,
                  invalid=False,
                  combine=True,
                  claims=True,
                  channels=True,
                  file=None,
                  fdate=False,
                  sep=";",
                  server="http://localhost:5279"):
    """Print supported claims, the amount, and the trending score.

    Parameters
    ----------
    claim_id: bool, optional
        It defaults to `False`, in which case only the name of the claim
        is shown.
        If it is `True` the `'claim_id'` will be shown as well.
    invalid: bool, optional
        It defaults to `False`, in which case it will show all supported
        claims, even those that are invalid.
        If it is `True` it will only show invalid claims. Invalid are those
        which were deleted by their authors, so the claim (channel
        or content) is no longer available in the blockchain.
    combine: bool, optional
        It defaults to `True`, in which case the `global`, `group`, `local`,
        and `mixed` trending scores are added into one combined score.
        If it is `False` it will show the four values separately.
    claims: bool, optional
        It defaults to `True`, in which case supported claims will be shown.
        If it is `False` simple claims won't be shown.
    channels: bool, optional
        It defaults to `True`, in which case supported channels will be shown.
        If it is `False` channel claims (which start with the `@` symbol)
        won't be shown.
    file: str, optional
        It defaults to `None`.
        It must be a user writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        A list of dictionaries with the resolved claims
        that correspond to the listed supports.
    False
        If there is a problem or no supports, it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    # Assumed reconstruction: gather all supports and their resolved claims
    # (see `get_all_supports`) so that `items`, `resolved`, `n_items`,
    # and `out_list` are available for the loop below.
    support_info = get_all_supports(server=server)

    if not support_info:
        return False

    items = support_info["all_supports"]
    resolved = support_info["all_resolved"]
    n_items = len(items)

    out_list = []

    for num, (item, s) in enumerate(zip(items, resolved), start=1):
        name = item["name"]
        cid = item["claim_id"]
        is_channel = True if name.startswith("@") else False

        if is_channel and not channels:
            continue
        if not is_channel and not claims:
            continue

        obj = ""
        if claim_id:
            obj += f'"{cid}"' + f"{sep} "

        _name = f'"{name}"'

        if not s:
            _name = "[" + _name + "]"

        obj += f'{_name:58s}'

        _amount = float(item["amount"])
        amount = f"{_amount:14.8f}"

        if not s:
            m = {"support_amount": "0.0"}
            s = {"amount": item["amount"]}
        else:
            if invalid:
                continue
            m = s["meta"]

        existing_support = float(s["amount"]) + float(m["support_amount"])

        trend_gl = m.get("trending_global", 0)
        trend_gr = m.get("trending_group", 0)
        trend_loc = m.get("trending_local", 0)
        trend_mix = m.get("trending_mixed", 0)

        combined = (trend_gl + trend_gr + trend_loc + trend_mix)

        tr_gl = f'{trend_gl:7.2f}'
        tr_gr = f'{trend_gr:7.2f}'
        tr_loc = f'{trend_loc:7.2f}'
        tr_mix = f'{trend_mix:7.2f}'
        tr_combined = f'{combined:7.2f}'
        is_spent = item["is_spent"]

        out = f"{num:3d}/{n_items:3d}" + f"{sep} "
        out += f"{obj}" + f"{sep} " + f"{amount}" + f"{sep} "
        out += f"{existing_support:15.8f}" + f"{sep} "

        if not is_spent:
            if combine:
                out += f"combined: {tr_combined}"
            else:
                out += f"mix: {tr_mix}" + f"{sep} "
                out += f"glob: {tr_gl}" + f"{sep} "
                out += f"grp: {tr_gr}" + f"{sep} "
                out += f"loc: {tr_loc}"
        else:
            continue
        out_list.append(out)

    funcs.print_content(out_list, file=file, fdate=fdate)

    return resolved
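
An illustrative call (not from the original source) that shows only
the supported channels, with their claim IDs and the combined trending score::

    list_supports(claim_id=True, claims=False, channels=True)
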
Example #22
def list_ch_peers(channels=None,
                  number=None, shuffle=True,
                  ch_threads=8, claim_threads=32,
                  file=None, fdate=False, sep=";",
                  server="http://*****:*****@MyChannel#5', 3],
                         ['GoodChannel#f', 4],
                         ['Fast_channel', 2]
                       ]
    number: int, optional
        It defaults to `None`.
        If this is present, it will override the individual
        numbers in `channels`.
        That is, the number of claims that will be searched
        will be the same for every channel.
    shuffle: bool, optional
        It defaults to `True`, in which case it will shuffle
        the list of channels so that they are not processed in the order
        that they come in the list.
    ch_threads: int, optional
        It defaults to 8.
        It is the number of threads that will be used to process channels,
        meaning that many channels will be searched in parallel.
    claim_threads: int, optional
        It defaults to 32.
        It is the number of threads that will be used to search for peers,
        meaning that many claims will be searched in parallel.
        This number shouldn't be large if the CPU doesn't have many cores.
    file: str, optional
        It defaults to `None`.
        It must be a user writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        Each element of the list is the output of `peers.search_ch_peers`,
        with the peer information of every channel.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    ch_peers_info = ch_search_ch_peers(channels=channels,
                                       number=number, shuffle=shuffle,
                                       ch_threads=ch_threads,
                                       claim_threads=claim_threads,
                                       server=server)

    print_ch_peers_info(ch_peers_info,
                        file=file, fdate=fdate, sep=sep)

    return ch_peers_info
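
An illustrative call (not from the original source) with hypothetical
channel names; each inner list pairs a channel with the number of its
newest claims to search::

    list_ch_peers(channels=[["@MyChannel#5", 3],
                            ["Fast_channel", 2]],
                  claim_threads=16)
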
Example #23
def redownload_blobs(uri=None, cid=None, name=None,
                     ddir=None, own_dir=True,
                     blobfiles=None, print_each=False,
                     server="http://*****:*****@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'

        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI.
        ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    ddir: str, optional
        It defaults to `$HOME`.
        The path to the download directory.
    own_dir: bool, optional
        It defaults to `True`, in which case it places the downloaded
        content inside a subdirectory named after the channel in `ddir`.
    blobfiles: str, optional
        It defaults to `'$HOME/.local/share/lbry/lbrynet/blobfiles'`.
        The path to the directory where the blobs were downloaded.
        This is normally seen with `lbrynet settings get`, under `'data_dir'`.
        It can be any other directory if it is symbolically linked
        to it, such as `'/opt/lbryblobfiles'`
    print_each: bool, optional
        It defaults to `False`.
        If it is `True` it will print all blobs
        that belong to the claim, and whether each of them is already
        in `blobfiles`.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    bool
        It returns `True` if all blobs are already present in the system
        so nothing needs to be downloaded.
        It returns `False` if the item does not exist,
        or if at least one blob was downloaded.

    Bug
    ---
    At the moment downloading missing blobs is not possible;
    the command hangs and never times out.
    ::
        lbrynet blob get <hash>

    This bug is reported in lbryio/lbry-sdk, issue #2070.

    If the bug is solved, `blob_get` could be called with the missing blob
    hash to only get that piece.
    """
    if not funcs.server_exists(server=server):
        return False

    blob_info = blobs.count_blobs(uri=uri, cid=cid, name=name,
                                  blobfiles=blobfiles,
                                  print_each=print_each,
                                  server=server)

    if "error_not_found" in blob_info:
        return False

    print(80 * "-")
    if "error_no_sd_hash" in blob_info:
        print(blob_info["error_no_sd_hash"]
              + "; start download from the start.")
    elif blob_info["all_present"]:
        print("All blobs files present, nothing to download.")
        return True
    else:
        print("Blobs missing; redownload claim.")
    print()

    # If the bug #2070 is solved, this could be run.
    # print("Blobs missing; redownload blobs")
    # for blob in blob_info["missing"]:
    #     out = f"{blob[0]}, "
    #     blob_get(blob=blob[1], action="get", out=out,
    #              server=server)

    # The missing blobs will only be downloaded if the media file
    # is not present so we must make sure it is deleted.
    # print("Blobs missing; redownload claim")
    print("Ensure the media file is deleted.")
    clean.delete_single(cid=blob_info["claim_id"], what="media",
                        server=server)
    print()
    dld.download_single(cid=blob_info["claim_id"],
                        ddir=ddir, own_dir=own_dir,
                        server=server)

    return False
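
An illustrative call (not from the original source) that checks a single,
hypothetically named claim and redownloads it if some blobs are missing::

    complete = redownload_blobs(name="some-video-name",
                                ddir="/home/user/Downloads")
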
Example #24
def ch_search_ch_peers(channels=None,
                       number=None, shuffle=True,
                       ch_threads=8, claim_threads=32,
                       server="http://*****:*****@MyChannel#5', 3],
                         ['GoodChannel#f', 4],
                         ['Fast_channel', 2]
                       ]
    number: int, optional
        It defaults to `None`.
        If this is present, it will override the individual
        numbers in `channels`.
        That is, the number of claims that will be searched
        will be the same for every channel.
    shuffle: bool, optional
        It defaults to `True`, in which case it will shuffle
        the list of channels so that they are not processed in the order
        that they come in the list.
    ch_threads: int, optional
        It defaults to 8.
        It is the number of threads that will be used to process channels,
        meaning that many channels will be searched in parallel.
    claim_threads: int, optional
        It defaults to 32.
        It is the number of threads that will be used to search for peers,
        meaning that many claims will be searched in parallel.
        This number shouldn't be large if the CPU doesn't have many cores.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        Each element of the list is the output of `peers.search_ch_peers`,
        with the peer information of every channel.
    """
    if not funcs.server_exists(server=server):
        return False

    s_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A", time.localtime())

    processed_chs = funcs.process_ch_num(channels=channels,
                                         number=number, shuffle=shuffle)

    if not processed_chs or len(processed_chs) < 1:
        return False

    ch_peers_info = []

    # Iterables to be passed to the ThreadPoolExecutor
    n_channels = len(processed_chs)
    chns = (processed["channel"] for processed in processed_chs)
    numbers = (processed["number"] for processed in processed_chs)
    c_threads = (claim_threads for n in range(n_channels))
    falses = (False for n in range(n_channels))
    falses2 = (False for n in range(n_channels))
    servers = (server for n in range(n_channels))

    if ch_threads:
        with fts.ThreadPoolExecutor(max_workers=ch_threads) as executor:
            # The input must be iterables
            results = executor.map(peers.search_ch_peers,
                                   chns, numbers, c_threads,
                                   falses, falses2, servers)
            print(f"Channel-peer search; max threads: {ch_threads}")
            ch_peers_info = list(results)  # generator to list
    else:
        for num, processed in enumerate(processed_chs, start=1):
            channel = processed["channel"]
            number = processed["number"]

            print(f"Channel {num}/{n_channels}, {channel}")
            peers_info = peers.search_ch_peers(channel=channel,
                                               number=number,
                                               threads=claim_threads,
                                               print_time=False,
                                               print_msg=False,
                                               server=server)
            print()
            ch_peers_info.append(peers_info)

    e_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A", time.localtime())
    print(f"start: {s_time}")
    print(f"end:   {e_time}")

    return ch_peers_info
Example #25
def list_playlists(shared=True,
                   file=None,
                   fdate=False,
                   server="http://localhost:5279"):
    """Display the playlists.

    Parameters
    ----------
    shared: bool, optional
        It defaults to `True`, in which case it uses the shared database
        synchronized with Odysee online.
        If it is `False` it will use only the local database
        to the LBRY Desktop application.
    file: str, optional
        It defaults to `None`.
        It must be a writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    bool
        It returns `True` if it prints the information successfully.
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    msg = {"method": "preference_get", "params": {}}

    output = requests.post(server, json=msg).json()
    if "error" in output:
        print(">>> No 'result' in the JSON-RPC server output")
        return False

    result = output["result"]

    if "enable-sync" in result:
        sync = result["enable-sync"]
    else:
        sync = False

    r_local = result["local"]

    if "shared" in result:
        r_shared = result["shared"]

    print("Playlists")
    print(80 * "-")
    print(f"Synchronization: {sync}")

    builtin = False
    if "builtinCollections" in r_local["value"]:
        pl_builtin_local = r_local["value"]["builtinCollections"]
        builtin = True

    unpublished = False
    if "unpublishedCollections" in r_local["value"]:
        pl_unpub_local = r_local["value"]["unpublishedCollections"]
        unpublished = True

    if "shared" in result and "builtinCollections" in r_shared["value"]:
        pl_builtin_shared = r_shared["value"]["builtinCollections"]
        builtin = True

    if "shared" in result and "unpublishedCollections" in r_shared["value"]:
        pl_unpub_shared = r_shared["value"]["unpublishedCollections"]
        unpublished = True

    if not builtin or not unpublished:
        if shared:
            print(f"Database: shared")
        else:
            print(f"Database: local")

        print(f"Builtin collection: {builtin}")
        print(f"Unpublished collection: {unpublished}")
        print("No playlists. Exit.")
        return False

    if shared and "shared" in result:
        print(f"Database: shared")
        pl_builtin = pl_builtin_shared
        pl_unpub = pl_unpub_shared
    else:
        if shared:
            print("No shared database, will use local")
        else:
            print(f"Database: local")
        pl_builtin = pl_builtin_local
        pl_unpub = pl_unpub_local

    pl_favorites = pl_builtin["favorites"]
    n_favs = len(pl_favorites["items"])
    time_favs = time.localtime(int(pl_favorites["updatedAt"]))
    time_favs = time.strftime("%Y-%m-%d_%H%M", time_favs)

    pl_watchlater = pl_builtin["watchlater"]
    n_later = len(pl_watchlater["items"])
    time_later = time.localtime(int(pl_watchlater["updatedAt"]))
    time_later = time.strftime("%Y-%m-%d_%H%M", time_later)

    out = [f"Favorites, updated: {time_favs}"]
    out2 = [f"Watch later, updated: {time_later}"]

    for it, item in enumerate(pl_favorites["items"], start=1):
        line = f"{it:3d}/{n_favs:3d}, "
        uri, cid = item.replace("lbry://", "", 1).split("#")
        uri = uri + "#" + cid[0:3]
        out += [line + f"{uri}"]

    for it, item in enumerate(pl_watchlater["items"], start=1):
        line = f"{it:3d}/{n_later:3d}, "
        uri, cid = item.replace("lbry://", "", 1).split("#")
        uri = uri + "#" + cid[0:3]
        out2 += [line + f"{uri}"]

    out3 = []

    for it, k in enumerate(pl_unpub, start=1):
        updated = time.localtime(int(pl_unpub[k]["updatedAt"]))
        updated = time.strftime("%Y-%m-%d_%H%M", updated)
        name = pl_unpub[k]["name"]
        title = f"{name}, updated: {updated}"

        items = pl_unpub[k]["items"]
        n_items = len(items)

        elems = []
        for itt, item in enumerate(items, start=1):
            line = f"{itt:3d}/{n_items:3d}, "
            uri, cid = item.replace("lbry://", "", 1).split("#")
            uri = uri + "#" + cid[0:3]
            line = line + f"{uri}"
            elems.append(line)

        lines = "\n".join(elems)
        out3 += [f"{title}\n" + lines + "\n"]

    fd = 0

    if file:
        dirn = os.path.dirname(file)
        base = os.path.basename(file)

        if fdate:
            fdate = time.strftime("%Y%m%d_%H%M", time.localtime()) + "_"
        else:
            fdate = ""

        file = os.path.join(dirn, fdate + base)

        try:
            fd = open(file, "w")
        except (FileNotFoundError, PermissionError) as err:
            print(f"Cannot open file for writing; {err}")

    if file and fd:
        print("\n".join(out), file=fd)
        print("", file=fd)
        print("\n".join(out2), file=fd)
        print("", file=fd)
        print("\n".join(out3), file=fd)
        fd.close()
        print(f"Summary written: {file}")
    else:
        print("\n".join(out))
        print("")
        print("\n".join(out2))
        print("")
        print("\n".join(out3))

    return True
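
An illustrative call (not from the original source) that writes the playlists
from the shared database to a dated summary file at a hypothetical path::

    list_playlists(shared=True,
                   file="/home/user/playlists.txt", fdate=True)
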
Example #26
def list_ch_subs_peers(number=2, shuffle=False,
                       start=1, end=0,
                       shared=True, valid=True,
                       ch_threads=32, claim_threads=16,
                       file=None, fdate=False, sep=";",
                       server="http://localhost:5279"):
    """Print the summary of peers for claims for subscribed channels.

    The list of channels to which we are subscribed is found
    in the wallet file in the local machine, assuming it is synchronized
    with Odysee.

    Parameters
    ----------
    number: int, optional
        It defaults to 2.
        The maximum number of claims that will be searched for peers
        for every subscribed channel.
    shuffle: bool, optional
        It defaults to `False`, in which case it will process
        the channels in the order that they are found in the wallet file.
        If it is `True`, the list of channels is shuffled
        so that they are processed in random order.
    start: int, optional
        It defaults to 1.
        Process channels starting from this index in the list of channels.
    end: int, optional
        It defaults to 0.
        Process channels until and including this index in the list
        of channels.
        If it is 0, it is the same as the last index in the list.
    shared: bool, optional
        It defaults to `True`, in which case it uses the shared database
        synchronized with Odysee online.
        If it is `False` it will use only the local database
        to `lbrynet`, for example, used by the LBRY Desktop application.
    valid: bool, optional
        It defaults to `True`, in which case it will only list the valid
        channels which can be resolved online.
        If it is `False` it will list all channels in the wallet, even those
        that do not resolve online, meaning that probably they were deleted.
        These invalid channels will be shown with brackets around
        the channel's name, for example, `[@channel]`.
    ch_threads: int, optional
        It defaults to 32.
        It is the number of threads that will be used to process channels,
        meaning that many channels will be searched in parallel.
    claim_threads: int, optional
        It defaults to 16.
        It is the number of threads that will be used to search for peers,
        meaning that many claims will be searched in parallel.
        This number shouldn't be large if the CPU doesn't have many cores.
    file: str, optional
        It defaults to `None`.
        It must be a user writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        Each element of the list is the output of `peers.search_ch_peers`,
        with the peer information of every channel.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    sub_channels = chs.list_ch_subs(shared=shared,
                                    show_all=not valid,
                                    filtering="valid", valid=True,
                                    threads=32,
                                    claim_id=False,
                                    file=None, fdate=False, sep=sep,
                                    server=server)

    channels = []

    for num, channel in enumerate(sub_channels, start=1):
        if num < start:
            continue
        if end != 0 and num > end:
            break

        name, cid = channel["uri"].lstrip("lbry://").split("#")
        c_name = name + "#" + cid[0:3]

        if not channel["valid"]:
            c_name = "[" + c_name + "]"

        channels.append([c_name, number])

    ch_peers_info = list_ch_peers(channels=channels,
                                  number=None, shuffle=shuffle,
                                  ch_threads=ch_threads,
                                  claim_threads=claim_threads,
                                  file=file, fdate=fdate, sep=sep,
                                  server=server)

    return ch_peers_info
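
An illustrative call (not from the original source) that searches peers
for the two newest claims of the first ten subscribed channels::

    list_ch_subs_peers(number=2, start=1, end=10,
                       ch_threads=16, claim_threads=8)
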
Example #27
def count_blobs(uri=None,
                cid=None,
                name=None,
                blobfiles=None,
                print_msg=True,
                print_each=True,
                server="http://*****:*****@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'

        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI.
        ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            name = 'some-video-name'
    blobfiles: str, optional
        It defaults to `'$HOME/.local/share/lbry/lbrynet/blobfiles'`.
        The path to the directory where the blobs were downloaded.
        This is normally seen with `lbrynet settings get`, under `'data_dir'`.
        It can be any other directory if it is symbolically linked
        to it, such as `'/opt/lbryblobfiles'`
    print_msg: bool, optional
        It defaults to `True`, in which case it will print information
        on the found claim.
        If `print_msg=False`, it also implies `print_each=False`.
    print_each: bool, optional
        It defaults to `True`, in which case it will print all blobs
        that belong to the claim, and whether each of them is already
        in `blobfiles`.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    dict
        A dictionary with information on the blobs of the claim,
        including the keys `'canonical_url'`, `'claim_id'`, `'name'`,
        `'channel'`, `'sd_hash'`, `'all_present'`, `'blobs'`,
        and `'missing'`.
        If the claim is not found it will only contain the key
        `'error_not_found'`; if the `'sd_hash'` blob is not in `blobfiles`
        it will contain the key `'error_no_sd_hash'`.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    # Assumed reconstruction: set a default blobfiles directory,
    # resolve the claim, and extract the fields used further below.
    if (not blobfiles or not isinstance(blobfiles, str)
            or not os.path.exists(blobfiles)):
        blobfiles = os.path.join(os.path.expanduser("~"),
                                 ".local", "share",
                                 "lbry", "lbrynet", "blobfiles")

    item = srch.search_item(uri=uri, cid=cid, name=name, server=server)

    if not item:
        return {"error_not_found": "the claim was not found online"}

    c_uri = item["canonical_url"]
    c_cid = item["claim_id"]
    c_name = item["name"]

    if "signing_channel" in item and "name" in item["signing_channel"]:
        c_channel = item["signing_channel"]["name"]
    else:
        c_channel = "@_Unknown_"

    sd_hash = item["value"]["source"]["sd_hash"]

    if print_msg:
        print(f"canonical_url: {c_uri}")
        print(f"claim_id: {c_cid}")
        print(f"name: {c_name}")
        print(f"channel: {c_channel}")
        print(f"sd_hash: {sd_hash}")

    sd_hash_f = os.path.join(blobfiles, sd_hash)

    # if not os.path.exists(sd_hash_f) or sd_hash not in list_all_blobs:
    if not os.path.exists(sd_hash_f):
        print(f">>> 'sd_hash' blob not in directory: {blobfiles}")
        print(">>> Start downloading the claim, or redownload it.")
        return {
            "error_no_sd_hash": "'sd_hash' blob not in directory "
            f"{blobfiles}",
            "canonical_url": c_uri,
            "claim_id": c_cid,
            "name": c_name,
            "channel": c_channel,
            "sd_hash": sd_hash
        }

    with open(sd_hash_f) as fd:
        lines = fd.readlines()

    blobs = json.loads(lines[0])
    # The last entry in the 'blobs' list has no 'blob_hash' (it only marks
    # the end of the stream), so it is not counted.
    n_blobs = len(blobs["blobs"]) - 1

    if print_msg:
        print(f"Total blobs: {n_blobs}")

    present_list = []
    blob_list = []
    blob_missing = []

    for blob in blobs["blobs"]:
        if "blob_hash" not in blob:
            continue

        num = blob["blob_num"]
        blob_hash = blob["blob_hash"]
        present = os.path.exists(os.path.join(blobfiles, blob_hash))
        present_list.append(present)
        blob_list.append([num, blob_hash, present])

        if not present:
            blob_missing.append([num, blob_hash, present])

        if print_msg and print_each:
            print("{:3d}/{:3d}, {}, {}".format(num, n_blobs, blob_hash,
                                               present))

    all_present = all(present_list)

    if print_msg:
        print(f"All blob files present: {all_present}")

    blob_info = {
        "canonical_url": c_uri,
        "claim_id": c_cid,
        "name": c_name,
        "channel": c_channel,
        "sd_hash": sd_hash,
        "all_present": all_present,
        "blobs": blob_list,
        "missing": blob_missing
    }
    return blob_info
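# A minimal usage sketch, not part of the original source: resolve a claim
# by an illustrative URI, count its blobs quietly, and report how many
# are missing from the blobfiles directory.
blob_count = count_blobs(uri="some-video-name",
                         print_msg=False, print_each=False)

if blob_count and "missing" in blob_count:
    print(f"Missing blobs: {len(blob_count['missing'])}")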
Example #28
def get_base_support(uri=None,
                     cid=None,
                     name=None,
                     server="http://localhost:5279"):
    """Get the existing, base, and our support from a claim.

    Returns
    -------
    dict
        A dictionary with information on the support on a claim.
        The keys are the following:
        - 'canonical_url'
        - 'claim_id'
        - 'existing_support': total support that the claim has;
          this is `'base_support'` + `'old_support'`.
        - 'base_support': support that the claim has without our support.
        - 'old_support': support that we have added to this claim;
          it may be zero if this claim does not have any support from us.
    False
        If there is a problem or no list of supports, it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    item = srch.search_item(uri=uri,
                            cid=cid,
                            name=name,
                            offline=False,
                            server=server)

    if not item:
        return False

    uri = item["canonical_url"]
    cid = item["claim_id"]

    existing = float(item["amount"]) + float(item["meta"]["support_amount"])

    msg = {"method": "support_list", "params": {"claim_id": item["claim_id"]}}

    output = requests.post(server, json=msg).json()

    if "error" in output:
        return False

    supported_items = output["result"]["items"]
    # Add up our own supports on this claim; it remains 0 if there are none
    old_support = 0

    for su_item in supported_items:
        old_support += float(su_item["amount"])

    base_support = existing - old_support

    return {
        "canonical_url": uri,
        "claim_id": cid,
        "existing_support": existing,
        "base_support": base_support,
        "old_support": old_support
    }
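# A minimal usage sketch, not part of the original source: inspect the
# support of a claim (the URI is illustrative) and print the breakdown
# using the keys of the dictionary returned above.
support = get_base_support(uri="some-video-name")

if support:
    print(f"Total support: {support['existing_support']}")
    print(f"Base support (without ours): {support['base_support']}")
    print(f"Our support: {support['old_support']}")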
Example #29
        A dictionary with three keys:
        - 'claims': a list of dictionaries where every dictionary represents
          a claim returned by `claim_search`.
          The list is ordered in ascending order by default (old claims first),
          and in descending order (newer claims first) if `reverse=True`.
        - 'size': number of bytes of all downloadable claims (streams)
          put together.
        - 'duration': total duration of the claims in seconds.
          It will count only stream types which have a duration
          such as audio and video.
          The duration can be divided by 3600 to obtain hours,
          then by 24 to obtain days.
    False
        If there is a problem it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not channel.startswith("@"):
        channel = "@" + channel

    print(f"Channel: {channel}")
    print(f"Search claims below block height: {last_height}")
    print(f"Number: {number}")
    print(80 * "-")

    if not number:
        return False

    all_claims = []
Example #30
def ch_download_latest_multi(channels=None,
                             repost=True,
                             number=None, shuffle=True,
                             ddir=None, own_dir=True, save_file=True,
                             server="http://*****:*****@MyChannel#5', 3],
                         ['GoodChannel#f', 4],
                         ['Fast_channel', 2]
                       ]
    repost: bool, optional
        It defaults to `True`, in which case it will check if the claims
        are reposts, and if they are, the original claims will be downloaded.
        If it is `False`, it won't check the claims for reposts;
        in that case, any claim that is a repost won't be downloaded,
        as reposts cannot be downloaded directly.
    number: int, optional
        It defaults to `None`.
        If this is present, it will override the individual
        numbers in `channels`.
        That is, the number of claims that will be downloaded
        will be the same for every channel.
    shuffle: bool, optional
        It defaults to `True`, in which case it will shuffle
        the list of channels so that they are not processed in the order
        that they come.

        If the list is very long, the LBRY daemon may stop processing
        the claims, so it may happen that only the first channels
        are processed but not the last ones.
        By processing the channels in random order, we increase
        the probability that all channels are eventually processed
        when this function is run multiple times.
    ddir: str, optional
        It defaults to `$HOME`.
        The path to the download directory.
    own_dir: bool, optional
        It defaults to `True`, in which case it places the downloaded
        content inside a subdirectory named after the channel in `ddir`.
    save_file: bool, optional
        It defaults to `True`, in which case all blobs of the stream
        will be downloaded, and the media file (mp4, mp3, mkv, etc.)
        will be placed in the downloaded directory.
        If it is `False` it will only download the first blob (`sd_hash`)
        in the stream, so the file will be in the local database
        but the complete file won't be placed in the download directory.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Alternative input
    -----------------
    channels: list of str
        The list of channels can also be a simple list of strings,
        that is, only the channel names without the individual numbers.
        ::
            channels = [
                         '@MyChannel#5',
                         'GoodChannel#f',
                         'Fast_channel'
                       ]
            number = 4

        In this case `number` must be specified explicitly to control
        the number of claims that will be downloaded for every channel.

    Returns
    -------
    list of lists of dicts
        A list of lists, where each internal list represents one channel,
        and this internal list has as many dictionaries as downloaded claims.
        The information in each dictionary represents the standard output
        of the `lbrynet_get` command for each downloaded claim.
    False
        If there is a problem, or an empty channel list,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if (not ddir or not isinstance(ddir, str)
            or ddir == "~" or not os.path.exists(ddir)):
        ddir = os.path.expanduser("~")
        print(f"Download directory should exist; set to ddir='{ddir}'")

    processed_chs = funcs.process_ch_num(channels=channels,
                                         number=number, shuffle=shuffle)

    if not processed_chs or len(processed_chs) < 1:
        return False

    multi_ch_info = []

    n_channels = len(processed_chs)

    for num, processed in enumerate(processed_chs, start=1):
        channel = processed["channel"]
        number = processed["number"]

        print(f"Channel {num}/{n_channels}, {channel}")
        ch_info = ch_download_latest(channel=channel, number=number,
                                     repost=repost,
                                     ddir=ddir, own_dir=own_dir,
                                     save_file=save_file,
                                     server=server)

        multi_ch_info.append(ch_info)

    return multi_ch_info
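# A minimal usage sketch, not part of the original source: download the two
# newest claims from each of several channels. The channel names are
# illustrative; passing `number=2` overrides the per-channel values.
example_channels = [["@MyChannel#5", 3],
                    ["GoodChannel#f", 4]]

downloads = ch_download_latest_multi(channels=example_channels,
                                     number=2, shuffle=True,
                                     ddir=None, own_dir=True,
                                     save_file=True)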