Code example #1
File: blobs_auto.py  Project: belikor/lbrytools
def sd_blobs_compared(print_blobs=True, server="http://localhost:5279"):
    """Find the blobs estimated that we have already donwloaded."""
    items = sort.sort_items(server=server)
    print()
    estimates = print_network_sd_blobs(print_blobs=False, server=server)
    print()

    print("These are sd_hashes from the logs that are already downloaded")
    print(80 * "-")

    all_sd_blobs = []
    for est in estimates:
        all_sd_blobs.extend(est["sd_blobs"])

    exist = []
    for num, blob in enumerate(all_sd_blobs, start=1):
        for claim in items:
            if blob in claim["sd_hash"] and claim not in exist:
                exist.append(claim)
                print(f"{num:4d}; {blob}; {claim['claim_name']}")

    return exist
Code example #2
def count_blobs_all(blobfiles=None,
                    channel=None,
                    start=1,
                    end=0,
                    print_msg=False,
                    print_each=False,
                    server="http://*****:*****@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`

        If a simplified name is used, and there are various channels
        with the same name, the one with the highest LBC bid will be selected.
        Enter the full name to choose the right one.
    print_msg: bool, optional
        It defaults to `False`.
        If it is `True` it will print information on each found claim.
        If `print_msg=False`, it also implies `print_each=False`.
    print_each: bool, optional
        It defaults to `False`.
        If it is `True` it will print all blobs
        that belong to the claim, and whether each of them is already
        in `blobfiles`.
    start: int, optional
        It defaults to 1.
        Count the blobs from claims starting from this index
        in the list of items.
    end: int, optional
        It defaults to 0.
        Count the blobs from claims until and including this index
        in the list of items.
        If it is 0, it is the same as the last index in the list.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        A list of dictionaries, where each dictionary has two keys,
        `'num'` with the index of the claim in the list of items,
        and `'blob_info'` with the output of `count_blobs` for that claim.
    False
        If there is a problem, or there are no items to count,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if channel and not channel.startswith("@"):
        channel = "@" + channel

    items = sort.sort_items(channel=channel, server=server)
    if not items:
        return False

    n_items = len(items)
    print()

    if channel:
        print(f"Count all blob files for: {channel}")
    else:
        print("Count all blob files")
    print(80 * "-")
    print(f"Blobfiles: {blobfiles}")

    blob_all_info = []
    claims_blobs_complete = 0
    claims_blobs_incomplete = 0
    claims_no_sd_hash = 0
    claims_not_found = 0
    claims_other_error = 0
    n_blobs = 0

    for it, item in enumerate(items, start=1):
        if it < start:
            continue
        if end != 0 and it > end:
            break

        if print_msg:
            print(f"Claim {it}/{n_items}, {item['claim_name']}")
        blob_info = count_blobs(cid=item["claim_id"],
                                blobfiles=blobfiles,
                                print_msg=print_msg,
                                print_each=print_each,
                                server=server)
        if print_msg or (blob_info and "error_not_found" in blob_info):
            print()

        info = {"num": it, "blob_info": blob_info}
        blob_all_info.append(info)

        if blob_info:
            if "all_present" in blob_info and blob_info["all_present"]:
                claims_blobs_complete += 1
                n_blobs += 1  # for the 'sd_hash'
                n_blobs += len(blob_info["blobs"])
            elif "all_present" in blob_info and not blob_info["all_present"]:
                claims_blobs_incomplete += 1
                n_blobs += 1  # for the 'sd_hash'
                n_blobs += len(blob_info["blobs"])

            if "error_no_sd_hash" in blob_info:
                claims_no_sd_hash += 1
            if "error_not_found" in blob_info:
                claims_not_found += 1
        else:
            claims_other_error += 1

    print(f"claims with complete blobs: {claims_blobs_complete}")
    print(f"claims with incomplete blobs: {claims_blobs_incomplete} "
          "(continue download)")
    print(f"claims with no 'sd_hash' present: {claims_no_sd_hash} "
          "(restart download)")
    print(f"invalid claims: {claims_not_found} "
          "(no valid URI or claim ID; possibly removed from the network)")
    print(f"claims with other errors: {claims_other_error}")
    print(8 * "-")
    total = (claims_blobs_complete + claims_blobs_incomplete +
             claims_no_sd_hash + claims_not_found + claims_other_error)
    total_valid = (claims_blobs_complete + claims_blobs_incomplete +
                   claims_no_sd_hash)
    total_invalid = claims_not_found + claims_other_error
    print(f"total claims processed: {total}")
    print(f"total valid claims: {total_valid} "
          "(minimum number of 'sd_hash' blobs that must exist)")
    print(f"invalid claims: {total_invalid} "
          "(should be deleted including all their blobs)")
    print(f"blobs that should exist for these claims: {n_blobs}")

    return blob_all_info
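
A minimal usage sketch for the return value above. The shape of each entry (`'num'` and `'blob_info'`, with `'all_present'` inside `blob_info`) is taken from the function body; the import path and the channel name are assumptions for illustration.

from lbrytools import count_blobs_all  # assumed import path

info = count_blobs_all(channel="@MyChannel", print_msg=False)

if info:
    # Each entry has 'num' (claim index) and 'blob_info' (count_blobs output).
    # Collect the claims whose blobs are incomplete; these are the candidates
    # to resume downloading.
    incomplete = [entry["num"]
                  for entry in info
                  if entry["blob_info"]
                  and "all_present" in entry["blob_info"]
                  and not entry["blob_info"]["all_present"]]
    print("Claims with missing blobs (resume these):", incomplete)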
Code example #3
def cleanup_space(main_dir=None,
                  size=1000,
                  percent=90,
                  never_delete=None,
                  what="media",
                  server="http://localhost:5279"):
    """Clean up space in the download drive when it is sufficiently full.

    Parameters
    ----------
    main_dir: str
        It defaults to `$HOME`.
        This is the main or root directory that holds both
        the downloaded media files (mp4, mp3, mkv, etc.)
        as well as the downloaded binary blobs.

        On Linux, media files may go to `'$HOME/Downloads'`
        and blobs are normally found in
        `'$HOME/.local/share/lbry/lbrynet/blobfiles'`,
        so `main_dir` would be `$HOME`, or `'/home/user'`.
    size: int, optional
        It defaults to 1000.
        Maximum size in GB of `main_dir`.
        Ideally the downloaded media files and blobs never cross this limit.
    percent: float, optional
        It defaults to 90.
        Percentage of `size` that indicates a soft limit
        for the downloaded files.
        After this limit is crossed it will try to free space in `main_dir`
        by deleting older files and blobs, depending on the value
        of `what`.
    never_delete: list of str, optional
        It defaults to `None`.
        If it exists it is a list with channel names.
        The content produced by these channels will not be deleted
        so the media files and blobs will remain in `main_dir`.

        This is slow as it needs to perform an additional search
        for the channel.
    what: str, optional
        It defaults to `'media'`, in which case only the full media file
        (mp4, mp3, mkv, etc.) is deleted.
        If it is `'blobs'` it will delete only the binary blobs.
        If it is `'both'` it will delete both the media file
        and the blobs.

        As long as the blobs are present, the content can be seeded
        to the network, and the full file can be restored.
        That is, while the blobs exist the file is not completely deleted.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    bool
        It returns `True` if the limit indicated by `size` and `percent`
        was crossed by the downloaded files, and some of the older files
        were successfully deleted to bring usage of `main_dir` within limits.

        It returns `False` if there is a problem, or if the limit
        was not crossed and thus there is nothing to clean up,
        or if after going through all claims, it failed to clear
        enough space to bring usage within limits.
    """
    if not funcs.server_exists(server=server):
        return False

    if (not main_dir or not isinstance(main_dir, str) or main_dir == "~"
            or not os.path.exists(main_dir)):
        main_dir = os.path.expanduser("~")
        print("Download directory should exist; "
              f"set to main_dir='{main_dir}'")

    if not isinstance(size, (int, float)) or size <= 0:
        size = 1000
        print("Max disk usage should be a positive number; "
              f"set to size={size} GB")

    if (not isinstance(percent, (int, float)) or percent <= 0
            or percent > 100):
        percent = 90
        print("Percentage should be a positive number from 0 to 100; "
              f"set to percent={percent} %")

    if never_delete and not isinstance(never_delete, (list, tuple)):
        print("Must be a list of channels that should never be deleted.")
        print(f"never_delete={never_delete}")
        return False

    if (not isinstance(what, str) or what not in ("media", "blobs", "both")):
        print(">>> Error: what can only be 'media', 'blobs', 'both'")
        print(f"what={what}")
        return False

    limit_crossed = measure_usage(main_dir=main_dir,
                                  size=size,
                                  percent=percent)
    if not limit_crossed:
        print("Nothing to clean up.")
        return False

    sorted_items = sort.sort_items(server=server)
    n_items = len(sorted_items)

    for it, item in enumerate(sorted_items, start=1):
        print(80 * "-")
        out = "{:4d}/{:4d}, {}, ".format(it, n_items, item["claim_name"])

        if never_delete:
            channel = srch_ch.find_channel(cid=item["claim_id"],
                                           full=False,
                                           server=server)
            if channel in never_delete:
                print(out + f"item from {channel} will not be deleted. "
                      "Skipping.")
                continue

        print(out + "item will be deleted.")
        clean.delete_single(cid=item["claim_id"], what=what, server=server)

        limit_crossed = measure_usage(main_dir=main_dir,
                                      size=size,
                                      percent=percent)
        if not limit_crossed:
            print("Usage below limit. Stop deleting.")
            print()
            break
        print()

    if limit_crossed:
        print(">>> Went through all downloaded claims, "
              "and failed to clear enough space.")
        print("Terminating.")
        return False

    return True
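
The size/percent soft limit described above works out to `size * percent / 100` gigabytes. `measure_usage` is not shown in this example, so the following is only a sketch, under the assumption that usage is measured by summing the sizes of the files under `main_dir`; it is not lbrytools' actual code.

import os

def usage_limit_crossed(main_dir, size=1000, percent=90):
    """Sketch: report whether the files under main_dir exceed the soft limit."""
    total_bytes = 0
    for root, _dirs, files in os.walk(main_dir):
        for name in files:
            path = os.path.join(root, name)
            if os.path.isfile(path):
                total_bytes += os.path.getsize(path)

    used_gb = total_bytes / 1024**3
    limit_gb = size * percent / 100  # size=1000, percent=90 -> 900 GB
    return used_gb >= limit_gb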
Code example #4
def redownload_latest(number=2, ddir=None, own_dir=True, save_file=True,
                      shuffle=False,
                      server="http://localhost:5279"):
    """Attempt to redownload the latest claims that were already downloaded.

    This function is useful to resume the download of partially
    downloaded claims, that is, those that for any reason didn't complete
    the first time.
    It will download the missing blobs in order to produce
    the full media file (mp4, mp3, mkv, etc.).

    If all blobs are already available, then the media files
    will be recreated (if they don't already exist) in the download directory.

    Parameters
    ----------
    number: int, optional
        It defaults to 2.
        The number of items that will be re-downloaded from the list of claims
        which were already downloaded.
        For example, `number=10` will attempt to re-download
        the 10 newest items.
    ddir: str, optional
        It defaults to `$HOME`.
        The path to the download directory.
    own_dir: bool, optional
        It defaults to `True`, in which case it places the downloaded
        content inside a subdirectory named after the channel in `ddir`.
    save_file: bool, optional
        It defaults to `True`, in which case all blobs of the stream
        will be downloaded, and the media file (mp4, mp3, mkv, etc.)
        will be placed in the download directory.
        If it is `False` it will only download the blobs.
    shuffle: bool, optional
        It defaults to `False`.
        If it is `True` it will shuffle the list of claims
        so that `number` indicates a random number of claims,
        not only the newest ones.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        A list of dictionaries, where each dictionary represents
        the standard output of the `lbrynet_get` command for each
        re-downloaded claim.
    """
    if not funcs.server_exists(server=server):
        return False

    if not number or not isinstance(number, int) or number < 0:
        number = 2
        print("Number must be a positive integer, "
              f"set to default value, number={number}")

    if (not ddir or not isinstance(ddir, str)
            or ddir == "~" or not os.path.exists(ddir)):
        ddir = os.path.expanduser("~")
        print(f"Download directory should exist; set to ddir='{ddir}'")

    sorted_items = sort.sort_items(server=server)
    sorted_items.reverse()

    if shuffle:
        random.shuffle(sorted_items)
        random.shuffle(sorted_items)
        random.shuffle(sorted_items)

    list_info_get = []

    print(80 * "-")

    for it, item in enumerate(sorted_items, start=1):
        if it > number:
            break

        print(f"Re-download claim {it}/{number}")
        d = dld.download_single(cid=item["claim_id"],
                                ddir=ddir, own_dir=own_dir,
                                save_file=save_file,
                                server=server)
        list_info_get.append(d)
        print()

    return list_info_get
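
Two hedged usage sketches for the function above; the import path and the download directory are assumptions. The first resumes the 10 newest claims while fetching only their blobs; the second lets `shuffle=True` pick 10 claims at random instead of the newest ones.

from lbrytools import redownload_latest  # assumed import path

# Resume the 10 newest claims, downloading only the missing blobs (no media file).
redownload_latest(number=10, ddir="/home/user/Downloads",
                  own_dir=True, save_file=False)

# Re-download 10 randomly chosen claims instead of the newest ones.
redownload_latest(number=10, shuffle=True)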
Code example #5
def download_claims(ddir=None, own_dir=True, save_file=True,
                    start=1, end=0, file=None, sep=";", invalid=False,
                    server="http://localhost:5279"):
    """Download claims from a file, or redownload the ones already present.

    Parameters
    ----------
    ddir: str, optional
        It defaults to `$HOME`.
        The path to the download directory.
    own_dir: bool, optional
        It defaults to `True`, in which case it places the downloaded
        content inside a subdirectory named after the channel in `ddir`.
    save_file: bool, optional
        It defaults to `True`, in which case all blobs of the stream
        will be downloaded, and the media file (mp4, mp3, mkv, etc.)
        will be placed in the download directory.
        If it is `False` it will only download the blobs.
    start: int, optional
        It defaults to 1.
        Operate on the item starting from this index in the internal list
        of claims or in the claims provided by `file`.
    end: int, optional
        It defaults to 0.
        Operate until and including this index in the internal list of claims
        or in the claims provided by `file`.
        If it is 0, it is the same as the last index.
    file: str, optional
        It defaults to `None`.
        The file to read claims from. It is a comma-separated value (CSV)
        list of claims, in which each row represents a claim,
        and one data field is the `'claim_id'` which can be used
        with `download_single` to get that claim. The value of `sep`
        indicates the separator between the fields.

        If `file=None` it will re-download the claims obtained
        from `sort_items` which should already be present
        in the system fully or partially.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the read file. Since the claim name
        can have commas, a semicolon `;` is used by default.
    invalid: bool, optional
        It defaults to `False`, in which case it will assume
        the processed claims are still valid in the online database.
        It will use `lbrynet claim search` to resolve the `claim_id`.

        If it is `True` it will assume the claims are no longer valid,
        that is, that the claims have been removed from the online database
        and only exist locally.
        In this case, it will use `lbrynet file list` to resolve
        the `claim_id`.

        Therefore this parameter is required if `file` is a document
        containing 'invalid' claims, otherwise the claims won't be found.
        For 'invalid' claims they cannot be downloaded anymore from the online
        database; if their binary blobs are complete, the media files
        (mp4, mp3, mkv, etc.) will simply be recreated in `ddir`.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of dict
        A list of dictionaries, where each dictionary represents
        the standard output of the `lbrynet_get` command for each
        downloaded claim.
    False
        If there is a problem, non-existing claims, or non-existing file,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    print(80 * "-")

    if not file:
        print("Redownload from existing claims")
        sorted_items = sort.sort_items(server=server)

        if not sorted_items:
            print(">>> Error: no claims previously downloaded.")
            return False
    else:
        if file and not isinstance(file, str) or not os.path.exists(file):
            print("The file path must exist.")
            print(f"file={file}")
            return False

        print("Download from existing file")
        sorted_items = srch.parse_claim_file(file=file, sep=sep)
        print()

        if not sorted_items:
            print(">>> Error: the file must have a 'claim_id' "
                  "(40-character alphanumeric string); "
                  "could not parse the file.")
            print(f"file={file}")
            return False

    n_items = len(sorted_items)

    list_info_get = []

    for it, item in enumerate(sorted_items, start=1):
        if it < start:
            continue
        if end != 0 and it > end:
            break

        print(f"Claim {it}/{n_items}")
        info_get = dld.download_single(cid=item["claim_id"],
                                       invalid=invalid,
                                       ddir=ddir, own_dir=own_dir,
                                       save_file=save_file,
                                       server=server)
        list_info_get.append(info_get)
        print()

    return list_info_get
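
A sketch of the kind of row `file` is expected to contain, and of how a `'claim_id'` could be pulled out of it. The exact column layout used by `parse_claim_file` is not shown above; the docstring only states that fields are separated by `sep` and that one field is a 40-character `'claim_id'`, so the sample row and the helper below are hypothetical.

import re

sample_row = "  12;  some-claim-name;  0123456789abcdef0123456789abcdef01234567"

def extract_claim_id(row, sep=";"):
    """Return the first field that looks like a 40-character claim ID."""
    for field in row.split(sep):
        field = field.strip()
        if re.fullmatch(r"[0-9a-zA-Z]{40}", field):
            return field
    return None

print(extract_claim_id(sample_row))
# 0123456789abcdef0123456789abcdef01234567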
Code example #6
def print_channels(full=True,
                   canonical=False,
                   simple=False,
                   invalid=False,
                   offline=False,
                   start=1,
                   end=0,
                   print_msg=True,
                   file=None,
                   fdate=False,
                   pre_num=True,
                   sep=";",
                   server="http://localhost:5279"):
    """Print a unique list of channels by inspecting all downloaded claims.

    Certain claims were published anonymously, so for these the channel
    is `@_Unknown_`.

    Parameters
    ----------
    full: bool, optional
        It defaults to `True`, in which case the returned
        name includes the digits after `'#'` or `':'` that uniquely identify
        that channel in the network.
        If it is `False` it will return just the base name.
        This parameter only works with `invalid=False` and `offline=False`,
        as the full name always needs to be resolved online.
        This value is ignored if `canonical=True`.
    canonical: bool, optional
        It defaults to `False`.
        If it is `True` the `'canonical_url'` of the channel is returned
        regardless of the value of `full`.
        This parameter only works with `invalid=False` and `offline=False`,
        as the canonical name always needs to be resolved online.
    simple: bool, optional
        It defaults to `False`, in which case the channels are printed
        in three columns.
        If it is `True` the channels will be printed as a single,
        long string, each channel separated from another by a `sep` symbol.
    invalid: bool, optional
        It defaults to `False`, in which case it will try to resolve
        the list of claims from the online database (blockchain),
        and will also try to resolve the channel name online, unless
        `offline=True`.

        If it is `True` it will assume the claims are no longer valid,
        that is, that the claims have been removed from the online database
        and only exist locally.
        This also implies `offline=True`, meaning that the channel name
        will be determined from the offline database.
    offline: bool, optional
        It defaults to `False`, in which case it will try to resolve
        the channel name from the online database (blockchain).

        If it is `True` it will try to resolve the channel name
        from the offline database. This will be faster but may not
        print all known channels, only those that have been resolved
        when the claims were initially downloaded.
    start: int, optional
        It defaults to 1.
        Count the channels starting from this index in the list of channels.
    end: int, optional
        It defaults to 0.
        Count the channels until and including this index
        in the list of channels.
        If it is 0, it is the same as the last index in the list.
    print_msg: bool, optional
        It defaults to `True`, in which case it will print the final time
        taken to print the channels.
        If it is `False` it will not print this information.
    file: str, optional
        It defaults to `None`.
        It must be a writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    pre_num: bool, optional
        It defaults to `True`, in which case it will print the index
        of the channel at the beginning of the line; this way it is easy
        to count the channels.
        If it is `False` it won't show a number, just the channels.
    sep: str, optional
        It defaults to `;`. It is the separator for the fields.
        Since the claim name accepts commas, a semicolon is chosen by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of str
        It returns a list with all channel names found.
    False
        If there is a problem, like non-existing channels,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    s_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A", time.localtime())
    if invalid:
        items = sort.sort_invalid(server=server)
    else:
        items = sort.sort_items(server=server)

    if not items:
        if invalid:
            print("No invalid claims found. No channels will be listed.")
        else:
            print("No items found. No channels will be listed.")
        return False

    print()
    if invalid:
        offline = True

    all_channels = []

    for it, item in enumerate(items, start=1):
        if it < start:
            continue
        if end != 0 and it > end:
            break

        channel = srch_ch.find_channel(cid=item["claim_id"],
                                       full=full,
                                       canonical=canonical,
                                       offline=offline,
                                       server=server)
        if channel:
            all_channels.append(channel)

    if not all_channels:
        print("No unique channels could be determined.")
        if invalid:
            print("It is possible that the channels "
                  "were not resolved when the claims "
                  "were initially downloaded.")
        else:
            print("It is possible that the claims are now invalid, "
                  "or that the channels were not resolved when "
                  "the claims were initially downloaded.")
        return False

    all_channels = list(set(all_channels))
    all_channels.sort()

    n_channels = len(all_channels)

    if invalid or offline:
        print(f"Original channels found: {n_channels} "
              "(does not include unresolved channels)")
    else:
        print(f"Original channels found: {n_channels} "
              "(does not include invalid claims, or unresolved channels)")
    print(80 * "-")

    fd = 0

    if file:
        dirn = os.path.dirname(file)
        base = os.path.basename(file)

        if fdate:
            fdate = time.strftime("%Y%m%d_%H%M", time.localtime()) + "_"
        else:
            fdate = ""

        file = os.path.join(dirn, fdate + base)

        try:
            fd = open(file, "w")
        except (FileNotFoundError, PermissionError) as err:
            print(f"Cannot open file for writing; {err}")

    if simple:
        out = f"{sep} ".join(all_channels)

        if file and fd:
            print(out, file=fd)
            fd.close()
            print(f"Summary written: {file}")
        else:
            print(out)

        e_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A", time.localtime())
        if print_msg:
            print()
            print(f"start: {s_time}")
            print(f"end:   {e_time}")
        return all_channels

    # Maximum channel length can be used to evenly space all channels
    # in columns. How do we integrate this into the format specifier?
    # print(f"{c1:<length>s}")
    #
    # length = 0
    # for ch in all_channels:
    #     if len(ch) > length:
    #         length = len(ch)

    # Determine how many rows are required to display
    # all channels in three columns
    # c1    c2     c3
    # c4    c5
    res = n_channels % 3
    if res == 0:
        rows = n_channels / 3
    else:
        rows = n_channels / 3 + 1

    index = 0
    row = 1

    # Print rows that are full, only if the number of rows is more than 1
    if rows > 1:
        for u in range(int(rows) - 1):
            c1 = all_channels[index + 0] + f"{sep}"
            c2 = all_channels[index + 1] + f"{sep}"
            c3 = all_channels[index + 2] + f"{sep}"
            if pre_num:
                out = f"{row:3d}: {c1:33s} {c2:33s} {c3:33s}"
            else:
                out = f"{c1:33s} {c2:33s} {c3:33s}"
            if file and fd:
                print(out, file=fd)
            else:
                print(out)
            index += 3
            row += 3

    # Print the last row, which may be the only row if row=1
    if res == 1:
        c1 = all_channels[index + 0]
        if pre_num:
            out = f"{row:3d}: {c1:33s}"
        else:
            out = f"{c1:33s}"
    if res == 2:
        c1 = all_channels[index + 0] + f"{sep}"
        c2 = all_channels[index + 1]
        if pre_num:
            out = f"{row:3d}: {c1:33s} {c2:33s}"
        else:
            out = f"{c1:33s} {c2:33s}"
    if res == 0:
        c1 = all_channels[index + 0] + f"{sep}"
        c2 = all_channels[index + 1] + f"{sep}"
        c3 = all_channels[index + 2]
        if pre_num:
            out = f"{row:3d}: {c1:33s} {c2:33s} {c3:33s}"
        else:
            out = f"{c1:33s} {c2:33s} {c3:33s}"

    if file and fd:
        print(out, file=fd)
        fd.close()
        print(f"Summary written: {file}")
    else:
        print(out)

    e_time = time.strftime("%Y-%m-%d_%H:%M:%S%z %A", time.localtime())
    if print_msg:
        print()
        print(f"start: {s_time}")
        print(f"end:   {e_time}")

    return all_channels
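
The commented-out block in the code above suggests using the longest channel name as a dynamic field width instead of the hard-coded 33. A small sketch of that idea, with hypothetical channel names:

all_channels = ["@SomeChannel#2", "@Another#9f", "@_Unknown_",
                "@AVeryLongChannelName#ab", "@Short#1"]  # hypothetical

# Widest name plus one character of room for the separator.
length = max(len(ch) for ch in all_channels) + 1

# Print three evenly spaced columns; for simplicity every name,
# including the last one in a row, gets the separator.
for index in range(0, len(all_channels), 3):
    chunk = all_channels[index:index + 3]
    print(" ".join(f"{ch + ';':<{length}s}" for ch in chunk))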
Code example #7
File: blobs_mv.py  Project: belikor/lbrytools
def blobs_move_all(move_dir=None,
                   blobfiles=None,
                   print_missing=False,
                   action="copy",
                   channel=None,
                   start=1,
                   end=0,
                   server="http://*****:*****@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`

        If a simplified name is used, and there are various channels
        with the same name, the one with the highest LBC bid will be selected.
        Enter the full name to choose the right one.
    start: int, optional
        It defaults to 1.
        Move the blobs from claims starting from this index
        in the list of items.
    end: int, optional
        It defaults to 0.
        Move the blobs from claims until and including this index
        in the list of items.
        If it is 0, it is the same as the last index in the list.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list
        A list with the output of `blobs_move` for each processed claim.
    False
        If there is a problem, or there are no items to process,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if channel and not channel.startswith("@"):
        channel = "@" + channel

    items = sort.sort_items(channel=channel, server=server)
    if not items:
        return False

    n_items = len(items)
    print()

    list_blobs_info = []

    print("Copy/move all blob files")
    print(80 * "-")

    for it, item in enumerate(items, start=1):
        if it < start:
            continue
        if end != 0 and it > end:
            break

        print(f"Claim {it}/{n_items}, {item['claim_name']}")
        blob_m = blobs_move(cid=item["claim_id"],
                            move_dir=move_dir,
                            blobfiles=blobfiles,
                            print_missing=print_missing,
                            action=action,
                            server=server)
        list_blobs_info.append(blob_m)
        print()

    return list_blobs_info
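
A hedged usage sketch: copy the blobs of every claim from one channel to an external drive, keeping them in `blobfiles` as well; `action="move"` would remove them from the original directory instead. The import path, channel name, and target directory are assumptions.

from lbrytools import blobs_move_all  # assumed import path

blobs_move_all(move_dir="/media/external/blob_backup",
               channel="@SomeChannel",
               action="copy",
               print_missing=True)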
Code example #8
File: clean_multi.py  Project: belikor/lbrytools
def remove_claims(start=1,
                  end=0,
                  file=None,
                  invalid=False,
                  what="media",
                  server="http://localhost:5279"):
    """Delete claims from a file, or delete the ones already present.

    Parameters
    ----------
    start: int, optional
        It defaults to 1.
        Operate on the item starting from this index in the internal list
        of claims or in the claims provided by `file`.
    end: int, optional
        It defaults to 0.
        Operate until and including this index in the internal list of claims
        or in the claims provided by `file`.
        If it is 0, it is the same as the last index.
    file: str, optional
        It defaults to `None`.
        The file to read claims from. It is a comma-separated value (CSV)
        list of claims, in which each row represents a claim,
        and one element is the `'claim_id'` which can be used
        with `delete_single` to delete that claim.

        If `file=None` it will delete the claims obtained
        from `sort_items` which should already be present
        in the system fully or partially.
    invalid: bool, optional
        It defaults to `False`, in which case it will assume
        the processed claims are still valid in the online database.
        It will use `lbrynet claim search` to resolve the `claim_id`.

        If it is `True` it will assume the claims are no longer valid,
        that is, that the claims have been removed from the online database
        and only exist locally.
        In this case, it will use `lbrynet file list` to resolve
        the `claim_id`.

        Therefore this parameter is required if `file` is a document
        containing 'invalid' claims, otherwise the claims won't be found
        and won't be deleted.
    what: str, optional
        It defaults to `'media'`, in which case only the full media file
        (mp4, mp3, mkv, etc.) is deleted.
        If it is `'blobs'`, it will delete only the blobs.
        If it is `'both'`, it will delete both the media file
        and the blobs.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of bool
        It returns a list of booleans, where each boolean represents
        a deleted item; `True` if the claim was deleted successfully,
        and `False` if it was not.
    False
        If there is a problem, non-existing claims, or non-existing file,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    print(80 * "-")

    if not file:
        print("Remove claims from existing list")
        sorted_items = sort.sort_items(server=server)

        if not sorted_items:
            print(">>> Error: no claims previously downloaded.")
            return False
    else:
        if file and not isinstance(file, str) or not os.path.exists(file):
            print("The file path must exist.")
            print(f"file={file}")
            return False

        print("Remove claims from existing file")
        sorted_items = srch.parse_claim_file(file=file)
        print()

        if not sorted_items:
            print(">>> Error: the file must have a 'claim_id' "
                  "(40-character alphanumeric string); "
                  "could not parse the file.")
            print(f"file={file}")
            return False

    n_items = len(sorted_items)

    list_del_info = []

    for it, item in enumerate(sorted_items, start=1):
        if it < start:
            continue
        if end != 0 and it > end:
            break

        print(f"Claim {it}/{n_items}")
        del_info = clean.delete_single(cid=item["claim_id"],
                                       invalid=invalid,
                                       what=what,
                                       server=server)
        list_del_info.append(del_info)
        print()

    return list_del_info
Code example #9
File: clean_multi.py  Project: belikor/lbrytools
def ch_cleanup(channel=None,
               number=2,
               what="media",
               server="http://*****:*****@MyChannel#5'`, `'MyChannel#5'`, `'MyChannel'`

        If a simplified name is used, and there are various channels
        with the same name, the one with the highest LBC bid will be selected.
        Enter the full name to choose the right one.
    number: int, optional
        It defaults to 2.
        The number of items to keep from `channel`.
        These will be the newest ones according to their `'release_time'`
        or `'timestamp'`, if the former is missing.
    what: str, optional
        It defaults to `'media'`, in which case only the full media file
        (mp4, mp3, mkv, etc.) is deleted.
        If it is `'blobs'`, it will delete only the blobs.
        If it is `'both'`, it will delete both the media file
        and the blobs.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.

    Returns
    -------
    list of bool
        It returns a list of booleans, where each boolean represents
        a deleted item; `True` if the claim was deleted successfully,
        and `False` if it was not.
    False
        If there is a problem, or a non-existing channel,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    if not channel or not isinstance(channel, str):
        print("Clean up items from a single channel.")
        print(f"channel={channel}")
        return False

    if (number is None or number is False or not isinstance(number, int)
            or number < 0):
        number = 2
        print("Number must be a positive integer, "
              f"set to default value, number={number}")

    if (not isinstance(what, str) or what not in ("media", "blobs", "both")):
        print(">>> Error: what can only be 'media', 'blobs', 'both'")
        print(f"what={what}")
        return False

    list_info_del = []
    sorted_items = sort.sort_items(channel=channel, server=server)
    if not sorted_items:
        print()
        return False

    n_items = len(sorted_items)

    remaining = n_items - 0

    for it, item in enumerate(sorted_items, start=1):
        if remaining <= number:
            print(8 * "-")
            print(f"Finished deleting; remaining {remaining}")
            print()
            break

        print(f"Claim {it}/{n_items}")

        del_info = clean.delete_single(cid=item["claim_id"],
                                       what=what,
                                       server=server)
        list_info_del.append(del_info)
        remaining = n_items - it

        if remaining > number:
            print()

        if remaining == 0:
            print(8 * "-")
            print(f"Finished deleting; remaining {remaining}")
            print()

    return list_info_del
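
A hedged usage sketch: keep only the five newest claims from each channel in a hypothetical list, deleting both the media files and the blobs of the older ones.

from lbrytools import ch_cleanup  # assumed import path

channels = ["@SomeChannel", "@AnotherChannel"]  # hypothetical names

for ch in channels:
    ch_cleanup(channel=ch, number=5, what="both")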
Code example #10
File: clean_multi.py  Project: belikor/lbrytools
def remove_media(never_delete=None, server="http://localhost:5279"):
    """Remove all downloaded media files, but leave the binary blobs."""
    if not funcs.server_exists(server=server):
        return False

    items = sort.sort_items(server=server)
    n_items = len(items)

    print(80 * "-")

    for it, item in enumerate(items, start=1):
        out = "{:4d}/{:4d}, {}, ".format(it, n_items, item["claim_name"])

        if never_delete:
            channel = srch_ch.find_channel(cid=item["claim_id"],
                                           full=False,
                                           server=server)
            if not channel:
                continue

            channel = channel.lstrip("@")
            skip = False

            for safe_channel in never_delete:
                if channel in safe_channel:
                    skip = True
                    break

            if skip:
                print(out + f"item from {channel} will not be deleted. "
                      "Skipping.")
                continue

        path = item["download_path"]
        if path:
            os.remove(path)
            print(out + f"delete {path}")
        else:
            print(out + "no media found locally, probably already deleted.")

    print("Media files deleted")
    return True
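
A hedged usage sketch of the function above: delete every downloaded media file except those from two channels, leaving all blobs on disk so the content can still be seeded and the files restored later. The import path and channel names are placeholders.

from lbrytools import remove_media  # assumed import path

remove_media(never_delete=["@SomeChannel", "@AnotherChannel"])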