Example no. 1
File: kbps.py  Project: n8wachT/else
def kbps(time=None, size=None, kbps=None):
    # Given exactly two of (time, size, kbps), solve for the missing third value.
    if [time, size, kbps].count(None) != 1:
        raise ValueError('Incorrect number of unknowns')

    if size is None:
        # bitrate * duration -> size
        seconds = hms_s(time)
        kibs = int(kbps) / 8
        size = kibs * 1024
        size *= seconds
        out = bytestring.bytestring(size)
        return out

    if time is None:
        # size / bitrate -> duration
        size = bytestring.parsebytes(size)
        kilobits = size / 128
        time = kilobits / int(kbps)
        return s_hms(time)

    if kbps is None:
        # size / duration -> bitrate
        seconds = hms_s(time)
        size = bytestring.parsebytes(size)
        kibs = size / 1024
        kilobits = kibs * 8
        kbps = kilobits / seconds
        kbps = '%d kbps' % int(round(kbps))
        return kbps
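The converter solves for whichever of the three values was left as None. A minimal usage sketch with made-up argument values; the exact output strings depend on how `bytestring.bytestring` formats sizes and how `s_hms` formats durations, and the size strings assume `parsebytes` reads 'm' as mebibytes:

kbps(time='1:00:00', kbps=320)      # size: roughly 140.6 MiB for one hour at 320 kbps
kbps(size='700m', kbps=1000)        # duration: roughly 1:35:34 at 1000 kbps
kbps(time='0:30:00', size='350m')   # bitrate: roughly '1593 kbps'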
Example no. 2
def threaded_dl_argparse(args):
    if os.path.isfile(args.url_file):
        f = open(args.url_file, 'r')
        with f:
            urls = f.read()
    else:
        urls = clipext.resolve(args.url_file)
    urls = urls.replace('\r', '').split('\n')

    urls = [u.split(' ', 1) if ' ' in u else u for u in urls]

    headers = args.headers
    if headers is not None:
        if len(headers) == 1 and headers[0].startswith('{'):
            headers = ast.literal_eval(headers[0])
        else:
            keys = headers[::2]
            vals = headers[1::2]
            headers = {key: val for (key, val) in zip(keys, vals)}

    bytespersecond = args.bytespersecond
    if bytespersecond is not None:
        bytespersecond = bytestring.parsebytes(bytespersecond)

    threaded_dl(
        urls,
        bytespersecond=bytespersecond,
        filename_format=args.filename_format,
        headers=headers,
        thread_count=args.thread_count,
        timeout=args.timeout,
    )
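The headers argument is accepted either as a single Python dict literal or as alternating key/value tokens. A small sketch of the pairing branch, with made-up header values:

headers = ['User-Agent', 'downloader', 'Referer', 'https://example.com']
keys = headers[::2]
vals = headers[1::2]
print({key: val for (key, val) in zip(keys, vals)})
# {'User-Agent': 'downloader', 'Referer': 'https://example.com'}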
Example no. 3
def init_argparse(args):
    if isinstance(args.piece_size, str):
        piece_size = bytestring.parsebytes(args.piece_size)
    else:
        piece_size = args.piece_size
    url = clipext.resolve(args.url)
    init(url, localname=args.localname, piece_size=piece_size)
Example no. 4
def init_argparse(args):
    if isinstance(args.piece_size, str):
        piece_size = bytestring.parsebytes(args.piece_size)
    else:
        piece_size = args.piece_size
    url = pipeable.input(args.url, split_lines=False)
    init(url, localname=args.localname, piece_size=piece_size)
Example no. 5
def limiter_or_none(value):
    if isinstance(value, str):
        value = bytestring.parsebytes(value)
    if isinstance(value, ratelimiter.Ratelimiter):
        limiter = value
    elif value is not None:
        limiter = ratelimiter.Ratelimiter(allowance=value, period=1)
    else:
        limiter = None
    return limiter
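The helper accepts a ready-made Ratelimiter, a byte count (as an int or a parseable string), or None. A sketch of the three paths, assuming the `ratelimiter` module from the same codebase:

limiter_or_none(None)       # no limiting; returns None
limiter_or_none(1048576)    # 1 MiB/s as an integer, wrapped in Ratelimiter(allowance=1048576, period=1)
limiter_or_none('500k')     # parsed by bytestring.parsebytes, then wrapped the same way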
Example no. 6
def _unitconvert(value):
    '''
    When parsing hyphenated ranges, this function is used to convert
    strings like "1k" to 1024 and "1:00" to 60.
    '''
    if value is None:
        return None
    if ':' in value:
        return hms_to_seconds(value)
    elif all(c in '0123456789.' for c in value):
        return float(value)
    else:
        return bytestring.parsebytes(value)
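Per the docstring, colon notation becomes seconds, bare numerals become floats, and anything else is handed to bytestring.parsebytes. Illustrative values; the byte figure assumes parsebytes uses 1024-based units:

_unitconvert(None)     # None
_unitconvert('1:00')   # 60 seconds via hms_to_seconds
_unitconvert('1.5')    # 1.5
_unitconvert('1k')     # 1024 via bytestring.parsebytes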
Example no. 7
def reserve_disk_space_argparse(args):
    try:
        status = reserve_disk_space(reserve=bytestring.parsebytes(
            args.reserve),
                                    drive=args.drive)
        free = bytestring.bytestring(status.free)
        reserve = bytestring.bytestring(status.reserve)
        log.info('There is %s available out of %s.', free, reserve)
        return 0
    except NotEnoughSpace as exc:
        free = bytestring.bytestring(exc.free)
        reserve = bytestring.bytestring(exc.reserve)
        log.fatal('Only %s available out of %s.', free, reserve)
        return 1
Example no. 8
def download_argparse(args):
    url = args.url

    url = clipext.resolve(url)
    callback = {
        None: Progress1,
        '1': Progress1,
        '2': Progress2,
    }.get(args.callback, args.callback)

    bytespersecond = args.bytespersecond
    if bytespersecond is not None:
        bytespersecond = bytestring.parsebytes(bytespersecond)

    headers = {}
    if args.range is not None:
        headers['range'] = 'bytes=%s' % args.range

    retry = args.retry
    if not retry:
        retry = 1

    while retry != 0:
        # Negative numbers permit infinite retries.
        try:
            download_file(
                url=url,
                localname=args.localname,
                bytespersecond=bytespersecond,
                callback_progress=callback,
                do_head=args.no_head is False,
                headers=headers,
                overwrite=args.overwrite,
                timeout=args.timeout,
                verbose=True,
            )
        except (NotEnoughBytes, requests.exceptions.ConnectionError):
            retry -= 1
            if retry == 0:
                raise
        else:
            break
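Because a negative retry count only moves further from zero on each failure, it never satisfies `retry == 0` and the loop keeps trying, as the comment says. Roughly:

# args.retry = None or 0  -> retry = 1 -> a single attempt
# args.retry = 3          -> up to three attempts, then the exception is re-raised
# args.retry = -1         -> retried indefinitely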
Example no. 9

def make_randomfile(length, filename=None):
    if filename is None:
        filename = rid(8) + '.txt'
    chunks = math.ceil(length / CHUNK_SIZE)
    written = 0
    f = open(filename, 'w')
    for x in range(chunks):
        b = min(CHUNK_SIZE, length - written)
        f.write(rid(b))
        written += b
    f.close()
    print('Created %s' % filename)


bytes = listget(sys.argv, 1, None)
if bytes is None:
    bytes = 2**10
else:
    bytes = bytestring.parsebytes(bytes)

filecount = 1
filename = listget(sys.argv, 2, None)
if filename is not None and filename.isdigit():
    filecount = int(filename)
    filename = None

for x in range(filecount):
    make_randomfile(bytes, filename)
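The tail of the script reads an optional size and an optional filename-or-count from sys.argv. Hypothetical invocations, assuming the file is saved as randomfile.py and that parsebytes reads '1m' as one mebibyte:

# python randomfile.py              -> one 1 KiB file with a random name
# python randomfile.py 1m           -> one 1 MiB file with a random name
# python randomfile.py 1m 5         -> five 1 MiB files, each randomly named
# python randomfile.py 1m out.txt   -> one 1 MiB file named out.txt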
Example no. 10
def download(
        databasename,
        outputdir=None,
        bytespersecond=None,
        headers=None,
        overwrite=False,
    ):
    '''
    Download all of the Enabled files. The filepaths will match those of the
    website, using `outputdir` as the root directory.

    Parameters:
        outputdir:
            The directory to mirror the files into. If not provided, the domain
            name is used.

        bytespersecond:
            The speed to ratelimit the downloads. Can be an integer, or a string like
            '500k', according to the capabilities of `bytestring.parsebytes`.

            Note that this is bytes, not bits.

        headers:
            Additional headers to pass to each `download_file` call.

        overwrite:
            If True, delete local copies of existing files and rewrite them.
            Otherwise, completed files are skipped.
    '''
    sql = sqlite3.connect(databasename)
    cur = sql.cursor()

    if outputdir in (None, ''):
        # This assumes that all URLs in the database are from the same domain.
        # If they aren't, it's the user's fault because Walkers don't leave the given site
        # on their own.
        cur.execute('SELECT url FROM urls LIMIT 1')
        url = cur.fetchone()[0]
        outputdir = url_split(url)['domain']

    if isinstance(bytespersecond, str):
        bytespersecond = bytestring.parsebytes(bytespersecond)

    cur.execute('SELECT * FROM urls WHERE do_download == 1 ORDER BY url')
    for fetch in fetch_generator(cur):
        url = fetch[SQL_URL]

        url_filepath = url_split(url)
        folder = os.path.join(outputdir, url_filepath['folder'])
        os.makedirs(folder, exist_ok=True)

        fullname = os.path.join(folder, url_filepath['filename'])

        write('Downloading "%s"' % fullname)
        downloady.download_file(
            url,
            localname=fullname,
            bytespersecond=bytespersecond,
            callback_progress=downloady.Progress2,
            headers=headers,
            overwrite=overwrite,
        )
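A minimal sketch of calling download() against a crawler database; the database path and header values here are illustrative:

download(
    'walker.db',
    outputdir=None,                   # fall back to the domain of the first URL
    bytespersecond='500k',            # parsed by bytestring.parsebytes
    headers={'User-Agent': 'mirror'},
    overwrite=False,
)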
Example no. 11
def zerofile_argparse(args):
    return zerofile(
        filename=args.filename,
        length=bytestring.parsebytes(args.length),
    )