Example #1
0
def _external_file_download_callback(progress_text, directory, filename, cache, data):
  """
  *Private*. Callback function used in :func:`external_file` when a file
  is downloaded with :func:`httputils.download_file`.

  :param progress_text: The text to display on the progress bar.
  :param directory: The directory passed to :func:`external_file`.
  :param filename: The filename passed to :func:`external_file`.
  :param cache: A dictionary that contains cached information, eventually
      from a previous download.
  :param data: The data passed from :func:`httputils.download_file`.
  """

  # Without a known total size the progress bar can only spin.
  is_spinning = data['size'] is None

  # Check for completion FIRST: a zero-byte (or instantly completed)
  # download would otherwise match the ``downloaded == 0`` case below,
  # re-open the progress bar and never close it.
  if data['completed']:
    logger.progress_end()
  elif data['downloaded'] == 0:
    # Before the download starts but the request is already made, we can
    # check if the file already exists.
    logger.progress_begin(progress_text, is_spinning)
  elif is_spinning:
    # TODO: Convert bytes to human readable
    logger.progress_update(None, data['downloaded'])
  else:
    # TODO: Convert bytes to human readable
    logger.progress_update(data['downloaded'] / data['size'], data['downloaded'])
Example #2
0
 def progress(index, count, filename):
   """
   Progress callback for the archive unpacker. *index* is ``-1`` while the
   archive index is being read, otherwise the zero-based index of the entry
   currently extracted out of *count* entries. *filename* is unused.
   """
   if index == -1:
     logger.progress_begin("Unpacking {} ...".format(path.basename(archive)))
     logger.progress_update(0.0, 'Reading index...')
     return
   # Avoid shadowing this function's own name with the completion ratio.
   ratio = index / float(count)
   if index == 0:
     # The "Reading index" bar is done; start the extraction bar.
     logger.progress_end()
     logger.progress_begin(None, False)
   # This must be an independent ``if``: for a single-entry archive
   # (index 0 == count - 1) the bar opened above must still be closed.
   if index == (count - 1):
     logger.progress_end()
   elif index != 0:
     logger.progress_update(ratio, '{} / {}'.format(index, count))
Example #3
0
def external_file(*urls, filename = None, directory = None,
    copy_file_url = False, name = None):
  """
  Downloads a file from the first valid URL and saves it into *directory*
  under the specified *filename*.

  :param urls: One or more URLs. Supports ``http://``, ``https://``,
      ``ftp://`` and ``file://`. Note that if a ``file://`` URL is
      specified, the file is not copied to the output filename unless
      *copy_file_url* is True.
  :param filename: The output filename of the downloaded file. Defaults
      to the filename of the downloaded file.
  :param directory: The directory to save the file to. If *filename*
      is a relative path, it will be joined with this directory. Defaults
      to a path in the build directory.
  :param copy_file_url: If True, ``file://`` URLs will be copied instead
      of used as-is.
  :param name: The name of the loader action. This name is used to store
      information in the :attr:`Session.cache` so we can re-use existing
      downloaded data. :func:`~craftr.defaults.gtn` will be used to
      retrieve the default value for this parameter.
  :raise NoExternalFileMatch: If none of the *urls* could be retrieved.
  :return: The path to the downloaded file.
  """

  name = gtn(name)
  # Only fall back to the default build-local data directory when the
  # caller specified neither a directory nor a filename.
  # NOTE(review): if *filename* is given but *directory* is not, *directory*
  # stays None -- presumably httputils.download_file() and the file:// copy
  # branch accept that; confirm.
  if not directory and not filename:
    directory = buildlocal('data')

  cache = get_loader_cache(name)

  # TODO: expand variables of the current module.

  target_filename = None
  exceptions = []
  for url in urls:
    # Re-use a previously downloaded file if this URL matches the cached
    # one and the file still exists on disk.
    if url == cache.get('download_url'):
      existing_file = cache.get('download_file')
      if existing_file and path.isfile(existing_file):
        return existing_file

    progress_info = 'Downloading {} ...'.format(url)
    if url.startswith('file://'):
      # Local file: used in place unless *copy_file_url* requests a copy.
      source_file = url[7:]
      if path.isfile(source_file):
        if not copy_file_url:
          return source_file
        if not filename:
          # NOTE(review): this mutates *filename*, which also affects any
          # later iterations of the URL loop.
          filename = path.basename(source_file)

        # TODO: Use httputils.download_file() for this as well?
        logger.progress_begin(progress_info)
        path.makedirs(directory)
        target_filename = path.join(directory, filename)
        # Copy in chunks, updating the progress bar per chunk.
        with open(source_file, 'rb') as sfp:
          with open(target_filename, 'wb') as dfp:
            for bytes_copied, size in pyutils.copyfileobj(sfp, dfp):
              logger.progress_update(float(bytes_copied) / size)
        logger.progress_end()

        # TODO: Copy file permissions
        break
      else:
        exceptions.append(FileNotFoundError(url))
    else:
      # Remote URL: delegate to httputils with a progress callback bound
      # to this URL's progress text.
      progress = lambda data: _external_file_download_callback(
          progress_info, directory, filename, cache, data)

      try:
        target_filename, reused = httputils.download_file(
          url, filename = filename, directory = directory, on_exists = 'skip',
          progress = progress)
      except (httputils.URLError, httputils.HTTPError) as exc:
        # Remember the failure and try the next URL.
        exceptions.append(exc)
      else:
        # Success -- stop trying further URLs.
        break
      finally:
        # Runs on success, failure and before the break above, so the
        # progress display is always closed.
        logger.progress_end()

  if target_filename:
    # *url* is the loop variable of the last attempted URL, i.e. the one
    # that produced *target_filename*; record it for cache re-use above.
    cache['download_url'] = url
    cache['download_file'] = target_filename
    return target_filename

  raise NoExternalFileMatch(name, urls, exceptions)