import cgi
import hashlib
import mimetypes
import os
import shutil
import tempfile
import urllib

# The helpers used below (logger, format_size, ask_path_exists, display_path,
# backup_dir, splitext, geturl, unpack_file, cache_download,
# create_download_cache_folder, _get_hash_from_file, _check_hash,
# _get_response_from_url) are assumed to be defined elsewhere in this
# module/package.


def _download_url(resp, link, temp_location):
    """Stream the response body to temp_location in 4 KB chunks, showing
    progress for larger files and feeding an optional hash as we go.
    Returns the hash object, or None if the link carries no hash."""
    fp = open(temp_location, 'wb')
    download_hash = None
    if link.hash and link.hash_name:
        try:
            download_hash = hashlib.new(link.hash_name)
        except ValueError:
            logger.warn("Unsupported hash name %s for package %s"
                        % (link.hash_name, link))
    try:
        total_length = int(resp.info()['content-length'])
    except (ValueError, KeyError, TypeError):
        total_length = 0
    downloaded = 0
    # Only show a progress display for files over ~40 kB, or when the total
    # size is unknown.
    show_progress = total_length > 40 * 1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress('Downloading %s (%s): '
                                      % (show_url, format_size(total_length)))
            else:
                logger.start_progress('Downloading %s (unknown size): '
                                      % show_url)
        else:
            logger.notify('Downloading %s' % show_url)
        logger.info('Downloading from URL %s' % link)
        while True:
            chunk = resp.read(4096)
            if not chunk:
                break
            downloaded += len(chunk)
            if show_progress:
                if not total_length:
                    logger.show_progress('%s' % format_size(downloaded))
                else:
                    logger.show_progress('%3i%% %s'
                                         % (100 * downloaded / total_length,
                                            format_size(downloaded)))
            if download_hash is not None:
                download_hash.update(chunk)
            fp.write(chunk)
        fp.close()
    finally:
        if show_progress:
            logger.end_progress('%s downloaded' % format_size(downloaded))
    return download_hash
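# A minimal sketch of driving _download_url on its own, assuming a
# urllib2-style response (anything with .info() and .read()) and a
# hypothetical link stub; real link objects come from this package's index
# module:
#
#     import urllib2
#
#     class _LinkStub(object):
#         url = show_url = 'https://example.com/foo-1.0.tar.gz'
#         hash = hash_name = None
#
#     resp = urllib2.urlopen(_LinkStub.url)
#     _download_url(resp, _LinkStub(), '/tmp/foo-1.0.tar.gz')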
def _copy_file(filename, location, content_type, link):
    """Copy the downloaded file into location as link.filename, asking the
    user what to do if a file of that name already exists."""
    copy = True
    download_location = os.path.join(location, link.filename)
    if os.path.exists(download_location):
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup '
            % display_path(download_location), ('i', 'w', 'b'))
        if response == 'i':
            copy = False
        elif response == 'w':
            logger.warn('Deleting %s' % display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            dest_file = backup_dir(download_location)
            logger.warn('Backing up %s to %s'
                        % (display_path(download_location),
                           display_path(dest_file)))
            shutil.move(download_location, dest_file)
    if copy:
        shutil.copy(filename, download_location)
        logger.indent -= 2
        logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache=None, download_dir=None):
    """
    Download and unpack an archive into a directory.

    link is an index.Link object (with a url)
    location is the dir the files are moved to
    download_cache is the dir where the download cache is stored
    download_dir is, optionally, a dir where the download is stored first
    """
    temp_dir = tempfile.mkdtemp('-unpack', 'xppkg-')
    target_url = link.url.split('#', 1)[0]
    target_file = None
    download_hash = None
    if download_cache:
        target_file = os.path.join(download_cache,
                                   urllib.quote(target_url, ''))
        if not os.path.isdir(download_cache):
            create_download_cache_folder(download_cache)

    already_downloaded = None
    if download_dir:
        already_downloaded = os.path.join(download_dir, link.filename)
        if not os.path.exists(already_downloaded):
            already_downloaded = None

    if (target_file
            and os.path.exists(target_file)
            and os.path.exists(target_file + '.content-type')):
        # Serve the archive straight from the download cache.
        fp = open(target_file + '.content-type')
        content_type = fp.read().strip()
        fp.close()
        if link.hash and link.hash_name:
            download_hash = _get_hash_from_file(target_file, link)
        temp_location = target_file
        logger.notify('Using download cache from %s' % target_file)
    elif already_downloaded:
        # Reuse a file fetched into download_dir on an earlier run.
        temp_location = already_downloaded
        # guess_type() returns a (type, encoding) tuple; we only want the type.
        content_type = mimetypes.guess_type(already_downloaded)[0]
        if link.hash:
            download_hash = _get_hash_from_file(temp_location, link)
        logger.notify('File was already downloaded %s' % already_downloaded)
    else:
        resp = _get_response_from_url(target_url, link)
        content_type = resp.info()['content-type']
        filename = link.filename  # fallback
        # Have a look at the Content-Disposition header for a better guess
        content_disposition = resp.info().get('content-disposition')
        if content_disposition:
            type, params = cgi.parse_header(content_disposition)
            # We use ``or`` here because we don't want to use an "empty" value
            # from the filename param.
            filename = params.get('filename') or filename
        ext = splitext(filename)[1]
        if not ext:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                filename += ext
        if not ext and link.url != geturl(resp):
            # The request was redirected; try the final URL's extension.
            ext = os.path.splitext(geturl(resp))[1]
            if ext:
                filename += ext
        temp_location = os.path.join(temp_dir, filename)
        download_hash = _download_url(resp, link, temp_location)
        if link.hash and link.hash_name:
            _check_hash(download_hash, link)

    if download_dir and not already_downloaded:
        _copy_file(temp_location, download_dir, content_type, link)
    unpack_file(temp_location, location, content_type, link)
    if target_file and target_file != temp_location:
        cache_download(target_file, temp_location, content_type)
    if target_file is None and not already_downloaded:
        os.unlink(temp_location)
    os.rmdir(temp_dir)
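# A minimal usage sketch for unpack_http_url. The URL and paths are made up,
# and the import location of Link is an assumption (the docstring above only
# says it is an index.Link object):
#
#     from xppkg.index import Link  # assumed import path
#
#     link = Link('https://example.com/foo-1.0.tar.gz')
#     unpack_http_url(link, location='/tmp/build/foo',
#                     download_cache='/tmp/xppkg-cache')
#
# With download_cache set, the fetched archive and its .content-type side file
# are stored under the cache dir (keyed by the quoted URL) and reused on later
# runs; with download_dir set, the archive is also copied there via _copy_file.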