def fetch_dist(self, dist, fetch_dir, force=False, check_md5=False, dry_run=False): """ Get a distribution, i.e. copy or download the distribution into fetch_dir. force: force download or copy check_md5: when determining if a file needs to be downloaded or copied, check it's MD5. This is, of course, slower but more reliable then just checking the file-size (which always done first). Note: * This option has option has nothing to do with checking the MD5 of a download. The md5 is always checked when files are downloaded (regardless of this option). * If force=True, this option is has no effect, because the file is forcefully downloaded, ignoring any existing file (as well as the MD5). """ md5 = self.index[dist].get('md5', None) size = self.index[dist].get('size', None) fn = dist_naming.filename_dist(dist) dst = join(fetch_dir, fn) # if force is not used, see if (i) the file exists (ii) its size is # the expected (iii) optionally, make sure the md5 is the expected. if (not force and isfile(dst) and getsize(dst) == size and (not check_md5 or md5_file(dst) == md5)): if self.verbose: print "Not forcing refetch, %r already exists" % dst return pprint_fn_action(fn, ['copying', 'downloading'][dist.startswith('http://')]) if dry_run: return if self.verbose: print "Copying: %r" % dist print " to: %r" % dst fo = open(dst + '.part', 'wb') write_data_from_url(fo, dist, md5, size) fo.close() rm_rf(dst) os.rename(dst + '.part', dst)
def add_repo(self, repo, index_fn='index-depend.bz2'):
    """
    Add a repo to the chain, i.e. read the index file of the url,
    parse it and update the index.

    repo: repository URL (or file:// path); it is normalized with
        dist_naming.cleanup_reponame() before use.
    index_fn: name of the index file relative to the repo root; a
        '.bz2' suffix triggers decompression of the fetched data.
    """
    if self.verbose:
        print "Adding repository:"
        print " URL:", repo
    repo = dist_naming.cleanup_reponame(repo)
    self.repos.append(repo)

    index_url = repo + index_fn

    if index_url.startswith('file://'):
        if isfile(index_url[7:]):
            # A local url with index file
            if self.verbose:
                print " found index", index_url
        else:
            # A local url without index file: fall back to scanning
            # the repo directory for distributions, nothing to parse.
            self.index_all_files(repo)
            return

    if self.verbose:
        print " index:", index_fn

    # Fetch the raw index data into an in-memory buffer.
    faux = StringIO()
    write_data_from_url(faux, index_url)
    index_data = faux.getvalue()
    faux.close()

    if self.verbose:
        # md5 of the (still compressed) index data, for diagnostics.
        import hashlib
        print " md5:", hashlib.md5(index_data).hexdigest()
        print

    if index_fn.endswith('.bz2'):
        index_data = bz2.decompress(index_data)

    new_index = metadata.parse_depend_index(index_data)
    # Attach parsed requirement objects to each spec before merging.
    for spec in new_index.itervalues():
        add_Reqs_to_spec(spec)

    # Merge the entries into the chained index, keyed by full dist URL,
    # and record each dist under its canonical-name group as well.
    for distname, spec in new_index.iteritems():
        dist = repo + distname
        self.index[dist] = spec
        self.groups[spec['cname']].append(dist)
def fetch_dist(self, dist, fetch_dir, force=False, check_md5=False, dry_run=False): """ Get a distribution, i.e. copy or download the distribution into fetch_dir. force: force download or copy check_md5: when determining if a file needs to be downloaded or copied, check it's MD5. This is, of course, slower but more reliable then just checking the file-size (which is always done first). Note: * This option has nothing to do with checking the MD5 of the download. The md5 is always checked when files are downloaded (regardless of this option). * If force=True, this option is has no effect, because the file is forcefully downloaded, ignoring any existing file (as well as the MD5). """ md5 = self.index[dist].get('md5') size = self.index[dist].get('size') fn = dist_naming.filename_dist(dist) dst = join(fetch_dir, fn) # if force is not used, see if (i) the file exists (ii) its size is # the expected (iii) optionally, make sure the md5 is the expected. if (not force and isfile(dst) and getsize(dst) == size and (not check_md5 or md5_file(dst) == md5)): if self.verbose: print "Not forcing refetch, %r already exists" % dst return self.file_action_callback(fn, ('copying', 'downloading') [dist.startswith(('http://', 'https://'))]) if dry_run: return if self.verbose: print "Copying: %r" % dist print " to: %r" % dst fo = open(dst + '.part', 'wb') write_data_from_url(fo, dist, md5, size, progress_callback=self.download_progress_callback) fo.close() rm_rf(dst) os.rename(dst + '.part', dst)
def add_repo(self, repo, index_fn='index-depend.txt'): """ Add a repo to the chain, i.e. read the index file of the url, parse it and update the index. """ if self.verbose: print "Adding repository:", repo repo = dist_naming.cleanup_reponame(repo) self.repos.append(repo) index_url = repo + index_fn if index_url.startswith('file://'): if isfile(index_url[7:]): # A local url with index file if self.verbose: print "\tfound index", index_url else: # A local url without index file self.index_all_files(repo) return if self.verbose: print "\treading:", index_url faux = StringIO() write_data_from_url(faux, index_url) index_data = faux.getvalue() faux.close() new_index = metadata.parse_depend_index(index_data) for spec in new_index.itervalues(): add_Reqs_to_spec(spec) for distname, spec in new_index.iteritems(): self.index[repo + distname] = spec
\t py-vers : %(python_version)s''' % url print sys.exit() import re import StringIO import enstaller.utils as utils URL = 'http://pypi.python.org/simple/' if 0: faux = StringIO.StringIO() utils.write_data_from_url(faux, URL) data = faux.getvalue() data = open('simple.txt').read() pat = re.compile(r'<a href=.+?>(.+?)<') for line in data.splitlines(): m = pat.match(line) if m: print m.group(1)