def updateEggFor(self, package_name, eggname, eggs_dir=None):
    """Download the egg file *eggname* of *package_name* into ``eggs_dir``.

    ``eggs_dir`` defaults to ``self.config.eggs_directory`` when None.
    Every indexed distribution whose URL filename matches *eggname* is
    tried in turn; a failed download is logged at debug level and the
    next matching candidate is attempted.  Returns None either way
    (success or exhaustion of candidates).
    """
    if eggs_dir is None:
        eggs_dir = self.config.eggs_directory
    # Make sure the index has (fresh) entries for this package before we scan it.
    self._lookupPackage(package_name)
    file_path = os.path.join(eggs_dir, package_name, eggname)
    for dist in self.index[package_name]:
        if getattr(dist, "module_path", None) is not None:
            # this is a module installed in system (directory), we want
            # to download a fresh package
            continue
        filename, md5 = egg_info_for_url(dist.location)
        # There may be more than one filename that matches: if the first n
        # fail, we want to try until one works!
        if filename == eggname:
            # NOTE(review): the shared system temp dir is used here; concurrent
            # downloads of the same egg may race on shutil.move — confirm
            # (the L3 sibling documents exactly this bug).
            tmp = tempfile.gettempdir()
            try:
                tmp_location = self.index.download(dist.location, tmp)
                logger.debug('Downloaded %s\n' % dist.location)
                shutil.move(tmp_location, file_path)
                return
            except Exception, err:
                # Best-effort: log and fall through to the next candidate.
                logger.debug('Error downloading %s: \n\t%s\n' % (dist.location, err))
def updatePackageIndex(self, package_name, eggs_dir=EGGS_DIR): """Update info for a specific package """ self._lookupPackage(package_name) if not self.index[package_name]: raise PackageNotFound, "Package '%s' does not exists or has no eggs" % package_name package_path = os.path.join(eggs_dir, package_name) if not os.path.exists(package_path): os.mkdir(package_path) html_path = os.path.join(package_path, 'index.html') dists = self.index[package_name] if not dists and os.path.exists(html_path): # We already have a cached index page and there are no dists. # Pypi is probably down, so we keep our existing one. return html = open(html_path, 'w') title = "Links for %s" % package_name print >> html, "<html><head><title>%s</title></head>" % package_name print >> html, "<body><h1>%s</h1>" % package_name for dist in dists: if getattr(dist, "module_path", None) is not None: # this is a module installed in system continue filename, md5 = egg_info_for_url(dist.location) print >> html, ('<a href="%s#%s" rel="download">%s</a><br />' % (filename, md5, filename)) print >> html, "</body></html>" html.close() del html
def updateEggFor(self, package_name, eggname, eggs_dir=EGGS_DIR): """Download an egg for package_name """ self._lookupPackage(package_name) file_path = os.path.join(eggs_dir, package_name, eggname) for dist in self.index[package_name]: if getattr(dist, "module_path", None) is not None: # this is a module installed in system (directory), we want # to download a fresh package continue filename, md5 = egg_info_for_url(dist.location) if filename == eggname: tmp = tempfile.gettempdir() try: tmp_location = self.index.download(dist.location, tmp) log.debug("Downloaded %s\n" % dist.location) # BUG: # 2 instances are downloading "traits" package. # First instance removes the file # from /tmp with "shutil.move", # second instance can not find it -> # "shutil.move" raises exception shutil.move(tmp_location, file_path) return except Exception, err: log.debug("Error downloading %s: \n\t%s\n" % (dist.location, err))
def updatePackageIndex(self, package_name, eggs_dir=EGGS_DIR): """Update info for a specific package """ self._lookupPackage(package_name) if not self.index[package_name]: raise PackageNotFound, "Package '%s' does not exists or has no eggs" % package_name package_path = os.path.join(eggs_dir, package_name) if not os.path.exists(package_path): os.mkdir(package_path) html_path = os.path.join(package_path, 'index.html') dists = self.index[package_name] if not dists and os.path.exists(html_path): # We already have a cached index page and there are no dists. # Pypi is probably down, so we keep our existing one. return html = open(html_path, 'w') title = "Links for %s" % package_name print >> html, "<html><head><title>%s</title></head>" % package_name print >> html, "<body><h1>%s</h1>" % package_name for dist in dists: if getattr(dist, "module_path", None) is not None: # this is a module installed in system continue filename, md5 = egg_info_for_url(dist.location) print >> html, ( '<a href="%s#%s" rel="download">%s</a><br />' % (filename, md5, filename) ) print >> html, "</body></html>" html.close() del html
def remoteURIOfEggFor(self, package_name, eggname, eggs_dir=EGGS_DIR): self._lookupPackage(package_name) file_path = os.path.join(eggs_dir, package_name, eggname) for dist in self.index[package_name]: if getattr(dist, "module_path", None) is not None: # this is a module installed in system (directory), we want # to download a fresh package continue filename, md5 = egg_info_for_url(dist.location) if filename == eggname: return dist.location raise ValueError, "Egg '%s' not found in index" % eggname
def updatePackageIndex(self, package_name, eggs_dir=None):
    """Regenerate the cached ``index.html`` link page for *package_name*.

    ``eggs_dir`` defaults to ``self.config.eggs_directory`` when None.
    The page lists one ``rel="download"`` link per indexed distribution
    plus one for every egg already present locally that the index did
    not mention.  Raises PackageNotFound only when the index has no
    dists AND nothing is cached locally; when the index is empty but a
    cached page exists, the cached page is kept (PyPI probably down).
    """
    if eggs_dir is None:
        eggs_dir = self.config.eggs_directory
    self._lookupPackage(package_name)
    dists = self.index[package_name]
    package_path = os.path.join(eggs_dir, package_name)
    # Eggs already downloaded for this package (the index page itself
    # is not an egg, so exclude it).
    local_eggs = set(os.listdir(package_path)) - set(["index.html"]) \
        if os.path.exists(package_path) else set()
    if not dists and len(local_eggs) == 0:
        # Nothing remote and nothing cached: the package is unknown.
        raise PackageNotFound("Package '%s' does not exists or has no "
                              "eggs" % package_name)
    if not os.path.exists(package_path):
        os.mkdir(package_path)
    html_path = os.path.join(package_path, 'index.html')
    if not dists and os.path.exists(html_path):
        # We already have a cached index page and there are no dists.
        # Pypi is probably down, so we keep our existing one.
        return
    html = open(html_path, 'w')
    logger.debug('Building new index page for package %r' % package_name)
    title = "Links for %s" % package_name
    print >> html, "<html><head><title>%s</title></head>" % title
    print >> html, "<body><h1>%s</h1>" % title
    for dist in dists:
        if getattr(dist, "module_path", None) is not None:
            # this is a module installed in system
            continue
        filename, md5 = egg_info_for_url(dist.location)
        print >> html, (
            '<a href="%s#%s" rel="download">%s</a><br />'
            % (filename, md5, filename)
        )
        # Already linked via the index entry; don't link it twice below.
        local_eggs.discard(filename)
    for egg in local_eggs:
        # Locally cached eggs the index didn't list (no md5 fragment known).
        print >> html, (
            '<a href="%s" rel="download">%s</a><br />'
            % (egg, egg)
        )
    print >> html, "</body></html>"
    html.close()
    del html
def process_index(self, url, page):
    """Process the contents of a PyPI page

    Override of setuptools' PackageIndex.process_index.  Differences from
    upstream: package names are NOT lowercased when recorded in
    ``self.package_pages``, and when ALWAYS_REFRESH is set the page's url
    is removed from ``self.fetched_urls`` so it will be re-fetched later.
    Returns the page with PyPI md5 spans rewritten as links for package
    pages, or "" for non-package pages.
    """
    if ALWAYS_REFRESH:
        # Zap ourselves from the fetched url list, otherwise we'll
        # never be updated as long as the server runs.
        # NOTE(review): assumes url is always present in fetched_urls at
        # this point — otherwise this del raises KeyError; confirm caller.
        del self.fetched_urls[url]

    def scan(link):
        # Process a URL to see if it's for a package page
        if link.startswith(self.index_url):
            parts = map(
                urllib2.unquote, link[len(self.index_url):].split('/')
            )
            if len(parts)==2 and '#' not in parts[1]:
                # it's a package page, sanitize and index it
                pkg = safe_name(parts[0])
                ver = safe_version(parts[1])
                # changed "pkg.lower()" to "pkg"
                self.package_pages.setdefault(pkg, {})[link] = True
                return to_filename(pkg), to_filename(ver)
        return None, None

    # process an index page into the package-page index
    for match in HREF.finditer(page):
        scan( urlparse.urljoin(url, htmldecode(match.group(1))) )

    pkg, ver = scan(url)   # ensure this page is in the page index
    if pkg:
        # process individual package page
        for new_url in find_external_links(url, page):
            # Process the found URL
            base, frag = egg_info_for_url(new_url)
            if base.endswith('.py') and not frag:
                # Bare .py download with no #egg fragment: tag it with the
                # package/version so setuptools can identify it, or record
                # that version info is missing.
                if ver:
                    new_url+='#egg=%s-%s' % (pkg,ver)
                else:
                    self.need_version_info(url)
            self.scan_url(new_url)

        # Turn PyPI's md5 spans into direct #md5= download links.
        return PYPI_MD5.sub(
            lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
        )
    else:
        return ""   # no sense double-scanning non-package pages
def process_index(self, url, page):
    """Process the contents of a PyPI page

    Override of setuptools' PackageIndex.process_index.  Differences from
    upstream: package names are NOT lowercased when recorded in
    ``self.package_pages``, and when ALWAYS_REFRESH is set the page's url
    is removed from ``self.fetched_urls`` so it will be re-fetched later.
    Returns the page with PyPI md5 spans rewritten as links for package
    pages, or "" for non-package pages.
    """
    if ALWAYS_REFRESH:
        # Zap ourselves from the fetched url list, otherwise we'll
        # never be updated as long as the server runs.
        # NOTE(review): assumes url is always present in fetched_urls at
        # this point — otherwise this del raises KeyError; confirm caller.
        del self.fetched_urls[url]

    def scan(link):
        # Process a URL to see if it's for a package page
        if link.startswith(self.index_url):
            parts = map(urllib2.unquote, link[len(self.index_url):].split('/'))
            if len(parts) == 2 and '#' not in parts[1]:
                # it's a package page, sanitize and index it
                pkg = safe_name(parts[0])
                ver = safe_version(parts[1])
                # changed "pkg.lower()" to "pkg"
                self.package_pages.setdefault(pkg, {})[link] = True
                return to_filename(pkg), to_filename(ver)
        return None, None

    # process an index page into the package-page index
    for match in HREF.finditer(page):
        scan(urlparse.urljoin(url, htmldecode(match.group(1))))

    pkg, ver = scan(url)  # ensure this page is in the page index
    if pkg:
        # process individual package page
        for new_url in find_external_links(url, page):
            # Process the found URL
            base, frag = egg_info_for_url(new_url)
            if base.endswith('.py') and not frag:
                # Bare .py download with no #egg fragment: tag it with the
                # package/version so setuptools can identify it, or record
                # that version info is missing.
                if ver:
                    new_url += '#egg=%s-%s' % (pkg, ver)
                else:
                    self.need_version_info(url)
            self.scan_url(new_url)

        # Turn PyPI's md5 spans into direct #md5= download links.
        return PYPI_MD5.sub(
            lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page)
    else:
        return ""  # no sense double-scanning non-package pages
def updateEggFor(self, package_name, eggname, eggs_dir=EGGS_DIR): """Download an egg for package_name """ self._lookupPackage(package_name) file_path = os.path.join(eggs_dir, package_name, eggname) for dist in self.index[package_name]: if getattr(dist, "module_path", None) is not None: # this is a module installed in system (directory), we want # to download a fresh package continue filename, md5 = egg_info_for_url(dist.location) if filename == eggname: tmp = tempfile.gettempdir() tmp_location = self.index.download(dist.location, tmp) shutil.move(tmp_location, file_path) return raise ValueError, "Egg '%s' not found in index" % eggname
def get_package_downloads(self, package_name):
    """Return a list of download descriptors for *package_name*.

    Each entry is a dict with 'filename', 'md5' (the hash value with any
    'md5=' prefix stripped) and 'location' (the URL without its #fragment).
    System-installed modules are skipped.  An empty list is returned when
    the index has no dists for the package.
    """
    self.find_packages(Requirement.parse(package_name))
    result = []
    dists = self[package_name]
    if not dists:
        # We already have a cached index page and there are no dists.
        # Pypi is probably down, so we keep our existing one.
        return result
    for dist in dists:
        if getattr(dist, "module_path", None) is not None:
            # this is a module installed in system
            continue
        fname, digest = egg_info_for_url(dist.location)
        result.append({
            'filename': fname,
            'md5': digest.split('=', 1)[-1],
            'location': dist.location.split('#')[0],
        })
    return result
def process_index(self, url, page):
    """Process the contents of a PyPI page

    Override of setuptools' PackageIndex.process_index.  Difference from
    upstream: package names are NOT lowercased when recorded in
    ``self.package_pages``.  This variant also tolerates malformed hrefs:
    a ValueError raised while scanning an individual link is ignored.
    Returns the page with PyPI md5 spans rewritten as links for package
    pages, or "" for non-package pages.
    """

    def scan(link):
        # Process a URL to see if it's for a package page
        if link.startswith(self.index_url):
            parts = map(urllib2.unquote, link[len(self.index_url) :].split("/"))
            if len(parts) == 2 and "#" not in parts[1]:
                # it's a package page, sanitize and index it
                pkg = safe_name(parts[0])
                ver = safe_version(parts[1])
                # changed "pkg.lower()" to "pkg"
                self.package_pages.setdefault(pkg, {})[link] = True
                return to_filename(pkg), to_filename(ver)
        return None, None

    # process an index page into the package-page index
    for match in HREF.finditer(page):
        try:
            scan(urlparse.urljoin(url, htmldecode(match.group(1))))
        except ValueError:
            # Malformed link (e.g. bad escape/version) — skip it rather
            # than abort processing of the whole page.
            pass

    pkg, ver = scan(url)  # ensure this page is in the page index
    if pkg:
        # process individual package page
        for new_url in find_external_links(url, page):
            # Process the found URL
            base, frag = egg_info_for_url(new_url)
            if base.endswith(".py") and not frag:
                # Bare .py download with no #egg fragment: tag it with the
                # package/version so setuptools can identify it, or record
                # that version info is missing.
                if ver:
                    new_url += "#egg=%s-%s" % (pkg, ver)
                else:
                    self.need_version_info(url)
            self.scan_url(new_url)

        # Turn PyPI's md5 spans into direct #md5= download links.
        return PYPI_MD5.sub(lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page)
    else:
        return ""  # no sense double-scanning non-package pages
def get_package_downloads(self, package_name):
    """Collect download metadata for every dist of *package_name*.

    Returns a list of dicts keyed 'filename', 'md5' (value only, without
    the 'md5=' prefix) and 'location' (fragment-free URL).  Modules that
    are installed in the system are not included; an empty list means the
    index knows no dists for the package.
    """
    requirement = Requirement.parse(package_name)
    self.find_packages(requirement)
    dists = self[package_name]
    downloads = list()
    if not dists:
        # We already have a cached index page and there are no dists.
        # Pypi is probably down, so we keep our existing one.
        return downloads
    for dist in dists:
        # this is a module installed in system
        if getattr(dist, "module_path", None) is None:
            filename, md5 = egg_info_for_url(dist.location)
            downloads.append(dict(
                filename=filename,
                md5=md5.split('=', 1)[-1],
                location=dist.location.split('#')[0],
            ))
    return downloads
def updateEggFor(self, package_name, eggname, eggs_dir=EGGS_DIR): """Download an egg for package_name """ self._lookupPackage(package_name) file_path = os.path.join(eggs_dir, package_name, eggname) for dist in self.index[package_name]: if getattr(dist, "module_path", None) is not None: # this is a module installed in system (directory), we want # to download a fresh package continue filename, md5 = egg_info_for_url(dist.location) # There may be more than one filename that matches: if the first n # fail, we want to try until one works! if filename == eggname: tmp = tempfile.gettempdir() try: tmp_location = self.index.download(dist.location, tmp) sys.stderr.write('Downloaded %s\n' % dist.location) shutil.move(tmp_location, file_path) return except Exception, err: sys.stderr.write('Error downloading %s: \n\t%s\n' % (dist.location, err))