def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
	if all:
		useflags = None
	elif useflags is None:
		if mysettings:
			useflags = mysettings["USE"].split()
	myfiles = self.getFetchMap(mypkg, useflags=useflags)
	myebuild = self.findname(mypkg)
	if myebuild is None:
		raise AssertionError("ebuild not found for '%s'" % mypkg)
	pkgdir = os.path.dirname(myebuild)
	mf = Manifest(pkgdir, self.settings["DISTDIR"])
	mysums = mf.getDigests()

	failures = {}
	for x in myfiles:
		if not mysums or x not in mysums:
			ok = False
			reason = _("digest missing")
		else:
			try:
				ok, reason = portage.checksum.verify_all(
					os.path.join(self.settings["DISTDIR"], x), mysums[x])
			except FileNotFound as e:
				ok = False
				reason = _("File Not Found: '%s'") % (e,)
		if not ok:
			failures[x] = reason
	if failures:
		return False
	return True
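# A minimal usage sketch for fetch_check() above, assuming a configured
# portage environment where portage.portdb is the usual portdbapi instance.
# The cpv string below is hypothetical; substitute a real
# category/package-version.
import portage

def all_distfiles_verified(cpv):
	# True only when every file in the package's fetch map is present
	# in DISTDIR and matches its Manifest digests.
	return portage.portdb.fetch_check(cpv)

# all_distfiles_verified("app-misc/example-1.0")  # hypothetical cpv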
def getfetchsizes(self, mypkg, useflags=None, debug=0):
	# Returns a filename:size dictionary of remaining downloads.
	myebuild = self.findname(mypkg)
	if myebuild is None:
		raise AssertionError("ebuild not found for '%s'" % mypkg)
	pkgdir = os.path.dirname(myebuild)
	mf = Manifest(pkgdir, self.settings["DISTDIR"])
	checksums = mf.getDigests()
	if not checksums:
		if debug:
			writemsg("[empty/missing/bad digest]: %s\n" % (mypkg,))
		return {}
	filesdict = {}
	myfiles = self.getFetchMap(mypkg, useflags=useflags)
	# XXX: maybe this should be improved: take partial downloads
	# into account? check checksums?
	for myfile in myfiles:
		try:
			fetch_size = int(checksums[myfile]["size"])
		except (KeyError, ValueError):
			if debug:
				writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n")
					% {"file": myfile, "pkg": mypkg})
			continue
		file_path = os.path.join(self.settings["DISTDIR"], myfile)
		mystat = None
		try:
			mystat = os.stat(file_path)
		except OSError:
			pass
		if mystat is None:
			existing_size = 0
			ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
			if ro_distdirs is not None:
				for x in shlex_split(ro_distdirs):
					try:
						mystat = os.stat(os.path.join(x, myfile))
					except OSError:
						pass
					else:
						if mystat.st_size == fetch_size:
							existing_size = fetch_size
							break
		else:
			existing_size = mystat.st_size
		remaining_size = fetch_size - existing_size
		if remaining_size > 0:
			# Assume the download is resumable.
			filesdict[myfile] = remaining_size
		elif remaining_size < 0:
			# The existing file is too large and therefore corrupt.
			filesdict[myfile] = int(checksums[myfile]["size"])
	return filesdict
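# A hedged sketch of how getfetchsizes() might be used to report the total
# number of bytes still to download for a package. The cpv is hypothetical;
# portage.portdb is assumed to be a configured portdbapi instance.
import portage

def remaining_download_bytes(cpv):
	# getfetchsizes() maps each distfile name to its remaining size,
	# treating partially downloaded files as resumable.
	return sum(portage.portdb.getfetchsizes(cpv).values())

# remaining_download_bytes("app-misc/example-1.0")  # hypothetical cpv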
def getfetchsize(pkg):
	# from /usr/bin/emerge
	try:
		myebuild = portage.portdb.findname(pkg)
		pkgdir = os.path.dirname(myebuild)
		mf = Manifest(pkgdir, portage.settings["DISTDIR"])
		if hasattr(portage.portdb, "getFetchMap"):
			fetchlist = portage.portdb.getFetchMap(pkg)
		else:
			fetchlist = portage.portdb.getfetchlist(pkg,
				mysettings=portage.settings, all=True)[1]
		mysum = mf.getDistfilesSize(fetchlist)
		# Integer division, so the digit-grouping loop below also works
		# under Python 3 (true division would yield a float string).
		mystr = str(mysum // 1024)
		mycount = len(mystr)
		while mycount > 3:
			mycount -= 3
			mystr = mystr[:mycount] + "," + mystr[mycount:]
		mysum = mystr + " kB"
		return mysum
	except (PortageException, KeyError):
		return "[no/bad digest]"
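# The comma-grouping loop in getfetchsize() inserts a separator every three
# digits from the right. A standalone sketch of the same logic, for clarity:
def group_thousands(n):
	mystr = str(n)
	mycount = len(mystr)
	while mycount > 3:
		mycount -= 3
		mystr = mystr[:mycount] + "," + mystr[mycount:]
	return mystr

assert group_thousands(1234567) == "1,234,567"
assert group_thousands(999) == "999"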
def digestParseFile(myfilename, mysettings=None):
	"""(filename) -- Parses a given file for entries matching:
	<checksumkey> <checksum_hex_string> <filename> <filesize>
	Ignores lines that don't start with a valid checksum identifier
	and returns a dict with the filenames as keys and
	{checksumkey: checksum} as the values.
	DEPRECATED: this function is now only a compatibility wrapper for
	portage.manifest.Manifest()."""
	warnings.warn("portage.digestParseFile() is deprecated",
		DeprecationWarning, stacklevel=2)
	mysplit = myfilename.split(os.sep)
	if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
		pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
	elif mysplit[-1] == "Manifest":
		pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
	return Manifest(pkgdir, None).getDigests()
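# Since digestParseFile() is deprecated, the Manifest API it wraps can be
# used directly. A sketch under the assumption that pkgdir is an ebuild's
# parent directory containing a Manifest file:
from portage.manifest import Manifest

def read_digests(pkgdir):
	# getDigests() returns {filename: {hashname: value, ...}, ...}
	# for every entry in the Manifest.
	return Manifest(pkgdir, None).getDigests()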
def process(self, args):
	# Call `ebuild ... digest`.
	try:
		portage._doebuild_manifest_exempt_depend += 1
		pkgdir = os.path.dirname(args['output'])
		fetchlist_dict = portage.FetchlistDict(
			pkgdir, portage.settings, portage.portdb)
		mf = Manifest(pkgdir, args['--portage-distfiles'],
			fetchlist_dict=fetchlist_dict, manifest1_compat=False)
		mf.create(requiredDistfiles=None,
			assumeDistHashesSometimes=True,
			assumeDistHashesAlways=True)
		mf.write()
	finally:
		portage._doebuild_manifest_exempt_depend -= 1
def digestgen(myarchives=None, mysettings=None, overwrite=None,
	manifestonly=None, myportdb=None):
	"""
	Generates a digest file if missing. Fetches files if necessary.
	NOTE: myarchives and mysettings used to be positional arguments,
		so their order must be preserved for backward compatibility.
	@param mysettings: the ebuild config (mysettings["O"] must correspond
		to the ebuild's parent directory)
	@type mysettings: config
	@param myportdb: a portdbapi instance
	@type myportdb: portdbapi
	@rtype: int
	@returns: 1 on success and 0 on failure
	"""
	if mysettings is None:
		raise TypeError("portage.digestgen(): missing"
			" required 'mysettings' parameter")
	if myportdb is None:
		warnings.warn("portage.digestgen() called without 'myportdb' parameter",
			DeprecationWarning, stacklevel=2)
		myportdb = portage.portdb
	if overwrite is not None:
		warnings.warn("portage.digestgen() called with "
			"deprecated 'overwrite' parameter",
			DeprecationWarning, stacklevel=2)
	if manifestonly is not None:
		warnings.warn("portage.digestgen() called with "
			"deprecated 'manifestonly' parameter",
			DeprecationWarning, stacklevel=2)

	try:
		portage._doebuild_manifest_exempt_depend += 1
		distfiles_map = {}
		fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
		for cpv in fetchlist_dict:
			try:
				for myfile in fetchlist_dict[cpv]:
					distfiles_map.setdefault(myfile, []).append(cpv)
			except InvalidDependString as e:
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e
				return 0
		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
		manifest1_compat = False
		mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
			fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
		# Don't require all hashes since that can trigger excessive
		# fetches when sufficient digests already exist. To ease transition
		# while Manifest 1 is being removed, only require hashes that will
		# exist before and after the transition.
		required_hash_types = set()
		required_hash_types.add("size")
		required_hash_types.add(MANIFEST2_REQUIRED_HASH)
		dist_hashes = mf.fhashdict.get("DIST", {})

		# To avoid accidental regeneration of digests with the incorrect
		# files (such as partially downloaded files), trigger the fetch
		# code if the file exists and its size doesn't match the current
		# manifest entry. If there really is a legitimate reason for the
		# digest to change, `ebuild --force digest` can be used to avoid
		# triggering this code (or else the old digests can be manually
		# removed from the Manifest).
		missing_files = []
		for myfile in distfiles_map:
			myhashes = dist_hashes.get(myfile)
			if not myhashes:
				try:
					st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
				except OSError:
					st = None
				if st is None or st.st_size == 0:
					missing_files.append(myfile)
				continue
			size = myhashes.get("size")

			try:
				st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
			except OSError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
				if size == 0:
					missing_files.append(myfile)
					continue
				if required_hash_types.difference(myhashes):
					missing_files.append(myfile)
					continue
			else:
				if st.st_size == 0 or size is not None and size != st.st_size:
					missing_files.append(myfile)
					continue

		if missing_files:
			mytree = os.path.realpath(os.path.dirname(
				os.path.dirname(mysettings["O"])))
			fetch_settings = config(clone=mysettings)
			debug = mysettings.get("PORTAGE_DEBUG") == "1"
			for myfile in missing_files:
				uris = set()
				for cpv in distfiles_map[myfile]:
					myebuild = os.path.join(mysettings["O"],
						catsplit(cpv)[1] + ".ebuild")
					# for RESTRICT=fetch, mirror, etc...
					doebuild_environment(myebuild, "fetch",
						mysettings["ROOT"], fetch_settings,
						debug, 1, myportdb)
					uris.update(myportdb.getFetchMap(
						cpv, mytree=mytree)[myfile])

				fetch_settings["A"] = myfile  # for use by pkg_nofetch()

				try:
					st = os.stat(os.path.join(
						mysettings["DISTDIR"], myfile))
				except OSError:
					st = None

				if not fetch({myfile: uris}, fetch_settings):
					writemsg(_("!!! Fetch failed for %s, can't update "
						"Manifest\n") % myfile, noiselevel=-1)
					if myfile in dist_hashes and \
						st is not None and st.st_size > 0:
						# The stat result is obtained before calling fetch(),
						# since fetch may rename the existing file if the
						# digest does not match.
						writemsg(_("!!! If you would like to "
							"forcefully replace the existing "
							"Manifest entry\n!!! for %s, use "
							"the following command:\n") % myfile +
							"!!! " + colorize("INFORM",
							"ebuild --force %s manifest" %
							os.path.basename(myebuild)) + "\n",
							noiselevel=-1)
					return 0

		writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
		try:
			mf.create(assumeDistHashesSometimes=True,
				assumeDistHashesAlways=(
				"assume-digests" in mysettings.features))
		except FileNotFound as e:
			writemsg(_("!!! File %s doesn't exist, can't update "
				"Manifest\n") % e, noiselevel=-1)
			return 0
		except PortagePackageException as e:
			writemsg(("!!! %s\n") % (e,), noiselevel=-1)
			return 0
		try:
			mf.write(sign=False)
		except PermissionDenied as e:
			writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
			return 0
		if "assume-digests" not in mysettings.features:
			distlist = list(mf.fhashdict.get("DIST", {}))
			distlist.sort()
			auto_assumed = []
			for filename in distlist:
				if not os.path.exists(
					os.path.join(mysettings["DISTDIR"], filename)):
					auto_assumed.append(filename)
			if auto_assumed:
				mytree = os.path.realpath(
					os.path.dirname(os.path.dirname(mysettings["O"])))
				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
				pkgs = myportdb.cp_list(cp, mytree=mytree)
				pkgs.sort()
				writemsg_stdout(" digest.assumed" + colorize("WARN",
					str(len(auto_assumed)).rjust(18)) + "\n")
				for pkg_key in pkgs:
					fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
					pv = pkg_key.split("/")[1]
					for filename in auto_assumed:
						if filename in fetchlist:
							writemsg_stdout(
								" %s::%s\n" % (pv, filename))
		return 1
	finally:
		portage._doebuild_manifest_exempt_depend -= 1
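# A hedged sketch of calling digestgen() with its required keyword
# parameters. It assumes mysettings is a config instance whose "O" key
# points at the ebuild's parent directory, as the docstring requires.
import portage

def regenerate_manifest(mysettings):
	# Returns 1 on success, 0 on failure; missing distfiles are
	# fetched before the Manifest is rewritten.
	return digestgen(mysettings=mysettings, myportdb=portage.portdb)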
def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
	"""
	Verifies checksums. Assumes all files have been downloaded.
	@rtype: int
	@returns: 1 on success and 0 on failure
	"""
	if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
		return 1
	pkgdir = mysettings["O"]
	manifest_path = os.path.join(pkgdir, "Manifest")
	if not os.path.exists(manifest_path):
		writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
			noiselevel=-1)
		if strict:
			return 0
		else:
			return 1
	mf = Manifest(pkgdir, mysettings["DISTDIR"])
	manifest_empty = True
	for d in mf.fhashdict.values():
		if d:
			manifest_empty = False
			break
	if manifest_empty:
		writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
			noiselevel=-1)
		if strict:
			return 0
		else:
			return 1
	eout = EOutput()
	eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
	try:
		if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
			eout.ebegin(_("checking ebuild checksums ;-)"))
			mf.checkTypeHashes("EBUILD")
			eout.eend(0)
			eout.ebegin(_("checking auxfile checksums ;-)"))
			mf.checkTypeHashes("AUX")
			eout.eend(0)
			eout.ebegin(_("checking miscfile checksums ;-)"))
			mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
			eout.eend(0)
		for f in myfiles:
			eout.ebegin(_("checking %s ;-)") % f)
			ftype = mf.findFile(f)
			if ftype is None:
				raise KeyError(f)
			mf.checkFileHashes(ftype, f)
			eout.eend(0)
	except KeyError as e:
		eout.eend(1)
		writemsg(_("\n!!! Missing digest for %s\n") % str(e), noiselevel=-1)
		return 0
	except FileNotFound as e:
		eout.eend(1)
		writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n")
			% str(e), noiselevel=-1)
		return 0
	except DigestException as e:
		eout.eend(1)
		writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
		writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
		writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
		writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
		writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
		return 0
	# Make sure that all of the ebuilds are actually listed in the Manifest.
	glep55 = 'parse-eapi-glep-55' in mysettings.features
	for f in os.listdir(pkgdir):
		pf = None
		if glep55:
			pf, eapi = _split_ebuild_name_glep55(f)
		elif f[-7:] == '.ebuild':
			pf = f[:-7]
		if pf is not None and not mf.hasFile("EBUILD", f):
			writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") %
				os.path.join(pkgdir, f), noiselevel=-1)
			if strict:
				return 0
	# epatch will just grab all the patches out of a directory, so we
	# have to make sure there aren't any foreign files that it might grab.
	filesdir = os.path.join(pkgdir, "files")
	for parent, dirs, files in os.walk(filesdir):
		try:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='strict')
		except UnicodeDecodeError:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='replace')
			writemsg(_("!!! Path contains invalid "
				"character(s) for encoding '%s': '%s'")
				% (_encodings['fs'], parent), noiselevel=-1)
			if strict:
				return 0
			continue
		for d in dirs:
			d_bytes = d
			try:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='replace')
				writemsg(_("!!! Path contains invalid "
					"character(s) for encoding '%s': '%s'")
					% (_encodings['fs'], os.path.join(parent, d)),
					noiselevel=-1)
				if strict:
					return 0
				dirs.remove(d_bytes)
				continue
			if d.startswith(".") or d == "CVS":
				dirs.remove(d_bytes)
		for f in files:
			try:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='replace')
				if f.startswith("."):
					continue
				f = os.path.join(parent, f)[len(filesdir) + 1:]
				writemsg(_("!!! File name contains invalid "
					"character(s) for encoding '%s': '%s'")
					% (_encodings['fs'], f), noiselevel=-1)
				if strict:
					return 0
				continue
			if f.startswith("."):
				continue
			f = os.path.join(parent, f)[len(filesdir) + 1:]
			file_type = mf.findFile(f)
			if file_type != "AUX" and not f.startswith("digest-"):
				writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") %
					os.path.join(filesdir, f), noiselevel=-1)
				if strict:
					return 0
	return 1
def digestcheck(myfiles, mysettings, strict=False, justmanifest=None):
	"""
	Verifies checksums. Assumes all files have been downloaded.
	@rtype: int
	@returns: 1 on success and 0 on failure
	"""
	if justmanifest is not None:
		warnings.warn("The justmanifest parameter of the "
			"portage.package.ebuild.digestcheck.digestcheck()"
			" function is now unused.",
			DeprecationWarning, stacklevel=2)
		justmanifest = None

	if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
		return 1
	pkgdir = mysettings["O"]
	manifest_path = os.path.join(pkgdir, "Manifest")
	if not os.path.exists(manifest_path):
		writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
			noiselevel=-1)
		if strict:
			return 0
		else:
			return 1
	mf = Manifest(pkgdir, mysettings["DISTDIR"])
	manifest_empty = True
	for d in mf.fhashdict.values():
		if d:
			manifest_empty = False
			break
	if manifest_empty:
		writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
			noiselevel=-1)
		if strict:
			return 0
		else:
			return 1
	eout = EOutput()
	eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
	try:
		if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
			eout.ebegin(_("checking ebuild checksums ;-)"))
			mf.checkTypeHashes("EBUILD")
			eout.eend(0)
			eout.ebegin(_("checking auxfile checksums ;-)"))
			mf.checkTypeHashes("AUX")
			eout.eend(0)
			eout.ebegin(_("checking miscfile checksums ;-)"))
			mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
			eout.eend(0)
		for f in myfiles:
			eout.ebegin(_("checking %s ;-)") % f)
			ftype = mf.findFile(f)
			if ftype is None:
				raise KeyError(f)
			mf.checkFileHashes(ftype, f)
			eout.eend(0)
	except KeyError as e:
		eout.eend(1)
		writemsg(_("\n!!! Missing digest for %s\n") % str(e), noiselevel=-1)
		return 0
	except FileNotFound as e:
		eout.eend(1)
		writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n")
			% str(e), noiselevel=-1)
		return 0
	except DigestException as e:
		eout.eend(1)
		writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
		writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
		writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
		writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
		writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
		return 0
	# Make sure that all of the ebuilds are actually listed in the Manifest.
	for f in os.listdir(pkgdir):
		pf = None
		if f[-7:] == '.ebuild':
			pf = f[:-7]
		if pf is not None and not mf.hasFile("EBUILD", f):
			writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") %
				os.path.join(pkgdir, f), noiselevel=-1)
			if strict:
				return 0
	# epatch will just grab all the patches out of a directory, so we
	# have to make sure there aren't any foreign files that it might grab.
	filesdir = os.path.join(pkgdir, "files")
	for parent, dirs, files in os.walk(filesdir):
		try:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='strict')
		except UnicodeDecodeError:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='replace')
			writemsg(_("!!! Path contains invalid "
				"character(s) for encoding '%s': '%s'")
				% (_encodings['fs'], parent), noiselevel=-1)
			if strict:
				return 0
			continue
		for d in dirs:
			d_bytes = d
			try:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='replace')
				writemsg(_("!!! Path contains invalid "
					"character(s) for encoding '%s': '%s'")
					% (_encodings['fs'], os.path.join(parent, d)),
					noiselevel=-1)
				if strict:
					return 0
				dirs.remove(d_bytes)
				continue
			if d.startswith(".") or d == "CVS":
				dirs.remove(d_bytes)
		for f in files:
			try:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='replace')
				if f.startswith("."):
					continue
				f = os.path.join(parent, f)[len(filesdir) + 1:]
				writemsg(_("!!! File name contains invalid "
					"character(s) for encoding '%s': '%s'")
					% (_encodings['fs'], f), noiselevel=-1)
				if strict:
					return 0
				continue
			if f.startswith("."):
				continue
			f = os.path.join(parent, f)[len(filesdir) + 1:]
			file_type = mf.findFile(f)
			if file_type != "AUX" and not f.startswith("digest-"):
				writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") %
					os.path.join(filesdir, f), noiselevel=-1)
				if strict:
					return 0
	return 1
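# A minimal sketch of invoking digestcheck() after a fetch, assuming
# mysettings["O"] is set to the package directory. With strict=True any
# Manifest problem fails the check instead of merely warning.
def verify_package_dir(mysettings, distfiles):
	if not digestcheck(distfiles, mysettings, strict=True):
		raise RuntimeError("Manifest verification failed for %s"
			% mysettings["O"])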
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1):
	"fetch files. Will use digest file if available."

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT", "").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
	if "mirror" in restrict or \
		"nomirror" in restrict:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") %
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"],
			noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") %
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"],
				noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.
	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors = []

	if listonly or ("distlocks" not in features):
		use_locks = 0

	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not os.access(mysettings["DISTDIR"], os.W_OK) and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if "nomirror" in restrict or \
		"mirror" in restrict:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in
				mysettings["GENTOO_MIRRORS"].split() if x]

	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	pkgdir = mysettings.get("O")
	if not (pkgdir is None or skip_manifest):
		mydigests = Manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	else:
		# no digests because fetch was not called for a specific package
		mydigests = {}

	ro_distdirs = [x for x in
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", ""))
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors) - 1, -1, -1):
		if mymirrors[x] and mymirrors[x][0] == '/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available). A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
	else:
		for myuri in myuris:
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	filedict = OrderedDict()
	primaryuri_indexes = {}
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile] = []
			for y in range(0, len(locations)):
				filedict[myfile].append(
					locations[y] + "/distfiles/" + myfile)
		if myuri[:9] == "mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx + 1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					random.shuffle(thirdpartymirrors[mirrorname])

					uris = [locmirr.rstrip("/") + "/" + path
						for locmirr in thirdpartymirrors[mirrorname]]
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(
						myfile, []).extend(uris)

				if not filedict[myfile]:
					writemsg(_("No known mirror by the name: %s\n")
						% (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"),
					noiselevel=-1)
				writemsg(" %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch:
				# Only fetching from specific mirrors is allowed.
				continue
			if "primaryuri" in restrict:
				# Use the source site first.
				if myfile in primaryuri_indexes:
					primaryuri_indexes[myfile] += 1
				else:
					primaryuri_indexes[myfile] = 0
				filedict[myfile].insert(
					primaryuri_indexes[myfile], myuri)
			else:
				filedict[myfile].append(myuri)
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	can_fetch = True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dirmode = 0o2070
		filemode = 0o60
		modemask = 0o2
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurious permissions adjustments
			# when inside fakeroot.
			dir_gid = 0
		distdir_dirs = [""]
		if "distlocks" in features:
			distdir_dirs.append(".locks")
		try:
			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				try:
					st = os.stat(mydir)
				except OSError:
					st = None

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
						continue
					if _userpriv_test_write_file(mysettings,
						write_test_file):
						continue

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode,
					mask=modemask):
					if st is None:
						# The directory has just been created
						# and therefore it must be empty.
						continue
					writemsg(_("Adjusting permissions recursively: '%s'\n")
						% mydir, noiselevel=-1)
					def onerror(e):
						# bail out on the first error that occurs
						# during recursion
						raise
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask,
						onerror=onerror):
						raise OperationNotPermitted(
							_("Failed to apply recursive permissions "
							"for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n")
					% mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	if can_fetch and use_locks and locks_in_subdir:
		distlocks_subdir = os.path.join(
			mysettings["DISTDIR"], locks_in_subdir)
		if not os.access(distlocks_subdir, os.W_OK):
			writemsg(_("!!! No write access to write to %s. Aborting.\n")
				% distlocks_subdir, noiselevel=-1)
			return 0
		del distlocks_subdir

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})
		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard
			# their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely
			# store myfile; overestimate the filesize so we aren't
			# bitten by FS overhead
			if size is not None and hasattr(os, "statvfs"):
				vfs_stat = os.statvfs(mysettings["DISTDIR"])
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if not has_space:
				writemsg(_("!!! Insufficient space to store %s in %s\n") %
					(myfile, mysettings["DISTDIR"]), noiselevel=-1)

				if has_space_superuser:
					writemsg(_("!!! Insufficient privileges to use "
						"remaining space.\n"), noiselevel=-1)
					if userfetch:
						writemsg(_("!!! You may set FEATURES=\"-userfetch\""
							" in /etc/make.conf in order to fetch with\n"
							"!!! superuser privileges.\n"), noiselevel=-1)

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout)
				if match:
					if distdir_writable:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
					try:
						os.unlink(myfile_path)
					except OSError:
						pass

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!! %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except OSError:
								pass
					elif distdir_writable:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than "
								"PORTAGE_FETCH_RESUME_MIN_SIZE)\n")
								% mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") %
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") %
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				if fsmirrors and not os.path.exists(myfile_path) \
					and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n")
								% myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(myfile_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					try:
						apply_secpass_permissions(
							myfile_path, gid=portage_gid, mode=0o664,
							mask=0o2, stat_cached=mystat)
					except PortageException as e:
						if not os.access(myfile_path, os.R_OK):
							writemsg(_("!!! Failed to adjust permissions:"
								" %s\n") % str(e), noiselevel=-1)

					# If the file is empty then it's obviously invalid.
					# Remove the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except EnvironmentError:
								pass
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists. We must
						# assume that it is fully downloaded.
						continue
					else:
						if mystat.st_size < mydigests[myfile]["size"] and \
							not restrict_fetch:
							fetched = 1  # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile,))
							eout.eend(0)
							continue
						else:
							verified_ok, reason = verify_all(
								myfile_path, mydigests[myfile])
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile,
									noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got: %s\n"
									"!!! Expected: %s\n") %
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") %
										temp_filename, noiselevel=-1)
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								digests = mydigests.get(myfile)
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin("%s %s ;-)" %
										(myfile, " ".join(digests)))
									eout.eend(0)
								continue

			# fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc + " ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n")
							% (fetchcommand_var, GLOBAL_CONFIG_PATH),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n")
							% (resumecommand_var, GLOBAL_CONFIG_PATH),
							level=logging.ERROR, noiselevel=-1)
						return 0

				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(myfile_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but "
								"unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, "
								"but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile,
								noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					# we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than "
									"PORTAGE_FETCH_RESUME_MIN_SIZE)\n")
									% mystat.st_size)
								try:
									os.unlink(myfile_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						# resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch = resumecommand
						command_var = resumecommand_var
					else:
						# normal mode:
						locfetch = fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") %
						re.sub(r'//(.+):.+@(.+)/',
						r'//\1:*password*@\2/', loc))
					variables = {
						"DISTDIR": mysettings["DISTDIR"],
						"URI": loc,
						"FILE": myfile
					}

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables)
						for x in myfetch]
					myret = -1
					try:
						myret = _spawn_fetch(mysettings, myfetch)
					finally:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid. Don't
					# trust the return value from the fetcher. Remove the
					# empty file and try to download again.
					try:
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# no exception? file exists. let digestcheck()
							# report appropriately for size or checksum errors

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile). In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user
							# gets a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one...
								# Kill 404 files though.
								if (mystat.st_size < 100000) and \
									(len(myfile) > 4) and \
									not ((myfile[-5:] == ".html") or
									(myfile[-4:] == ".htm")):
									html404 = re.compile(
										"<title>.*(not found|404).*</title>",
										re.I | re.M)
									if html404.search(codecs.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'],
										errors='strict'),
										mode='r',
										encoding=_encodings['content'],
										errors='replace').read()):
										try:
											os.unlink(mysettings["DISTDIR"] +
												"/" + myfile)
											writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
											fetched = 0
											continue
										except (IOError, OSError):
											pass
								fetched = 1
								continue
							# File is the correct size--check the checksums
							# for the fetched file NOW, for those users who
							# don't have a stable/continuous net connection.
							# This way we have a chance to try to download
							# from another mirror...
							verified_ok, reason = verify_all(
								mysettings["DISTDIR"] + "/" + myfile,
								mydigests[myfile])
							if not verified_ok:
								print(reason)
								writemsg(_("!!! Fetched file: "
									"%s VERIFY FAILED!\n") % myfile,
									noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got: %s\n"
									"!!! Expected: %s\n") %
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								temp_filename = \
									_checksum_failure_temp_file(
									mysettings["DISTDIR"], myfile)
								writemsg_stdout(_("Refetching... "
									"File renamed to '%s'\n\n") %
									temp_filename, noiselevel=-1)
								fetched = 0
								checksum_failure_count += 1
								if checksum_failure_count == \
									checksum_failure_primaryuri:
									# Switch to "primaryuri" mode in order
									# to increase the probability of success.
									primaryuris = \
										primaryuri_dict.get(myfile)
									if primaryuris:
										uri_list.extend(
											reversed(primaryuris))
								if checksum_failure_count >= \
									checksum_failure_max_tries:
									break
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								digests = mydigests.get(myfile)
								if digests:
									eout.ebegin("%s %s ;-)" %
										(myfile, " ".join(sorted(digests))))
									eout.eend(0)
								fetched = 2
								break
					else:
						if not myret:
							fetched = 2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and "
								"download failed.\n\n"), noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually. See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)

				have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
					os.path.isdir(mysettings["PORTAGE_BUILDDIR"])

				global_tmpdir = mysettings["PORTAGE_TMPDIR"]
				private_tmpdir = None
				if not parallel_fetchonly and not have_builddir:
					# When called by digestgen(), it's normal that
					# PORTAGE_BUILDDIR doesn't exist. It's helpful
					# to show the pkg_nofetch output though, so go
					# ahead and create a temporary PORTAGE_BUILDDIR.
					# Use a temporary config instance to avoid altering
					# the state of the one that's been passed in.
					mysettings = config(clone=mysettings)
					try:
						private_tmpdir = tempfile.mkdtemp("",
							"._portage_fetch_.", global_tmpdir)
					except OSError as e:
						if e.errno != PermissionDenied.errno:
							raise
						raise PermissionDenied(global_tmpdir)
					mysettings["PORTAGE_TMPDIR"] = private_tmpdir
					mysettings.backup_changes("PORTAGE_TMPDIR")
					debug = mysettings.get("PORTAGE_DEBUG") == "1"
					doebuild_environment(mysettings["EBUILD"], "fetch",
						mysettings["ROOT"], mysettings, debug, 1, None)
					prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
					have_builddir = True

				if not parallel_fetchonly and have_builddir:
					# To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
					# ensuring sane $PWD (bug #239560) and storing elog
					# messages. Therefore, calling code needs to ensure that
					# PORTAGE_BUILDDIR is already clean and locked here.

					# All the pkg_nofetch output goes to stderr since
					# it's considered to be an error message.
					fd_pipes = {
						0: sys.stdin.fileno(),
						1: sys.stderr.fileno(),
						2: sys.stderr.fileno(),
					}

					ebuild_phase = mysettings.get("EBUILD_PHASE")
					try:
						mysettings["EBUILD_PHASE"] = "nofetch"
						doebuild_spawn(_shell_quote(EBUILD_SH_BINARY) +
							" nofetch", mysettings, fd_pipes=fd_pipes)
					finally:
						if ebuild_phase is None:
							mysettings.pop("EBUILD_PHASE", None)
						else:
							mysettings["EBUILD_PHASE"] = ebuild_phase

					if private_tmpdir is not None:
						shutil.rmtree(private_tmpdir)

			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n")
					% myfile, noiselevel=-1)

			if listonly:
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
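# A hedged sketch of driving fetch() directly. The URI map mirrors the
# {filename: set-of-uris} shape that digestgen() passes in above; the URL
# and filename are hypothetical placeholders.
def fetch_one(mysettings, filename, uri):
	# fetch() returns 1 when every requested file is present and
	# verified in DISTDIR, and 0 otherwise.
	return fetch({filename: set([uri])}, mysettings)

# fetch_one(mysettings, "example-1.0.tar.gz",
# 	"http://example.org/example-1.0.tar.gz")  # hypothetical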
def digestcheck(self):
	"""
	Verifies checksums. Assumes all files have been downloaded.
	@rtype: int
	@returns: None on success and error msg on failure
	"""
	myfiles = []
	self._mysettings['PORTAGE_QUIET'] = '1'
	if self._mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
		return None
	manifest_path = os.path.join(self._pkgdir, "Manifest")
	if not os.path.exists(manifest_path):
		return ("!!! Manifest file not found: '%s'") % manifest_path
	mf = Manifest(self._pkgdir, self._mysettings["DISTDIR"])
	manifest_empty = True
	for d in mf.fhashdict.values():
		if d:
			manifest_empty = False
			break
	if manifest_empty:
		return ("!!! Manifest is empty: '%s'") % manifest_path
	try:
		if "PORTAGE_PARALLEL_FETCHONLY" not in self._mysettings:
			mf.checkTypeHashes("EBUILD")
			mf.checkTypeHashes("AUX")
			mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
		for f in myfiles:
			ftype = mf.findFile(f)
			if ftype is None:
				return ("!!! Missing digest for '%s'") % (f,)
			mf.checkFileHashes(ftype, f)
	except FileNotFound as e:
		return ("!!! A file listed in the Manifest "
			"could not be found: %s") % str(e)
	except DigestException as e:
		return ("!!! Digest verification failed: %s\n"
			"Reason: %s\nGot: %s\nExpected: %s") \
			% (e.value[0], e.value[1], e.value[2], e.value[3])
	# Make sure that all of the ebuilds are actually listed in the Manifest.
	for f in os.listdir(self._pkgdir):
		pf = None
		if f[-7:] == '.ebuild':
			pf = f[:-7]
		if pf is not None and not mf.hasFile("EBUILD", f):
			# Note: the original referenced an undefined `pkgdir` here,
			# which would raise NameError; use self._pkgdir instead.
			return ("!!! A file is not listed in the Manifest: '%s'") \
				% os.path.join(self._pkgdir, f)
	# epatch will just grab all the patches out of a directory, so we
	# have to make sure there aren't any foreign files that it might grab.
	filesdir = os.path.join(self._pkgdir, "files")
	for parent, dirs, files in os.walk(filesdir):
		try:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='strict')
		except UnicodeDecodeError:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='replace')
			return ("!!! Path contains invalid character(s) "
				"for encoding '%s': '%s'") \
				% (_encodings['fs'], parent)
		for d in dirs:
			d_bytes = d
			try:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='replace')
				return ("!!! Path contains invalid character(s) "
					"for encoding '%s': '%s'") \
					% (_encodings['fs'], os.path.join(parent, d))
			if d.startswith(".") or d == "CVS":
				dirs.remove(d_bytes)
		for f in files:
			try:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='replace')
				if f.startswith("."):
					continue
				f = os.path.join(parent, f)[len(filesdir) + 1:]
				return ("!!! File name contains invalid character(s) "
					"for encoding '%s': '%s'") \
					% (_encodings['fs'], f)
			if f.startswith("."):
				continue
			f = os.path.join(parent, f)[len(filesdir) + 1:]
			file_type = mf.findFile(f)
			if file_type != "AUX" and not f.startswith("digest-"):
				return ("!!! A file is not listed in the Manifest: '%s'") \
					% os.path.join(filesdir, f)
	return None