def unlock(self):
    """
    Set GPG_TTY and run GPG unlock command.
    If gpg-keepalive is set, start keepalive thread.
    """
    if self.GPG_unlock_command and (
        self.settings.get("BINPKG_FORMAT", SUPPORTED_GENTOO_BINPKG_FORMATS[0])
        == "gpkg"
    ):
        try:
            os.environ["GPG_TTY"] = os.ttyname(sys.stdout.fileno())
        except OSError as e:
            # When run with no input/output tty, this will fail.
            # However, if the password is given by command,
            # GPG does not need to ask password, so can be ignored.
            writemsg(colorize("WARN", str(e)) + "\n")

        cmd = shlex_split(varexpand(self.GPG_unlock_command, mydict=self.settings))
        return_code = subprocess.Popen(cmd).wait()

        if return_code == os.EX_OK:
            writemsg_stdout(colorize("GOOD", "unlocked") + "\n")
            sys.stdout.flush()
        else:
            raise GPGException("GPG unlock failed")

        if self.keepalive:
            self.GPG_unlock_command = shlex_split(
                varexpand(self.GPG_unlock_command, mydict=self.settings)
            )
            self.thread = threading.Thread(target=self.gpg_keepalive, daemon=True)
            self.thread.start()
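# A minimal illustrative sketch of the expansion step used above, separate from
# the method itself. The settings keys and the command template here are made-up
# assumptions, not the actual binary-package GPG variables: varexpand()
# substitutes the ${...} references from a dict-like mapping, and shlex_split()
# then yields the argv list handed to subprocess.Popen().
from portage.util import shlex_split, varexpand

fake_settings = {"GNUPGHOME": "/etc/portage/gnupg", "GPG_KEY_ID": "0xDEADBEEF"}
template = 'gpg --homedir "${GNUPGHOME}" --local-user "${GPG_KEY_ID}" --sign /dev/null'
cmd = shlex_split(varexpand(template, mydict=fake_settings))
# cmd == ['gpg', '--homedir', '/etc/portage/gnupg',
#         '--local-user', '0xDEADBEEF', '--sign', '/dev/null']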
def testVarExpandPass(self):
    varDict = {"a": "5", "b": "7", "c": "-5"}
    for key in varDict:
        result = varexpand("$%s" % key, varDict)
        self.assertFalse(
            result != varDict[key],
            msg="Got %s != %s, from varexpand( %s, %s )"
            % (result, varDict[key], "$%s" % key, varDict),
        )
        result = varexpand("${%s}" % key, varDict)
        self.assertFalse(
            result != varDict[key],
            msg="Got %s != %s, from varexpand( %s, %s )"
            % (result, varDict[key], "${%s}" % key, varDict),
        )
def testVarExpandPass(self):
    varDict = { "a":"5", "b":"7", "c":"-5" }
    for key in varDict:
        result = varexpand( "$%s" % key, varDict )
        self.assertFalse( result != varDict[key],
            msg="Got %s != %s, from varexpand( %s, %s )" % \
            ( result, varDict[key], "$%s" % key, varDict ) )
        result = varexpand( "${%s}" % key, varDict )
        self.assertFalse( result != varDict[key],
            msg="Got %s != %s, from varexpand( %s, %s )" % \
            ( result, varDict[key], "${%s}" % key, varDict ) )
def testVarExpandBackslashes(self):
    r"""
    We want to behave like bash does when expanding a variable
    assignment in a sourced file, in which case it performs backslash
    removal for \\ and \$ but nothing more. It also removes escaped
    newline characters. Note that we don't handle escaped quotes here,
    since getconfig() uses shlex to handle that earlier.
    """
    varDict = {}
    tests = [
        ("\\", "\\"),
        ("\\\\", "\\"),
        ("\\\\\\", "\\\\"),
        ("\\\\\\\\", "\\\\"),
        ("\\$", "$"),
        ("\\\\$", "\\$"),
        ("\\a", "\\a"),
        ("\\b", "\\b"),
        ("\\n", "\\n"),
        ("\\r", "\\r"),
        ("\\t", "\\t"),
        ("\\\n", ""),
        ("\\\"", "\\\""),
        ("\\'", "\\'"),
    ]
    for test in tests:
        result = varexpand(test[0], varDict)
        self.assertFalse(
            result != test[1],
            msg="Got %s != %s from varexpand( %s, %s )"
            % (result, test[1], test[0], varDict),
        )
def file_get(baseurl, dest, conn=None, fcmd=None):
    """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
    URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""

    if not fcmd:
        return file_get_lib(baseurl, dest, conn)

    variables = {
        "DISTDIR": dest,
        "URI": baseurl,
        "FILE": os.path.basename(baseurl)
    }

    from portage.util import varexpand
    from portage.process import spawn
    myfetch = portage.util.shlex_split(fcmd)
    myfetch = [varexpand(x, mydict=variables) for x in myfetch]
    fd_pipes = {
        0: sys.stdin.fileno(),
        1: sys.stdout.fileno(),
        2: sys.stdout.fileno()
    }
    retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
    if retval != os.EX_OK:
        sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
        return 0
    return 1
def file_get(baseurl, dest, conn=None, fcmd=None, filename=None):
    """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
    URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""

    if not fcmd:
        warnings.warn("Use of portage.getbinpkg.file_get() without the fcmd "
            "parameter is deprecated", DeprecationWarning, stacklevel=2)
        return file_get_lib(baseurl, dest, conn)

    if not filename:
        filename = os.path.basename(baseurl)

    variables = {
        "DISTDIR": dest,
        "URI": baseurl,
        "FILE": filename
    }

    from portage.util import varexpand
    from portage.process import spawn
    myfetch = portage.util.shlex_split(fcmd)
    myfetch = [varexpand(x, mydict=variables) for x in myfetch]
    fd_pipes = {
        0: sys.__stdin__.fileno(),
        1: sys.__stdout__.fileno(),
        2: sys.__stdout__.fileno()
    }
    sys.__stdout__.flush()
    sys.__stderr__.flush()
    retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
    if retval != os.EX_OK:
        sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
        return 0
    return 1
def file_get(baseurl,dest,conn=None,fcmd=None):
    """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
    URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""

    if not fcmd:
        return file_get_lib(baseurl,dest,conn)

    variables = {
        "DISTDIR": dest,
        "URI": baseurl,
        "FILE": os.path.basename(baseurl)
    }

    from portage.util import varexpand
    from portage.process import spawn
    myfetch = portage.util.shlex_split(fcmd)
    myfetch = [varexpand(x, mydict=variables) for x in myfetch]
    fd_pipes = {
        0: sys.stdin.fileno(),
        1: sys.stdout.fileno(),
        2: sys.stdout.fileno()
    }
    retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
    if retval != os.EX_OK:
        sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
        return 0
    return 1
def testVarExpandFail(self):
    varDict = {"a": "5", "b": "7", "c": "15"}

    testVars = ["fail"]

    for var in testVars:
        result = varexpand("$%s" % var, varDict)
        self.assertFalse(
            len(result),
            msg="Got %s == %s, from varexpand( %s, %s )"
            % (result, var, "$%s" % var, varDict),
        )

        result = varexpand("${%s}" % var, varDict)
        self.assertFalse(
            len(result),
            msg="Got %s == %s, from varexpand( %s, %s )"
            % (result, var, "${%s}" % var, varDict),
        )
def testVarExpandBackslashes(self):
    r"""
    We want to behave like bash does when expanding a variable
    assignment in a sourced file, in which case it performs backslash
    removal for \\ and \$ but nothing more. It also removes escaped
    newline characters. Note that we don't handle escaped quotes here,
    since getconfig() uses shlex to handle that earlier.
    """
    varDict = {}
    tests = [
        ("\\", "\\"),
        ("\\\\", "\\"),
        ("\\\\\\", "\\\\"),
        ("\\\\\\\\", "\\\\"),
        ("\\$", "$"),
        ("\\\\$", "\\$"),
        ("\\a", "\\a"),
        ("\\b", "\\b"),
        ("\\n", "\\n"),
        ("\\r", "\\r"),
        ("\\t", "\\t"),
        ("\\\n", ""),
        ('\\"', '\\"'),
        ("\\'", "\\'"),
    ]
    for test in tests:
        result = varexpand(test[0], varDict)
        self.assertFalse(
            result != test[1],
            msg="Got %s != %s from varexpand(%s, %s)"
            % (result, test[1], test[0], varDict),
        )
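# Illustrative asserts (not part of the test class above) restating the table:
# varexpand() strips the backslash only before another backslash, before "$",
# and before an escaped newline; every other escape passes through untouched.
from portage.util import varexpand

assert varexpand("\\$", {}) == "$"        # \$ -> $
assert varexpand("\\\\", {}) == "\\"      # \\ -> \
assert varexpand("\\\n", {}) == ""        # backslash-newline is removed
assert varexpand("\\n", {}) == "\\n"      # \n stays a two-character sequence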
def file_get(baseurl=None, dest=None, conn=None, fcmd=None, filename=None, fcmd_vars=None):
    """Takes a base url to connect to and read from.
    URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""

    if not fcmd:
        warnings.warn(
            "Use of portage.getbinpkg.file_get() without the fcmd "
            "parameter is deprecated",
            DeprecationWarning,
            stacklevel=2,
        )
        return file_get_lib(baseurl, dest, conn)

    variables = {}

    if fcmd_vars is not None:
        variables.update(fcmd_vars)

    if "DISTDIR" not in variables:
        if dest is None:
            raise portage.exception.MissingParameter(
                _("%s is missing required '%s' key") % ("fcmd_vars", "DISTDIR"))
        variables["DISTDIR"] = dest

    if "URI" not in variables:
        if baseurl is None:
            raise portage.exception.MissingParameter(
                _("%s is missing required '%s' key") % ("fcmd_vars", "URI"))
        variables["URI"] = baseurl

    if "FILE" not in variables:
        if filename is None:
            filename = os.path.basename(variables["URI"])
        variables["FILE"] = filename

    from portage.util import varexpand
    from portage.process import spawn
    myfetch = portage.util.shlex_split(fcmd)
    myfetch = [varexpand(x, mydict=variables) for x in myfetch]
    fd_pipes = {
        0: portage._get_stdin().fileno(),
        1: sys.__stdout__.fileno(),
        2: sys.__stdout__.fileno(),
    }
    sys.__stdout__.flush()
    sys.__stderr__.flush()
    retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
    if retval != os.EX_OK:
        sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
        return 0
    return 1
def testVarExpandFail(self):
    varDict = { "a":"5", "b":"7", "c":"15" }

    testVars = [ "fail" ]

    for var in testVars:
        result = varexpand( "$%s" % var, varDict )
        self.assertFalse( len(result),
            msg="Got %s == %s, from varexpand( %s, %s )" \
            % ( result, var, "$%s" % var, varDict ) )

        result = varexpand( "${%s}" % var, varDict )
        self.assertFalse( len(result),
            msg="Got %s == %s, from varexpand( %s, %s )" \
            % ( result, var, "${%s}" % var, varDict ) )
def testVarExpandSingleQuotes(self):
    varDict = {"a": "5"}
    tests = [("\'${a}\'", "\'${a}\'")]
    for test in tests:
        result = varexpand(test[0], varDict)
        self.assertFalse(
            result != test[1],
            msg="Got %s != %s from varexpand( %s, %s )"
            % (result, test[1], test[0], varDict),
        )
def testVarExpandDoubleQuotes(self):
    varDict = {"a": "5"}
    tests = [("\"${a}\"", "\"5\"")]
    for test in tests:
        result = varexpand(test[0], varDict)
        self.assertFalse(
            result != test[1],
            msg="Got %s != %s from varexpand( %s, %s )"
            % (result, test[1], test[0], varDict),
        )
def testVarExpandSingleQuotes(self):
    varDict = { "a":"5" }
    tests = [ ("\'${a}\'", "\'${a}\'") ]
    for test in tests:
        result = varexpand( test[0], varDict )
        self.assertFalse( result != test[1],
            msg="Got %s != %s from varexpand( %s, %s )" \
            % ( result, test[1], test[0], varDict ) )
def testVarExpandDoubleQuotes(self):
    varDict = { "a":"5" }
    tests = [ ("\"${a}\"", "\"5\"") ]
    for test in tests:
        result = varexpand( test[0], varDict )
        self.assertFalse( result != test[1],
            msg="Got %s != %s from varexpand( %s, %s )" \
            % ( result, test[1], test[0], varDict ) )
def _start_gpg_proc(self):
    gpg_vars = self.gpg_vars
    if gpg_vars is None:
        gpg_vars = {}
    else:
        gpg_vars = gpg_vars.copy()
    gpg_vars["FILE"] = self._manifest_path
    gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
    gpg_cmd = shlex_split(gpg_cmd)
    gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd))
    self._start_task(gpg_proc, self._gpg_proc_exit)
def testVarExpandDoubleQuotes(self):
    varDict = {"a": "5"}
    tests = [('"${a}"', '"5"')]
    for test in tests:
        result = varexpand(test[0], varDict)
        self.assertFalse(
            result != test[1],
            msg="Got %s != %s from varexpand(%s, %s)"
            % (result, test[1], test[0], varDict),
        )
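# Illustrative asserts (separate from the tests above) summarizing the quoting
# rules those quote tests encode: single quotes suppress expansion entirely,
# while double quotes are kept in the output and the variable inside them is
# expanded; actual quote removal is left to shlex by the callers.
from portage.util import varexpand

assert varexpand("'${a}'", {"a": "5"}) == "'${a}'"
assert varexpand('"${a}"', {"a": "5"}) == '"5"'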
def read_config(mandatory_opts):
    eprefix = portage.settings["EPREFIX"]
    if portage._not_installed:
        config_path = os.path.join(portage.PORTAGE_BASE_PATH, "cnf", "dispatch-conf.conf")
    else:
        config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
    loader = KeyValuePairFileLoader(config_path, None)
    opts, _errors = loader.load()
    if not opts:
        print(
            _("dispatch-conf: Error reading {}; fatal").format(config_path),
            file=sys.stderr,
        )
        sys.exit(1)

    # Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
    quotes = "\"'"
    for k, v in opts.items():
        if v[:1] in quotes and v[:1] == v[-1:]:
            opts[k] = v[1:-1]

    for key in mandatory_opts:
        if key not in opts:
            if key == "merge":
                opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
            else:
                print(
                    _('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal')
                    % (key,),
                    file=sys.stderr,
                )

    # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding
    variables = {"EPREFIX": eprefix}
    opts["archive-dir"] = varexpand(opts["archive-dir"], mydict=variables)

    if not os.path.exists(opts["archive-dir"]):
        os.mkdir(opts["archive-dir"])
        # Use restrictive permissions by default, in order to protect
        # against vulnerabilities (like bug #315603 involving rcs).
        os.chmod(opts["archive-dir"], 0o700)
    elif not os.path.isdir(opts["archive-dir"]):
        print(
            _("dispatch-conf: Config archive dir [%s] must exist; fatal")
            % (opts["archive-dir"],),
            file=sys.stderr,
        )
        sys.exit(1)

    return opts
def file_get(baseurl=None, dest=None, conn=None, fcmd=None, filename=None, fcmd_vars=None):
    """Takes a base url to connect to and read from.
    URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""

    if not fcmd:
        warnings.warn("Use of portage.getbinpkg.file_get() without the fcmd "
            "parameter is deprecated", DeprecationWarning, stacklevel=2)
        return file_get_lib(baseurl, dest, conn)

    variables = {}

    if fcmd_vars is not None:
        variables.update(fcmd_vars)

    if "DISTDIR" not in variables:
        if dest is None:
            raise portage.exception.MissingParameter(
                _("%s is missing required '%s' key") % ("fcmd_vars", "DISTDIR"))
        variables["DISTDIR"] = dest

    if "URI" not in variables:
        if baseurl is None:
            raise portage.exception.MissingParameter(
                _("%s is missing required '%s' key") % ("fcmd_vars", "URI"))
        variables["URI"] = baseurl

    if "FILE" not in variables:
        if filename is None:
            filename = os.path.basename(variables["URI"])
        variables["FILE"] = filename

    from portage.util import varexpand
    from portage.process import spawn
    myfetch = portage.util.shlex_split(fcmd)
    myfetch = [varexpand(x, mydict=variables) for x in myfetch]
    fd_pipes = {
        0: portage._get_stdin().fileno(),
        1: sys.__stdout__.fileno(),
        2: sys.__stdout__.fileno()
    }
    sys.__stdout__.flush()
    sys.__stderr__.flush()
    retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
    if retval != os.EX_OK:
        sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
        return 0
    return 1
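# A hedged usage sketch for the fcmd-based file_get() variants above. The URL,
# destination directory and fetch-command template are made-up examples; the
# template can reference ${DISTDIR}, ${URI} and ${FILE}, which the function
# expands per argument after shlex-splitting the command.
import portage.getbinpkg

fcmd = 'wget -O "${DISTDIR}/${FILE}" "${URI}"'
ok = portage.getbinpkg.file_get(
    baseurl="https://example.org/packages/foo-1.0.gpkg.tar",  # hypothetical
    dest="/var/cache/binpkgs",                                 # hypothetical
    fcmd=fcmd,
)
# ok is 1 on success, 0 if the spawned fetcher exits non-zero.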
def _clean_logs(clean_cmd, settings):
    logdir = settings.get("PORTAGE_LOGDIR")
    if logdir is None or not os.path.isdir(logdir):
        return 78

    variables = {"PORTAGE_LOGDIR": logdir}
    cmd = [varexpand(x, mydict=variables) for x in clean_cmd]

    try:
        rval = portage.process.spawn(cmd, env=os.environ)
    except portage.exception.CommandNotFound:
        rval = 127
    return rval
def _clean_logs(clean_cmd, settings):
    logdir = settings.get("PORT_LOGDIR")
    if logdir is None or not os.path.isdir(logdir):
        return 78

    variables = {"PORT_LOGDIR" : logdir}
    cmd = [varexpand(x, mydict=variables) for x in clean_cmd]

    try:
        rval = portage.process.spawn(cmd, env=os.environ)
    except portage.exception.CommandNotFound:
        rval = 127
    return rval
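# Illustrative expansion of the clean_cmd argument used above: only the log
# directory variable is substituted, every other token passes through as-is.
# The find command below is an assumed example of a PORT_LOGDIR_CLEAN setting,
# not necessarily the shipped default.
from portage.util import shlex_split, varexpand

logdir = "/var/log/portage"
clean_cmd = shlex_split('find "${PORT_LOGDIR}" -type f -mtime +7 -delete')
cmd = [varexpand(x, mydict={"PORT_LOGDIR": logdir}) for x in clean_cmd]
# cmd == ['find', '/var/log/portage', '-type', 'f', '-mtime', '+7', '-delete']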
def add(self, entry):
    """
    Add one NEEDED.ELF.2 entry, for inclusion in the generated
    REQUIRES and PROVIDES values.

    @param entry: NEEDED.ELF.2 entry
    @type entry: NeededEntry
    """

    multilib_cat = entry.multilib_category
    if multilib_cat is None:
        # This usage is invalid. The caller must ensure that
        # the multilib category data is supplied here.
        raise AssertionError("Missing multilib category data: %s" % entry.filename)

    self._basename_map.setdefault(os.path.basename(entry.filename), []).append(entry)

    if entry.needed and (
        self._requires_exclude is None
        or self._requires_exclude.match(entry.filename.lstrip(os.sep)) is None
    ):
        runpaths = frozenset()
        if entry.runpaths is not None:
            expand = {"ORIGIN": os.path.dirname(entry.filename)}
            runpaths = frozenset(
                normalize_path(
                    varexpand(
                        x,
                        expand,
                        error_leader=lambda: "%s: DT_RUNPATH: " % entry.filename,
                    )
                )
                for x in entry.runpaths
            )
        for x in entry.needed:
            if (
                self._requires_exclude is None
                or self._requires_exclude.match(x) is None
            ):
                self._requires_map[multilib_cat][x].add(runpaths)

    if entry.soname:
        self._provides_unfiltered.setdefault(multilib_cat, set()).add(entry.soname)

    if entry.soname and (
        self._provides_exclude is None
        or (
            self._provides_exclude.match(entry.filename.lstrip(os.sep)) is None
            and self._provides_exclude.match(entry.soname) is None
        )
    ):
        self._provides_map.setdefault(multilib_cat, set()).add(entry.soname)
def _start_gpg_proc(self):
    gpg_vars = self.gpg_vars
    if gpg_vars is None:
        gpg_vars = {}
    else:
        gpg_vars = gpg_vars.copy()
    gpg_vars["FILE"] = self._manifest_path
    gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
    gpg_cmd = shlex_split(gpg_cmd)
    gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
    # PipeLogger echoes output and efficiently monitors for process
    # exit by listening for the stdout EOF event.
    gpg_proc.pipe_reader = PipeLogger(background=self.background,
        input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler)
    self._start_task(gpg_proc, self._gpg_proc_exit)
def add(self, entry):
    """
    Add one NEEDED.ELF.2 entry, for inclusion in the generated
    REQUIRES and PROVIDES values.

    @param entry: NEEDED.ELF.2 entry
    @type entry: NeededEntry
    """

    multilib_cat = entry.multilib_category
    if multilib_cat is None:
        # This usage is invalid. The caller must ensure that
        # the multilib category data is supplied here.
        raise AssertionError(
            "Missing multilib category data: %s" % entry.filename)

    self._basename_map.setdefault(
        os.path.basename(entry.filename), []).append(entry)

    if entry.needed and (
            self._requires_exclude is None or
            self._requires_exclude.match(
                entry.filename.lstrip(os.sep)) is None):
        runpaths = frozenset()
        if entry.runpaths is not None:
            expand = {"ORIGIN": os.path.dirname(entry.filename)}
            runpaths = frozenset(normalize_path(
                varexpand(x, expand,
                    error_leader=lambda: "%s: DT_RUNPATH: " % entry.filename))
                for x in entry.runpaths)
        for x in entry.needed:
            if (self._requires_exclude is None or
                    self._requires_exclude.match(x) is None):
                self._requires_map[multilib_cat][x].add(runpaths)

    if entry.soname:
        self._provides_unfiltered.setdefault(
            multilib_cat, set()).add(entry.soname)

    if entry.soname and (
            self._provides_exclude is None or
            (self._provides_exclude.match(
                entry.filename.lstrip(os.sep)) is None and
                self._provides_exclude.match(entry.soname) is None)):
        self._provides_map.setdefault(
            multilib_cat, set()).add(entry.soname)
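# Illustrative expansion of a single DT_RUNPATH entry as add() performs it:
# $ORIGIN resolves to the directory of the object being registered, and the
# error_leader callable is only used to prefix warnings about malformed input.
# The library path and runpath value are made-up examples.
import os
from portage.util import varexpand

filename = "/usr/lib64/foo/libbar.so.1"
expand = {"ORIGIN": os.path.dirname(filename)}
runpath = varexpand(
    "${ORIGIN}/../plugins",
    expand,
    error_leader=lambda: "%s: DT_RUNPATH: " % filename,
)
# runpath == "/usr/lib64/foo/../plugins"; add() then passes it through
# normalize_path() before recording it in the requires map.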
def read_config(mandatory_opts):
    eprefix = portage.settings["EPREFIX"]
    if portage._not_installed:
        config_path = os.path.join(portage.PORTAGE_BASE_PATH, "cnf", "dispatch-conf.conf")
    else:
        config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
    loader = KeyValuePairFileLoader(config_path, None)
    opts, _errors = loader.load()
    if not opts:
        print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
        sys.exit(1)

    # Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
    quotes = "\"'"
    for k, v in opts.items():
        if v[:1] in quotes and v[:1] == v[-1:]:
            opts[k] = v[1:-1]

    for key in mandatory_opts:
        if key not in opts:
            if key == "merge":
                opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
            else:
                print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)

    # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding
    variables = {"EPREFIX": eprefix}
    opts['archive-dir'] = varexpand(opts['archive-dir'], mydict=variables)

    if not os.path.exists(opts['archive-dir']):
        os.mkdir(opts['archive-dir'])
        # Use restrictive permissions by default, in order to protect
        # against vulnerabilities (like bug #315603 involving rcs).
        os.chmod(opts['archive-dir'], 0o700)
    elif not os.path.isdir(opts['archive-dir']):
        print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
        sys.exit(1)

    return opts
def ionice(settings):
    ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
    if ionice_cmd:
        ionice_cmd = portage.util.shlex_split(ionice_cmd)
    if not ionice_cmd:
        return

    from portage.util import varexpand
    variables = {"PID" : str(os.getpid())}
    cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

    try:
        rval = portage.process.spawn(cmd, env=os.environ)
    except portage.exception.CommandNotFound:
        # The OS kernel probably doesn't support ionice,
        # so return silently.
        return

    if rval != os.EX_OK:
        out = portage.output.EOutput()
        out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
        out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def rebuild(self, exclude_pkgs=None, include_file=None, preserve_paths=None): """ Raises CommandNotFound if there are preserved libs and the scanelf binary is not available. @param exclude_pkgs: A set of packages that should be excluded from the LinkageMap, since they are being unmerged and their NEEDED entries are therefore irrelevant and would only serve to corrupt the LinkageMap. @type exclude_pkgs: set @param include_file: The path of a file containing NEEDED entries for a package which does not exist in the vardbapi yet because it is currently being merged. @type include_file: String @param preserve_paths: Libraries preserved by a package instance that is currently being merged. They need to be explicitly passed to the LinkageMap, since they are not registered in the PreservedLibsRegistry yet. @type preserve_paths: set """ os = _os_merge root = self._root root_len = len(root) - 1 self._clear_cache() self._defpath.update(getlibpaths(self._root, env=self._dbapi.settings)) libs = self._libs obj_properties = self._obj_properties lines = [] # Data from include_file is processed first so that it # overrides any data from previously installed files. if include_file is not None: for line in grabfile(include_file): lines.append((None, include_file, line)) aux_keys = [self._needed_aux_key] can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK) if can_lock: self._dbapi.lock() try: for cpv in self._dbapi.cpv_all(): if exclude_pkgs is not None and cpv in exclude_pkgs: continue needed_file = self._dbapi.getpath(cpv, filename=self._needed_aux_key) for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines(): lines.append((cpv, needed_file, line)) finally: if can_lock: self._dbapi.unlock() # have to call scanelf for preserved libs here as they aren't # registered in NEEDED.ELF.2 files plibs = {} if preserve_paths is not None: plibs.update((x, None) for x in preserve_paths) if self._dbapi._plib_registry and \ self._dbapi._plib_registry.hasEntries(): for cpv, items in \ self._dbapi._plib_registry.getPreservedLibs().items(): if exclude_pkgs is not None and cpv in exclude_pkgs: # These preserved libs will either be unmerged, # rendering them irrelevant, or they will be # preserved in the replacement package and are # already represented via the preserve_paths # parameter. continue plibs.update((x, cpv) for x in items) if plibs: args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-qF", "%a;%F;%S;%r;%n"] args.extend(os.path.join(root, x.lstrip("." + os.sep)) \ for x in plibs) try: proc = subprocess.Popen(args, stdout=subprocess.PIPE) except EnvironmentError as e: if e.errno != errno.ENOENT: raise raise CommandNotFound(args[0]) else: for l in proc.stdout: try: l = _unicode_decode(l, encoding=_encodings['content'], errors='strict') except UnicodeDecodeError: l = _unicode_decode(l, encoding=_encodings['content'], errors='replace') writemsg_level(_("\nError decoding characters " \ "returned from scanelf: %s\n\n") % (l,), level=logging.ERROR, noiselevel=-1) l = l[3:].rstrip("\n") if not l: continue fields = l.split(";") if len(fields) < 5: writemsg_level(_("\nWrong number of fields " \ "returned from scanelf: %s\n\n") % (l,), level=logging.ERROR, noiselevel=-1) continue fields[1] = fields[1][root_len:] owner = plibs.pop(fields[1], None) lines.append((owner, "scanelf", ";".join(fields))) proc.wait() proc.stdout.close() if plibs: # Preserved libraries that did not appear in the scanelf output. # This is known to happen with statically linked libraries. 
# Generate dummy lines for these, so we can assume that every # preserved library has an entry in self._obj_properties. This # is important in order to prevent findConsumers from raising # an unwanted KeyError. for x, cpv in plibs.items(): lines.append((cpv, "plibs", ";".join(['', x, '', '', '']))) # Share identical frozenset instances when available, # in order to conserve memory. frozensets = {} for owner, location, l in lines: l = l.rstrip("\n") if not l: continue if '\0' in l: # os.stat() will raise "TypeError: must be encoded string # without NULL bytes, not str" in this case. writemsg_level(_("\nLine contains null byte(s) " \ "in %s: %s\n\n") % (location, l), level=logging.ERROR, noiselevel=-1) continue try: entry = NeededEntry.parse(location, l) except InvalidData as e: writemsg_level("\n%s\n\n" % (e,), level=logging.ERROR, noiselevel=-1) continue # If NEEDED.ELF.2 contains the new multilib category field, # then use that for categorization. Otherwise, if a mapping # exists, map e_machine (entry.arch) to an approximate # multilib category. If all else fails, use e_machine, just # as older versions of portage did. arch = entry.multilib_category if arch is None: arch = _approx_multilib_categories.get( entry.arch, entry.arch) obj = entry.filename soname = entry.soname expand = {"ORIGIN": os.path.dirname(entry.filename)} path = frozenset(normalize_path( varexpand(x, expand, error_leader=lambda: "%s: " % location)) for x in entry.runpaths) path = frozensets.setdefault(path, path) needed = frozenset(entry.needed) needed = frozensets.setdefault(needed, needed) obj_key = self._obj_key(obj) indexed = True myprops = obj_properties.get(obj_key) if myprops is None: indexed = False myprops = self._obj_properties_class( arch, needed, path, soname, [], owner) obj_properties[obj_key] = myprops # All object paths are added into the obj_properties tuple. myprops.alt_paths.append(obj) # Don't index the same file more that once since only one # set of data can be correct and therefore mixing data # may corrupt the index (include_file overrides previously # installed). if indexed: continue arch_map = libs.get(arch) if arch_map is None: arch_map = {} libs[arch] = arch_map if soname: soname_map = arch_map.get(soname) if soname_map is None: soname_map = self._soname_map_class( providers=[], consumers=[]) arch_map[soname] = soname_map soname_map.providers.append(obj_key) for needed_soname in needed: soname_map = arch_map.get(needed_soname) if soname_map is None: soname_map = self._soname_map_class( providers=[], consumers=[]) arch_map[needed_soname] = soname_map soname_map.consumers.append(obj_key) for arch, sonames in libs.items(): for soname_node in sonames.values(): soname_node.providers = tuple(set(soname_node.providers)) soname_node.consumers = tuple(set(soname_node.consumers))
def rebuild(self, exclude_pkgs=None, include_file=None, preserve_paths=None): """ Raises CommandNotFound if there are preserved libs and the scanelf binary is not available. @param exclude_pkgs: A set of packages that should be excluded from the LinkageMap, since they are being unmerged and their NEEDED entries are therefore irrelevant and would only serve to corrupt the LinkageMap. @type exclude_pkgs: set @param include_file: The path of a file containing NEEDED entries for a package which does not exist in the vardbapi yet because it is currently being merged. @type include_file: String @param preserve_paths: Libraries preserved by a package instance that is currently being merged. They need to be explicitly passed to the LinkageMap, since they are not registered in the PreservedLibsRegistry yet. @type preserve_paths: set """ os = _os_merge root = self._root root_len = len(root) - 1 self._clear_cache() self._defpath.update(getlibpaths(self._dbapi.settings['EROOT'], env=self._dbapi.settings)) libs = self._libs obj_properties = self._obj_properties lines = [] # Data from include_file is processed first so that it # overrides any data from previously installed files. if include_file is not None: for line in grabfile(include_file): lines.append((None, include_file, line)) aux_keys = [self._needed_aux_key] can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK) if can_lock: self._dbapi.lock() try: for cpv in self._dbapi.cpv_all(): if exclude_pkgs is not None and cpv in exclude_pkgs: continue needed_file = self._dbapi.getpath(cpv, filename=self._needed_aux_key) for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines(): lines.append((cpv, needed_file, line)) finally: if can_lock: self._dbapi.unlock() # have to call scanelf for preserved libs here as they aren't # registered in NEEDED.ELF.2 files plibs = {} if preserve_paths is not None: plibs.update((x, None) for x in preserve_paths) if self._dbapi._plib_registry and \ self._dbapi._plib_registry.hasEntries(): for cpv, items in \ self._dbapi._plib_registry.getPreservedLibs().items(): if exclude_pkgs is not None and cpv in exclude_pkgs: # These preserved libs will either be unmerged, # rendering them irrelevant, or they will be # preserved in the replacement package and are # already represented via the preserve_paths # parameter. continue plibs.update((x, cpv) for x in items) if plibs: args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-qF", "%a;%F;%S;%r;%n"] args.extend(os.path.join(root, x.lstrip("." + os.sep)) \ for x in plibs) try: proc = subprocess.Popen(args, stdout=subprocess.PIPE) except EnvironmentError as e: if e.errno != errno.ENOENT: raise raise CommandNotFound(args[0]) else: for l in proc.stdout: try: l = _unicode_decode(l, encoding=_encodings['content'], errors='strict') except UnicodeDecodeError: l = _unicode_decode(l, encoding=_encodings['content'], errors='replace') writemsg_level(_("\nError decoding characters " \ "returned from scanelf: %s\n\n") % (l,), level=logging.ERROR, noiselevel=-1) l = l[3:].rstrip("\n") if not l: continue try: entry = NeededEntry.parse("scanelf", l) except InvalidData as e: writemsg_level("\n%s\n\n" % (e,), level=logging.ERROR, noiselevel=-1) continue try: with open(_unicode_encode(entry.filename, encoding=_encodings['fs'], errors='strict'), 'rb') as f: elf_header = ELFHeader.read(f) except EnvironmentError as e: if e.errno != errno.ENOENT: raise # File removed concurrently. 
continue entry.multilib_category = compute_multilib_category(elf_header) entry.filename = entry.filename[root_len:] owner = plibs.pop(entry.filename, None) lines.append((owner, "scanelf", _unicode(entry))) proc.wait() proc.stdout.close() if plibs: # Preserved libraries that did not appear in the scanelf output. # This is known to happen with statically linked libraries. # Generate dummy lines for these, so we can assume that every # preserved library has an entry in self._obj_properties. This # is important in order to prevent findConsumers from raising # an unwanted KeyError. for x, cpv in plibs.items(): lines.append((cpv, "plibs", ";".join(['', x, '', '', '']))) # Share identical frozenset instances when available, # in order to conserve memory. frozensets = {} for owner, location, l in lines: l = l.rstrip("\n") if not l: continue if '\0' in l: # os.stat() will raise "TypeError: must be encoded string # without NULL bytes, not str" in this case. writemsg_level(_("\nLine contains null byte(s) " \ "in %s: %s\n\n") % (location, l), level=logging.ERROR, noiselevel=-1) continue try: entry = NeededEntry.parse(location, l) except InvalidData as e: writemsg_level("\n%s\n\n" % (e,), level=logging.ERROR, noiselevel=-1) continue # If NEEDED.ELF.2 contains the new multilib category field, # then use that for categorization. Otherwise, if a mapping # exists, map e_machine (entry.arch) to an approximate # multilib category. If all else fails, use e_machine, just # as older versions of portage did. arch = entry.multilib_category if arch is None: arch = _approx_multilib_categories.get( entry.arch, entry.arch) obj = entry.filename soname = entry.soname expand = {"ORIGIN": os.path.dirname(entry.filename)} path = frozenset(normalize_path( varexpand(x, expand, error_leader=lambda: "%s: " % location)) for x in entry.runpaths) path = frozensets.setdefault(path, path) needed = frozenset(entry.needed) needed = frozensets.setdefault(needed, needed) obj_key = self._obj_key(obj) indexed = True myprops = obj_properties.get(obj_key) if myprops is None: indexed = False myprops = self._obj_properties_class( arch, needed, path, soname, [], owner) obj_properties[obj_key] = myprops # All object paths are added into the obj_properties tuple. myprops.alt_paths.append(obj) # Don't index the same file more that once since only one # set of data can be correct and therefore mixing data # may corrupt the index (include_file overrides previously # installed). if indexed: continue arch_map = libs.get(arch) if arch_map is None: arch_map = {} libs[arch] = arch_map if soname: soname_map = arch_map.get(soname) if soname_map is None: soname_map = self._soname_map_class( providers=[], consumers=[]) arch_map[soname] = soname_map soname_map.providers.append(obj_key) for needed_soname in needed: soname_map = arch_map.get(needed_soname) if soname_map is None: soname_map = self._soname_map_class( providers=[], consumers=[]) arch_map[needed_soname] = soname_map soname_map.consumers.append(obj_key) for arch, sonames in libs.items(): for soname_node in sonames.values(): soname_node.providers = tuple(set(soname_node.providers)) soname_node.consumers = tuple(set(soname_node.consumers))
def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None, allow_missing_digests=True): "fetch files. Will use digest file if available." if not myuris: return 1 features = mysettings.features restrict = mysettings.get("PORTAGE_RESTRICT","").split() userfetch = secpass >= 2 and "userfetch" in features userpriv = secpass >= 2 and "userpriv" in features # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring. restrict_mirror = "mirror" in restrict or "nomirror" in restrict if restrict_mirror: if ("mirror" in features) and ("lmirror" not in features): # lmirror should allow you to bypass mirror restrictions. # XXX: This is not a good thing, and is temporary at best. print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch.")) return 1 # Generally, downloading the same file repeatedly from # every single available mirror is a waste of bandwidth # and time, so there needs to be a cap. checksum_failure_max_tries = 5 v = checksum_failure_max_tries try: v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", checksum_failure_max_tries)) except (ValueError, OverflowError): writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" " contains non-integer value: '%s'\n") % \ mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " "default value: %s\n") % checksum_failure_max_tries, noiselevel=-1) v = checksum_failure_max_tries if v < 1: writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" " contains value less than 1: '%s'\n") % v, noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " "default value: %s\n") % checksum_failure_max_tries, noiselevel=-1) v = checksum_failure_max_tries checksum_failure_max_tries = v del v fetch_resume_size_default = "350K" fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE") if fetch_resume_size is not None: fetch_resume_size = "".join(fetch_resume_size.split()) if not fetch_resume_size: # If it's undefined or empty, silently use the default. fetch_resume_size = fetch_resume_size_default match = _fetch_resume_size_re.match(fetch_resume_size) if match is None or \ (match.group(2).upper() not in _size_suffix_map): writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" " contains an unrecognized format: '%s'\n") % \ mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " "default value: %s\n") % fetch_resume_size_default, noiselevel=-1) fetch_resume_size = None if fetch_resume_size is None: fetch_resume_size = fetch_resume_size_default match = _fetch_resume_size_re.match(fetch_resume_size) fetch_resume_size = int(match.group(1)) * \ 2 ** _size_suffix_map[match.group(2).upper()] # Behave like the package has RESTRICT="primaryuri" after a # couple of checksum failures, to increase the probablility # of success before checksum_failure_max_tries is reached. checksum_failure_primaryuri = 2 thirdpartymirrors = mysettings.thirdpartymirrors() # In the background parallel-fetch process, it's safe to skip checksum # verification of pre-existing files in $DISTDIR that have the correct # file size. The parent process will verify their checksums prior to # the unpack phase. 
parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings if parallel_fetchonly: fetchonly = 1 check_config_instance(mysettings) custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"], CUSTOM_MIRRORS_FILE), recursive=1) mymirrors=[] if listonly or ("distlocks" not in features): use_locks = 0 fetch_to_ro = 0 if "skiprocheck" in features: fetch_to_ro = 1 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro: if use_locks: writemsg(colorize("BAD", _("!!! For fetching to a read-only filesystem, " "locking should be turned off.\n")), noiselevel=-1) writemsg(_("!!! This can be done by adding -distlocks to " "FEATURES in /etc/make.conf\n"), noiselevel=-1) # use_locks = 0 # local mirrors are always added if "local" in custommirrors: mymirrors += custommirrors["local"] if restrict_mirror: # We don't add any mirrors. pass else: if try_mirrors: mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x] hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", "")) if hash_filter.transparent: hash_filter = None skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1" if skip_manifest: allow_missing_digests = True pkgdir = mysettings.get("O") if digests is None and not (pkgdir is None or skip_manifest): mydigests = mysettings.repositories.get_repo_for_location( os.path.dirname(os.path.dirname(pkgdir))).load_manifest( pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST") elif digests is None or skip_manifest: # no digests because fetch was not called for a specific package mydigests = {} else: mydigests = digests ro_distdirs = [x for x in \ shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \ if os.path.isdir(x)] fsmirrors = [] for x in range(len(mymirrors)-1,-1,-1): if mymirrors[x] and mymirrors[x][0]=='/': fsmirrors += [mymirrors[x]] del mymirrors[x] restrict_fetch = "fetch" in restrict force_mirror = "force-mirror" in features and not restrict_mirror custom_local_mirrors = custommirrors.get("local", []) if restrict_fetch: # With fetch restriction, a normal uri may only be fetched from # custom local mirrors (if available). A mirror:// uri may also # be fetched from specific mirrors (effectively overriding fetch # restriction, but only for specific mirrors). locations = custom_local_mirrors else: locations = mymirrors file_uri_tuples = [] # Check for 'items' attribute since OrderedDict is not a dict. 
if hasattr(myuris, 'items'): for myfile, uri_set in myuris.items(): for myuri in uri_set: file_uri_tuples.append((myfile, myuri)) else: for myuri in myuris: file_uri_tuples.append((os.path.basename(myuri), myuri)) filedict = OrderedDict() primaryuri_dict = {} thirdpartymirror_uris = {} for myfile, myuri in file_uri_tuples: if myfile not in filedict: filedict[myfile]=[] for y in range(0,len(locations)): filedict[myfile].append(locations[y]+"/distfiles/"+myfile) if myuri[:9]=="mirror://": eidx = myuri.find("/", 9) if eidx != -1: mirrorname = myuri[9:eidx] path = myuri[eidx+1:] # Try user-defined mirrors first if mirrorname in custommirrors: for cmirr in custommirrors[mirrorname]: filedict[myfile].append( cmirr.rstrip("/") + "/" + path) # now try the official mirrors if mirrorname in thirdpartymirrors: uris = [locmirr.rstrip("/") + "/" + path \ for locmirr in thirdpartymirrors[mirrorname]] random.shuffle(uris) filedict[myfile].extend(uris) thirdpartymirror_uris.setdefault(myfile, []).extend(uris) if not filedict[myfile]: writemsg(_("No known mirror by the name: %s\n") % (mirrorname)) else: writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1) writemsg(" %s\n" % (myuri), noiselevel=-1) else: if restrict_fetch or force_mirror: # Only fetch from specific mirrors is allowed. continue primaryuris = primaryuri_dict.get(myfile) if primaryuris is None: primaryuris = [] primaryuri_dict[myfile] = primaryuris primaryuris.append(myuri) # Order primaryuri_dict values to match that in SRC_URI. for uris in primaryuri_dict.values(): uris.reverse() # Prefer thirdpartymirrors over normal mirrors in cases when # the file does not yet exist on the normal mirrors. for myfile, uris in thirdpartymirror_uris.items(): primaryuri_dict.setdefault(myfile, []).extend(uris) # Now merge primaryuri values into filedict (includes mirrors # explicitly referenced in SRC_URI). if "primaryuri" in restrict: for myfile, uris in filedict.items(): filedict[myfile] = primaryuri_dict.get(myfile, []) + uris else: for myfile in filedict: filedict[myfile] += primaryuri_dict.get(myfile, []) can_fetch=True if listonly: can_fetch = False if can_fetch and not fetch_to_ro: global _userpriv_test_write_file_cache dirmode = 0o070 filemode = 0o60 modemask = 0o2 dir_gid = portage_gid if "FAKED_MODE" in mysettings: # When inside fakeroot, directories with portage's gid appear # to have root's gid. Therefore, use root's gid instead of # portage's gid to avoid spurrious permissions adjustments # when inside fakeroot. dir_gid = 0 distdir_dirs = [""] try: for x in distdir_dirs: mydir = os.path.join(mysettings["DISTDIR"], x) write_test_file = os.path.join( mydir, ".__portage_test_write__") try: st = os.stat(mydir) except OSError: st = None if st is not None and stat.S_ISDIR(st.st_mode): if not (userfetch or userpriv): continue if _userpriv_test_write_file(mysettings, write_test_file): continue _userpriv_test_write_file_cache.pop(write_test_file, None) if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask): if st is None: # The directory has just been created # and therefore it must be empty. 
continue writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir, noiselevel=-1) def onerror(e): raise # bail out on the first error that occurs during recursion if not apply_recursive_permissions(mydir, gid=dir_gid, dirmode=dirmode, dirmask=modemask, filemode=filemode, filemask=modemask, onerror=onerror): raise OperationNotPermitted( _("Failed to apply recursive permissions for the portage group.")) except PortageException as e: if not os.path.isdir(mysettings["DISTDIR"]): writemsg("!!! %s\n" % str(e), noiselevel=-1) writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1) writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1) if can_fetch and \ not fetch_to_ro and \ not os.access(mysettings["DISTDIR"], os.W_OK): writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"], noiselevel=-1) can_fetch = False distdir_writable = can_fetch and not fetch_to_ro failed_files = set() restrict_fetch_msg = False for myfile in filedict: """ fetched status 0 nonexistent 1 partially downloaded 2 completely downloaded """ fetched = 0 orig_digests = mydigests.get(myfile, {}) if not (allow_missing_digests or listonly): verifiable_hash_types = set(orig_digests).intersection(hashfunc_map) verifiable_hash_types.discard("size") if not verifiable_hash_types: expected = set(hashfunc_map) expected.discard("size") expected = " ".join(sorted(expected)) got = set(orig_digests) got.discard("size") got = " ".join(sorted(got)) reason = (_("Insufficient data for checksum verification"), got, expected) writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if fetchonly: failed_files.add(myfile) continue else: return 0 size = orig_digests.get("size") if size == 0: # Zero-byte distfiles are always invalid, so discard their digests. del mydigests[myfile] orig_digests.clear() size = None pruned_digests = orig_digests if parallel_fetchonly: pruned_digests = {} if size is not None: pruned_digests["size"] = size myfile_path = os.path.join(mysettings["DISTDIR"], myfile) has_space = True has_space_superuser = True file_lock = None if listonly: writemsg_stdout("\n", noiselevel=-1) else: # check if there is enough space in DISTDIR to completely store myfile # overestimate the filesize so we aren't bitten by FS overhead vfs_stat = None if size is not None and hasattr(os, "statvfs"): try: vfs_stat = os.statvfs(mysettings["DISTDIR"]) except OSError as e: writemsg_level("!!! statvfs('%s'): %s\n" % (mysettings["DISTDIR"], e), noiselevel=-1, level=logging.ERROR) del e if vfs_stat is not None: try: mysize = os.stat(myfile_path).st_size except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e mysize = 0 if (size - mysize + vfs_stat.f_bsize) >= \ (vfs_stat.f_bsize * vfs_stat.f_bavail): if (size - mysize + vfs_stat.f_bsize) >= \ (vfs_stat.f_bsize * vfs_stat.f_bfree): has_space_superuser = False if not has_space_superuser: has_space = False elif secpass < 2: has_space = False elif userfetch: has_space = False if distdir_writable and use_locks: lock_kwargs = {} if fetchonly: lock_kwargs["flags"] = os.O_NONBLOCK try: file_lock = lockfile(myfile_path, wantnewlockfile=1, **lock_kwargs) except TryAgain: writemsg(_(">>> File '%s' is already locked by " "another fetcher. 
Continuing...\n") % myfile, noiselevel=-1) continue try: if not listonly: eout = EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET") == "1" match, mystat = _check_distfile( myfile_path, pruned_digests, eout, hash_filter=hash_filter) if match: # Skip permission adjustment for symlinks, since we don't # want to modify anything outside of the primary DISTDIR, # and symlinks typically point to PORTAGE_RO_DISTDIRS. if distdir_writable and not os.path.islink(myfile_path): try: apply_secpass_permissions(myfile_path, gid=portage_gid, mode=0o664, mask=0o2, stat_cached=mystat) except PortageException as e: if not os.access(myfile_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % str(e), noiselevel=-1) del e continue if distdir_writable and mystat is None: # Remove broken symlinks if necessary. try: os.unlink(myfile_path) except OSError: pass if mystat is not None: if stat.S_ISDIR(mystat.st_mode): writemsg_level( _("!!! Unable to fetch file since " "a directory is in the way: \n" "!!! %s\n") % myfile_path, level=logging.ERROR, noiselevel=-1) return 0 if mystat.st_size == 0: if distdir_writable: try: os.unlink(myfile_path) except OSError: pass elif distdir_writable: if mystat.st_size < fetch_resume_size and \ mystat.st_size < size: # If the file already exists and the size does not # match the existing digests, it may be that the # user is attempting to update the digest. In this # case, the digestgen() function will advise the # user to use `ebuild --force foo.ebuild manifest` # in order to force the old digests to be replaced. # Since the user may want to keep this file, rename # it instead of deleting it. writemsg(_(">>> Renaming distfile with size " "%d (smaller than " "PORTAGE_FETCH_RESU" "ME_MIN_SIZE)\n") % mystat.st_size) temp_filename = \ _checksum_failure_temp_file( mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) elif mystat.st_size >= size: temp_filename = \ _checksum_failure_temp_file( mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) if distdir_writable and ro_distdirs: readonly_file = None for x in ro_distdirs: filename = os.path.join(x, myfile) match, mystat = _check_distfile( filename, pruned_digests, eout, hash_filter=hash_filter) if match: readonly_file = filename break if readonly_file is not None: try: os.unlink(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e os.symlink(readonly_file, myfile_path) continue # this message is shown only after we know that # the file is not already fetched if not has_space: writemsg(_("!!! Insufficient space to store %s in %s\n") % \ (myfile, mysettings["DISTDIR"]), noiselevel=-1) if has_space_superuser: writemsg(_("!!! Insufficient privileges to use " "remaining space.\n"), noiselevel=-1) if userfetch: writemsg(_("!!! You may set FEATURES=\"-userfetch\"" " in /etc/make.conf in order to fetch with\n" "!!! 
superuser privileges.\n"), noiselevel=-1) if fsmirrors and not os.path.exists(myfile_path) and has_space: for mydir in fsmirrors: mirror_file = os.path.join(mydir, myfile) try: shutil.copyfile(mirror_file, myfile_path) writemsg(_("Local mirror has file: %s\n") % myfile) break except (IOError, OSError) as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e try: mystat = os.stat(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e else: # Skip permission adjustment for symlinks, since we don't # want to modify anything outside of the primary DISTDIR, # and symlinks typically point to PORTAGE_RO_DISTDIRS. if not os.path.islink(myfile_path): try: apply_secpass_permissions(myfile_path, gid=portage_gid, mode=0o664, mask=0o2, stat_cached=mystat) except PortageException as e: if not os.access(myfile_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % (e,), noiselevel=-1) # If the file is empty then it's obviously invalid. Remove # the empty file and try to download if possible. if mystat.st_size == 0: if distdir_writable: try: os.unlink(myfile_path) except EnvironmentError: pass elif myfile not in mydigests: # We don't have a digest, but the file exists. We must # assume that it is fully downloaded. continue else: if mystat.st_size < mydigests[myfile]["size"] and \ not restrict_fetch: fetched = 1 # Try to resume this download. elif parallel_fetchonly and \ mystat.st_size == mydigests[myfile]["size"]: eout = EOutput() eout.quiet = \ mysettings.get("PORTAGE_QUIET") == "1" eout.ebegin( "%s size ;-)" % (myfile, )) eout.eend(0) continue else: digests = _filter_unaccelarated_hashes(mydigests[myfile]) if hash_filter is not None: digests = _apply_hash_filter(digests, hash_filter) verified_ok, reason = verify_all(myfile_path, digests) if not verified_ok: writemsg(_("!!! Previously fetched" " file: '%s'\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n" "!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if reason[0] == _("Insufficient data for checksum verification"): return 0 if distdir_writable: temp_filename = \ _checksum_failure_temp_file( mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) else: eout = EOutput() eout.quiet = \ mysettings.get("PORTAGE_QUIET", None) == "1" if digests: digests = list(digests) digests.sort() eout.ebegin( "%s %s ;-)" % (myfile, " ".join(digests))) eout.eend(0) continue # fetch any remaining files # Create a reversed list since that is optimal for list.pop(). uri_list = filedict[myfile][:] uri_list.reverse() checksum_failure_count = 0 tried_locations = set() while uri_list: loc = uri_list.pop() # Eliminate duplicates here in case we've switched to # "primaryuri" mode on the fly due to a checksum failure. if loc in tried_locations: continue tried_locations.add(loc) if listonly: writemsg_stdout(loc+" ", noiselevel=-1) continue # allow different fetchcommands per protocol protocol = loc[0:loc.find("://")] global_config_path = GLOBAL_CONFIG_PATH if mysettings['EPREFIX']: global_config_path = os.path.join(mysettings['EPREFIX'], GLOBAL_CONFIG_PATH.lstrip(os.sep)) missing_file_param = False fetchcommand_var = "FETCHCOMMAND_" + protocol.upper() fetchcommand = mysettings.get(fetchcommand_var) if fetchcommand is None: fetchcommand_var = "FETCHCOMMAND" fetchcommand = mysettings.get(fetchcommand_var) if fetchcommand is None: writemsg_level( _("!!! %s is unset. 
It should " "have been defined in\n!!! %s/make.globals.\n") \ % (fetchcommand_var, global_config_path), level=logging.ERROR, noiselevel=-1) return 0 if "${FILE}" not in fetchcommand: writemsg_level( _("!!! %s does not contain the required ${FILE}" " parameter.\n") % fetchcommand_var, level=logging.ERROR, noiselevel=-1) missing_file_param = True resumecommand_var = "RESUMECOMMAND_" + protocol.upper() resumecommand = mysettings.get(resumecommand_var) if resumecommand is None: resumecommand_var = "RESUMECOMMAND" resumecommand = mysettings.get(resumecommand_var) if resumecommand is None: writemsg_level( _("!!! %s is unset. It should " "have been defined in\n!!! %s/make.globals.\n") \ % (resumecommand_var, global_config_path), level=logging.ERROR, noiselevel=-1) return 0 if "${FILE}" not in resumecommand: writemsg_level( _("!!! %s does not contain the required ${FILE}" " parameter.\n") % resumecommand_var, level=logging.ERROR, noiselevel=-1) missing_file_param = True if missing_file_param: writemsg_level( _("!!! Refer to the make.conf(5) man page for " "information about how to\n!!! correctly specify " "FETCHCOMMAND and RESUMECOMMAND.\n"), level=logging.ERROR, noiselevel=-1) if myfile != os.path.basename(loc): return 0 if not can_fetch: if fetched != 2: try: mysize = os.stat(myfile_path).st_size except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e mysize = 0 if mysize == 0: writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile, noiselevel=-1) elif size is None or size > mysize: writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile, noiselevel=-1) else: writemsg(_("!!! File %s is incorrect size, " "but unable to retry.\n") % myfile, noiselevel=-1) return 0 else: continue if fetched != 2 and has_space: #we either need to resume or start the download if fetched == 1: try: mystat = os.stat(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 else: if mystat.st_size < fetch_resume_size: writemsg(_(">>> Deleting distfile with size " "%d (smaller than " "PORTAGE_FETCH_RESU" "ME_MIN_SIZE)\n") % mystat.st_size) try: os.unlink(myfile_path) except OSError as e: if e.errno not in \ (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 if fetched == 1: #resume mode: writemsg(_(">>> Resuming download...\n")) locfetch=resumecommand command_var = resumecommand_var else: #normal mode: locfetch=fetchcommand command_var = fetchcommand_var writemsg_stdout(_(">>> Downloading '%s'\n") % \ _hide_url_passwd(loc)) variables = { "DISTDIR": mysettings["DISTDIR"], "URI": loc, "FILE": myfile } myfetch = shlex_split(locfetch) myfetch = [varexpand(x, mydict=variables) for x in myfetch] myret = -1 try: myret = _spawn_fetch(mysettings, myfetch) finally: try: apply_secpass_permissions(myfile_path, gid=portage_gid, mode=0o664, mask=0o2) except FileNotFound: pass except PortageException as e: if not os.access(myfile_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % str(e), noiselevel=-1) del e # If the file is empty then it's obviously invalid. Don't # trust the return value from the fetcher. Remove the # empty file and try to download again. 
try: if os.stat(myfile_path).st_size == 0: os.unlink(myfile_path) fetched = 0 continue except EnvironmentError: pass if mydigests is not None and myfile in mydigests: try: mystat = os.stat(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 else: if stat.S_ISDIR(mystat.st_mode): # This can happen if FETCHCOMMAND erroneously # contains wget's -P option where it should # instead have -O. writemsg_level( _("!!! The command specified in the " "%s variable appears to have\n!!! " "created a directory instead of a " "normal file.\n") % command_var, level=logging.ERROR, noiselevel=-1) writemsg_level( _("!!! Refer to the make.conf(5) " "man page for information about how " "to\n!!! correctly specify " "FETCHCOMMAND and RESUMECOMMAND.\n"), level=logging.ERROR, noiselevel=-1) return 0 # no exception? file exists. let digestcheck() report # an appropriately for size or checksum errors # If the fetcher reported success and the file is # too small, it's probably because the digest is # bad (upstream changed the distfile). In this # case we don't want to attempt to resume. Show a # digest verification failure to that the user gets # a clue about what just happened. if myret != os.EX_OK and \ mystat.st_size < mydigests[myfile]["size"]: # Fetch failed... Try the next one... Kill 404 files though. if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")): html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M) with io.open( _unicode_encode(myfile_path, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace' ) as f: if html404.search(f.read()): try: os.unlink(mysettings["DISTDIR"]+"/"+myfile) writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")) fetched = 0 continue except (IOError, OSError): pass fetched = 1 continue if True: # File is the correct size--check the checksums for the fetched # file NOW, for those users who don't have a stable/continuous # net connection. This way we have a chance to try to download # from another mirror... digests = _filter_unaccelarated_hashes(mydigests[myfile]) if hash_filter is not None: digests = _apply_hash_filter(digests, hash_filter) verified_ok, reason = verify_all(myfile_path, digests) if not verified_ok: writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if reason[0] == _("Insufficient data for checksum verification"): return 0 temp_filename = \ _checksum_failure_temp_file( mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) fetched=0 checksum_failure_count += 1 if checksum_failure_count == \ checksum_failure_primaryuri: # Switch to "primaryuri" mode in order # to increase the probablility of # of success. 
primaryuris = \ primaryuri_dict.get(myfile) if primaryuris: uri_list.extend( reversed(primaryuris)) if checksum_failure_count >= \ checksum_failure_max_tries: break else: eout = EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1" if digests: eout.ebegin("%s %s ;-)" % \ (myfile, " ".join(sorted(digests)))) eout.eend(0) fetched=2 break else: if not myret: fetched=2 break elif mydigests!=None: writemsg(_("No digest file available and download failed.\n\n"), noiselevel=-1) finally: if use_locks and file_lock: unlockfile(file_lock) file_lock = None if listonly: writemsg_stdout("\n", noiselevel=-1) if fetched != 2: if restrict_fetch and not restrict_fetch_msg: restrict_fetch_msg = True msg = _("\n!!! %s/%s" " has fetch restriction turned on.\n" "!!! This probably means that this " "ebuild's files must be downloaded\n" "!!! manually. See the comments in" " the ebuild for more information.\n\n") % \ (mysettings["CATEGORY"], mysettings["PF"]) writemsg_level(msg, level=logging.ERROR, noiselevel=-1) elif restrict_fetch: pass elif listonly: pass elif not filedict[myfile]: writemsg(_("Warning: No mirrors available for file" " '%s'\n") % (myfile), noiselevel=-1) else: writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile, noiselevel=-1) if listonly: failed_files.add(myfile) continue elif fetchonly: failed_files.add(myfile) continue return 0 if failed_files: return 0 return 1
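The download loop above turns FETCHCOMMAND / RESUMECOMMAND templates into an argv list by splitting them with shlex and expanding ${DISTDIR}, ${URI} and ${FILE} through varexpand before handing the result to _spawn_fetch(). A minimal stdlib-only sketch of that substitution step follows; the helper is a simplified stand-in for portage.util.varexpand, and the wget command line is a hypothetical example rather than the make.globals default:

import shlex

def expand_fetch_template(template, variables):
    """Split a FETCHCOMMAND-style template and substitute ${VAR} tokens.

    Simplified stand-in for portage.util.varexpand: it only understands
    the ${VAR} syntax used by FETCHCOMMAND/RESUMECOMMAND templates.
    """
    argv = []
    for token in shlex.split(template):
        for name, value in variables.items():
            token = token.replace("${%s}" % name, value)
        argv.append(token)
    return argv

# Hypothetical template and values:
fetchcommand = 'wget -t 3 -T 60 -O "${DISTDIR}/${FILE}" "${URI}"'
argv = expand_fetch_template(fetchcommand, {
    "DISTDIR": "/var/cache/distfiles",
    "URI": "https://example.org/foo-1.0.tar.gz",
    "FILE": "foo-1.0.tar.gz",
})
# argv is now suitable for a spawn()-style call, just like myfetch above.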
def _xpak_start(self): tar_options = "" if "xattr" in self.features: process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = process.communicate()[0] if b"--xattrs" in output: tar_options = ["--xattrs", "--xattrs-include='*'"] for x in portage.util.shlex_split( self.env.get("PORTAGE_XATTR_EXCLUDE", "")): tar_options.append( portage._shell_quote("--xattrs-exclude=%s" % x)) tar_options = " ".join(tar_options) decomp = _compressors.get(compression_probe(self.pkg_path)) if decomp is not None: decomp_cmd = decomp.get("decompress") elif tarfile.is_tarfile( portage._unicode_encode(self.pkg_path, encoding=portage._encodings["fs"], errors="strict")): decomp_cmd = "cat" decomp = { "compress": "cat", "package": "sys-apps/coreutils", } else: decomp_cmd = None if decomp_cmd is None: self.scheduler.output( "!!! %s\n" % _("File compression header unrecognized: %s") % self.pkg_path, log_path=self.logfile, background=self.background, level=logging.ERROR, ) self.returncode = 1 self._async_wait() return try: decompression_binary = shlex_split( varexpand(decomp_cmd, mydict=self.env))[0] except IndexError: decompression_binary = "" if find_binary(decompression_binary) is None: # Try alternative command if it exists if decomp.get("decompress_alt"): decomp_cmd = decomp.get("decompress_alt") try: decompression_binary = shlex_split( varexpand(decomp_cmd, mydict=self.env))[0] except IndexError: decompression_binary = "" if find_binary(decompression_binary) is None: missing_package = decomp.get("package") self.scheduler.output( "!!! %s\n" % _("File compression unsupported %s.\n Command was: %s.\n Maybe missing package: %s" ) % ( self.pkg_path, varexpand(decomp_cmd, mydict=self.env), missing_package, ), log_path=self.logfile, background=self.background, level=logging.ERROR, ) self.returncode = 1 self._async_wait() return pkg_xpak = portage.xpak.tbz2(self.pkg_path) pkg_xpak.scan() # SIGPIPE handling (128 + SIGPIPE) should be compatible with # assert_sigpipe_ok() that's used by the ebuild unpack() helper. self.args = [ self._shell_binary, "-c", ("cmd0=(head -c %d -- %s) cmd1=(%s) cmd2=(tar -xp %s -C %s -f -); " + '"${cmd0[@]}" | "${cmd1[@]}" | "${cmd2[@]}"; ' + "p=(${PIPESTATUS[@]}) ; for i in {0..2}; do " + "if [[ ${p[$i]} != 0 && ${p[$i]} != %d ]] ; then " + 'echo command $(eval "echo \\"\'\\${cmd$i[*]}\'\\"") ' + "failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; done; " + "if [ ${p[$i]} != 0 ] ; then " + 'echo command $(eval "echo \\"\'\\${cmd$i[*]}\'\\"") ' + "failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; " + "exit 0 ;") % ( pkg_xpak.filestat.st_size - pkg_xpak.xpaksize, portage._shell_quote(self.pkg_path), decomp_cmd, tar_options, portage._shell_quote(self.image_dir), 128 + signal.SIGPIPE, ), ] SpawnProcess._start(self)
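_xpak_start() delegates the head | decompressor | tar pipeline to bash and inspects ${PIPESTATUS[@]} so that a failure in any stage aborts the unpack, while an exit status of 128+SIGPIPE is tolerated for the upstream stages (a later stage may close the pipe early). The status-checking idea can be sketched directly with subprocess; this is an illustration under those assumptions, not the command Portage actually runs:

import signal
import subprocess

def run_pipeline(stages):
    """Run argv stages as a pipeline and enforce PIPESTATUS-style checks.

    Every stage must exit 0; 128+SIGPIPE is accepted for all but the
    final stage, mirroring the shell snippet built in _xpak_start().
    """
    procs = []
    prev_out = None
    for argv in stages:
        proc = subprocess.Popen(argv, stdin=prev_out, stdout=subprocess.PIPE)
        if prev_out is not None:
            prev_out.close()  # only the consumer keeps the pipe open, so the
                              # producer gets SIGPIPE if a later stage dies
        procs.append(proc)
        prev_out = proc.stdout
    procs[-1].stdout.read()
    statuses = [p.wait() for p in procs]
    sigpipe = 128 + signal.SIGPIPE
    for i, (argv, status) in enumerate(zip(stages, statuses)):
        if status != 0 and not (status == sigpipe and i < len(stages) - 1):
            raise RuntimeError("command %r failed with status %d" % (argv, status))
    return statuses

# Hypothetical usage, analogous to the cmd0 | cmd1 | cmd2 pipeline above:
# run_pipeline([["head", "-c", "1048576", "--", "pkg.tbz2"],
#               ["bzip2", "-d"],
#               ["tar", "-xp", "-C", "/tmp/image", "-f", "-"]])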
def _start(self): tar_options = "" if "xattr" in self.features: process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = process.communicate()[0] if b"--xattrs" in output: tar_options = ["--xattrs", "--xattrs-include='*'"] for x in portage.util.shlex_split(self.env.get("PORTAGE_XATTR_EXCLUDE", "")): tar_options.append(portage._shell_quote("--xattrs-exclude=%s" % x)) tar_options = " ".join(tar_options) decomp = _compressors.get(compression_probe(self.pkg_path)) if decomp is not None: decomp_cmd = decomp.get("decompress") else: decomp_cmd = None if decomp_cmd is None: self.scheduler.output("!!! %s\n" % _("File compression header unrecognized: %s") % self.pkg_path, log_path=self.logfile, background=self.background, level=logging.ERROR) self.returncode = 1 self._async_wait() return try: decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0] except IndexError: decompression_binary = "" if find_binary(decompression_binary) is None: # Try alternative command if it exists if _compressors.get(compression_probe(self.pkg_path)).get("decompress_alt"): decomp_cmd = _compressors.get( compression_probe(self.pkg_path)).get("decompress_alt") try: decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0] except IndexError: decompression_binary = "" if find_binary(decompression_binary) is None: missing_package = _compressors.get(compression_probe(self.pkg_path)).get("package") self.scheduler.output("!!! %s\n" % _("File compression unsupported %s.\n Command was: %s.\n Maybe missing package: %s") % (self.pkg_path, varexpand(decomp_cmd, mydict=self.env), missing_package), log_path=self.logfile, background=self.background, level=logging.ERROR) self.returncode = 1 self._async_wait() return # Add -q to decomp_cmd opts, in order to avoid "trailing garbage # after EOF ignored" warning messages due to xpak trailer. # SIGPIPE handling (128 + SIGPIPE) should be compatible with # assert_sigpipe_ok() that's used by the ebuild unpack() helper. self.args = [self._shell_binary, "-c", ("%s -cq -- %s | tar -xp %s -C %s -f - ; " + \ "p=(${PIPESTATUS[@]}) ; " + \ "if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \ "if [ ${p[1]} != 0 ] ; then " + \ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \ "exit 0 ;") % \ (decomp_cmd, portage._shell_quote(self.pkg_path), tar_options, portage._shell_quote(self.image_dir))] SpawnProcess._start(self)
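Both extractor variants resolve the decompression command by expanding it against the environment, taking the first argv element, and checking it with find_binary(); if that binary is missing, the "decompress_alt" entry is tried before the unpack is aborted. A rough stdlib equivalent of that resolution step, with shutil.which standing in for portage.process.find_binary and a hand-written candidate list in place of the _compressors table:

import shlex
import shutil

def resolve_decompressor(candidates):
    """Return the first decompression command whose binary is installed.

    ``candidates`` is an ordered list of command strings, e.g. the
    preferred "decompress" command followed by its "decompress_alt"
    fallback.  Returns None when no binary can be found, in which case
    the caller reports "File compression unsupported".
    """
    for cmd in candidates:
        argv = shlex.split(cmd)
        if argv and shutil.which(argv[0]) is not None:
            return cmd
    return None

# Hypothetical candidate list for a bzip2-compressed binary package:
cmd = resolve_decompressor(["lbzip2 -d", "bzip2 -d"])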
def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None, allow_missing_digests=True, force=False): """ Fetch files to DISTDIR and also verify digests if they are available. @param myuris: Maps each file name to a tuple of available fetch URIs. @type myuris: dict @param mysettings: Portage config instance. @type mysettings: portage.config @param listonly: Only print URIs and do not actually fetch them. @type listonly: bool @param fetchonly: Do not block for files that are locked by a concurrent fetcher process. This means that the function can return successfully *before* all files have been successfully fetched! @type fetchonly: bool @param use_locks: Enable locks. This parameter is ineffective if FEATURES=distlocks is disabled in the portage config! @type use_locks: bool @param digests: Maps each file name to a dict of digest types and values. @type digests: dict @param allow_missing_digests: Enable fetch even if there are no digests available for verification. @type allow_missing_digests: bool @param force: Force download, even when a file already exists in DISTDIR. This is most useful when there are no digests available, since otherwise download will be automatically forced if the existing file does not match the available digests. Also, this avoids the need to remove the existing file in advance, which makes it possible to atomically replace the file and avoid interference with concurrent processes. @type force: bool @rtype: int @return: 1 if successful, 0 otherwise. """ if force and digests: # Since the force parameter can trigger unnecessary fetch when the # digests match, do not allow force=True when digests are provided. raise PortageException(_('fetch: force=True is not allowed when digests are provided')) if not myuris: return 1 features = mysettings.features restrict = mysettings.get("PORTAGE_RESTRICT","").split() userfetch = portage.data.secpass >= 2 and "userfetch" in features # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring. restrict_mirror = "mirror" in restrict or "nomirror" in restrict if restrict_mirror: if ("mirror" in features) and ("lmirror" not in features): # lmirror should allow you to bypass mirror restrictions. # XXX: This is not a good thing, and is temporary at best. print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch.")) return 1 # Generally, downloading the same file repeatedly from # every single available mirror is a waste of bandwidth # and time, so there needs to be a cap. checksum_failure_max_tries = 5 v = checksum_failure_max_tries try: v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", checksum_failure_max_tries)) except (ValueError, OverflowError): writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" " contains non-integer value: '%s'\n") % \ mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " "default value: %s\n") % checksum_failure_max_tries, noiselevel=-1) v = checksum_failure_max_tries if v < 1: writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" " contains value less than 1: '%s'\n") % v, noiselevel=-1) writemsg(_("!!! 
Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " "default value: %s\n") % checksum_failure_max_tries, noiselevel=-1) v = checksum_failure_max_tries checksum_failure_max_tries = v del v fetch_resume_size_default = "350K" fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE") if fetch_resume_size is not None: fetch_resume_size = "".join(fetch_resume_size.split()) if not fetch_resume_size: # If it's undefined or empty, silently use the default. fetch_resume_size = fetch_resume_size_default match = _fetch_resume_size_re.match(fetch_resume_size) if match is None or \ (match.group(2).upper() not in _size_suffix_map): writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" " contains an unrecognized format: '%s'\n") % \ mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " "default value: %s\n") % fetch_resume_size_default, noiselevel=-1) fetch_resume_size = None if fetch_resume_size is None: fetch_resume_size = fetch_resume_size_default match = _fetch_resume_size_re.match(fetch_resume_size) fetch_resume_size = int(match.group(1)) * \ 2 ** _size_suffix_map[match.group(2).upper()] # Behave like the package has RESTRICT="primaryuri" after a # couple of checksum failures, to increase the probablility # of success before checksum_failure_max_tries is reached. checksum_failure_primaryuri = 2 thirdpartymirrors = mysettings.thirdpartymirrors() # In the background parallel-fetch process, it's safe to skip checksum # verification of pre-existing files in $DISTDIR that have the correct # file size. The parent process will verify their checksums prior to # the unpack phase. parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings if parallel_fetchonly: fetchonly = 1 check_config_instance(mysettings) custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"], CUSTOM_MIRRORS_FILE), recursive=1) mymirrors=[] if listonly or ("distlocks" not in features): use_locks = 0 distdir_writable = os.access(mysettings["DISTDIR"], os.W_OK) fetch_to_ro = 0 if "skiprocheck" in features: fetch_to_ro = 1 if not distdir_writable and fetch_to_ro: if use_locks: writemsg(colorize("BAD", _("!!! For fetching to a read-only filesystem, " "locking should be turned off.\n")), noiselevel=-1) writemsg(_("!!! This can be done by adding -distlocks to " "FEATURES in /etc/portage/make.conf\n"), noiselevel=-1) # use_locks = 0 # local mirrors are always added if try_mirrors and "local" in custommirrors: mymirrors += custommirrors["local"] if restrict_mirror: # We don't add any mirrors. 
pass else: if try_mirrors: mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x] hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", "")) if hash_filter.transparent: hash_filter = None skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1" if skip_manifest: allow_missing_digests = True pkgdir = mysettings.get("O") if digests is None and not (pkgdir is None or skip_manifest): mydigests = mysettings.repositories.get_repo_for_location( os.path.dirname(os.path.dirname(pkgdir))).load_manifest( pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST") elif digests is None or skip_manifest: # no digests because fetch was not called for a specific package mydigests = {} else: mydigests = digests ro_distdirs = [x for x in \ shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \ if os.path.isdir(x)] fsmirrors = [] for x in range(len(mymirrors)-1,-1,-1): if mymirrors[x] and mymirrors[x][0]=='/': fsmirrors += [mymirrors[x]] del mymirrors[x] restrict_fetch = "fetch" in restrict force_mirror = "force-mirror" in features and not restrict_mirror custom_local_mirrors = custommirrors.get("local", []) if restrict_fetch: # With fetch restriction, a normal uri may only be fetched from # custom local mirrors (if available). A mirror:// uri may also # be fetched from specific mirrors (effectively overriding fetch # restriction, but only for specific mirrors). locations = custom_local_mirrors else: locations = mymirrors file_uri_tuples = [] # Check for 'items' attribute since OrderedDict is not a dict. if hasattr(myuris, 'items'): for myfile, uri_set in myuris.items(): for myuri in uri_set: file_uri_tuples.append((myfile, myuri)) if not uri_set: file_uri_tuples.append((myfile, None)) else: for myuri in myuris: if urlparse(myuri).scheme: file_uri_tuples.append((os.path.basename(myuri), myuri)) else: file_uri_tuples.append((os.path.basename(myuri), None)) filedict = OrderedDict() primaryuri_dict = {} thirdpartymirror_uris = {} for myfile, myuri in file_uri_tuples: if myfile not in filedict: filedict[myfile]=[] if distdir_writable: mirror_cache = os.path.join(mysettings["DISTDIR"], ".mirror-cache.json") else: mirror_cache = None for l in locations: filedict[myfile].append(functools.partial( get_mirror_url, l, myfile, mysettings, mirror_cache)) if myuri is None: continue if myuri[:9]=="mirror://": eidx = myuri.find("/", 9) if eidx != -1: mirrorname = myuri[9:eidx] path = myuri[eidx+1:] # Try user-defined mirrors first if mirrorname in custommirrors: for cmirr in custommirrors[mirrorname]: filedict[myfile].append( cmirr.rstrip("/") + "/" + path) # now try the official mirrors if mirrorname in thirdpartymirrors: uris = [locmirr.rstrip("/") + "/" + path \ for locmirr in thirdpartymirrors[mirrorname]] random.shuffle(uris) filedict[myfile].extend(uris) thirdpartymirror_uris.setdefault(myfile, []).extend(uris) if mirrorname not in custommirrors and \ mirrorname not in thirdpartymirrors: writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname)) else: writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1) writemsg(" %s\n" % (myuri), noiselevel=-1) else: if restrict_fetch or force_mirror: # Only fetch from specific mirrors is allowed. continue primaryuris = primaryuri_dict.get(myfile) if primaryuris is None: primaryuris = [] primaryuri_dict[myfile] = primaryuris primaryuris.append(myuri) # Order primaryuri_dict values to match that in SRC_URI. 
for uris in primaryuri_dict.values(): uris.reverse() # Prefer thirdpartymirrors over normal mirrors in cases when # the file does not yet exist on the normal mirrors. for myfile, uris in thirdpartymirror_uris.items(): primaryuri_dict.setdefault(myfile, []).extend(uris) # Now merge primaryuri values into filedict (includes mirrors # explicitly referenced in SRC_URI). if "primaryuri" in restrict: for myfile, uris in filedict.items(): filedict[myfile] = primaryuri_dict.get(myfile, []) + uris else: for myfile in filedict: filedict[myfile] += primaryuri_dict.get(myfile, []) can_fetch=True if listonly: can_fetch = False if can_fetch and not fetch_to_ro: try: _ensure_distdir(mysettings, mysettings["DISTDIR"]) except PortageException as e: if not os.path.isdir(mysettings["DISTDIR"]): writemsg("!!! %s\n" % str(e), noiselevel=-1) writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1) writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1) if can_fetch and \ not fetch_to_ro and \ not os.access(mysettings["DISTDIR"], os.W_OK): writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"], noiselevel=-1) can_fetch = False distdir_writable = can_fetch and not fetch_to_ro failed_files = set() restrict_fetch_msg = False valid_hashes = set(get_valid_checksum_keys()) valid_hashes.discard("size") for myfile in filedict: """ fetched status 0 nonexistent 1 partially downloaded 2 completely downloaded """ fetched = 0 orig_digests = mydigests.get(myfile, {}) if not (allow_missing_digests or listonly): verifiable_hash_types = set(orig_digests).intersection(valid_hashes) if not verifiable_hash_types: expected = " ".join(sorted(valid_hashes)) got = set(orig_digests) got.discard("size") got = " ".join(sorted(got)) reason = (_("Insufficient data for checksum verification"), got, expected) writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if fetchonly: failed_files.add(myfile) continue else: return 0 size = orig_digests.get("size") if size == 0: # Zero-byte distfiles are always invalid, so discard their digests. del mydigests[myfile] orig_digests.clear() size = None pruned_digests = orig_digests if parallel_fetchonly: pruned_digests = {} if size is not None: pruned_digests["size"] = size myfile_path = os.path.join(mysettings["DISTDIR"], myfile) download_path = myfile_path if fetch_to_ro else myfile_path + _download_suffix has_space = True has_space_superuser = True file_lock = None if listonly: writemsg_stdout("\n", noiselevel=-1) else: # check if there is enough space in DISTDIR to completely store myfile # overestimate the filesize so we aren't bitten by FS overhead vfs_stat = None if size is not None and hasattr(os, "statvfs"): try: vfs_stat = os.statvfs(mysettings["DISTDIR"]) except OSError as e: writemsg_level("!!! 
statvfs('%s'): %s\n" % (mysettings["DISTDIR"], e), noiselevel=-1, level=logging.ERROR) del e if vfs_stat is not None: try: mysize = os.stat(myfile_path).st_size except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e mysize = 0 if (size - mysize + vfs_stat.f_bsize) >= \ (vfs_stat.f_bsize * vfs_stat.f_bavail): if (size - mysize + vfs_stat.f_bsize) >= \ (vfs_stat.f_bsize * vfs_stat.f_bfree): has_space_superuser = False if not has_space_superuser: has_space = False elif portage.data.secpass < 2: has_space = False elif userfetch: has_space = False if distdir_writable and use_locks: lock_kwargs = {} if fetchonly: lock_kwargs["flags"] = os.O_NONBLOCK try: file_lock = lockfile(myfile_path, wantnewlockfile=1, **lock_kwargs) except TryAgain: writemsg(_(">>> File '%s' is already locked by " "another fetcher. Continuing...\n") % myfile, noiselevel=-1) continue try: if not listonly: eout = EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET") == "1" match, mystat = _check_distfile( myfile_path, pruned_digests, eout, hash_filter=hash_filter) if match and not force: # Skip permission adjustment for symlinks, since we don't # want to modify anything outside of the primary DISTDIR, # and symlinks typically point to PORTAGE_RO_DISTDIRS. if distdir_writable and not os.path.islink(myfile_path): try: apply_secpass_permissions(myfile_path, gid=portage_gid, mode=0o664, mask=0o2, stat_cached=mystat) except PortageException as e: if not os.access(myfile_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % str(e), noiselevel=-1) del e continue # Remove broken symlinks or symlinks to files which # _check_distfile did not match above. if distdir_writable and mystat is None or os.path.islink(myfile_path): try: os.unlink(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise mystat = None if mystat is not None: if stat.S_ISDIR(mystat.st_mode): writemsg_level( _("!!! Unable to fetch file since " "a directory is in the way: \n" "!!! %s\n") % myfile_path, level=logging.ERROR, noiselevel=-1) return 0 if distdir_writable and not force: # Since _check_distfile did not match above, the file # is either corrupt or its identity has changed since # the last time it was fetched, so rename it. temp_filename = _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) # Stat the temporary download file for comparison with # fetch_resume_size. try: mystat = os.stat(download_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise mystat = None if mystat is not None: if mystat.st_size == 0: if distdir_writable: try: os.unlink(download_path) except OSError: pass elif distdir_writable and size is not None: if mystat.st_size < fetch_resume_size and \ mystat.st_size < size: # If the file already exists and the size does not # match the existing digests, it may be that the # user is attempting to update the digest. In this # case, the digestgen() function will advise the # user to use `ebuild --force foo.ebuild manifest` # in order to force the old digests to be replaced. # Since the user may want to keep this file, rename # it instead of deleting it. writemsg(_(">>> Renaming distfile with size " "%d (smaller than " "PORTAGE_FETCH_RESU" "ME_MIN_SIZE)\n") % mystat.st_size) temp_filename = \ _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], os.path.basename(download_path)) writemsg_stdout(_("Refetching... 
" "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) elif mystat.st_size >= size: temp_filename = \ _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], os.path.basename(download_path)) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) if distdir_writable and ro_distdirs: readonly_file = None for x in ro_distdirs: filename = os.path.join(x, myfile) match, mystat = _check_distfile( filename, pruned_digests, eout, hash_filter=hash_filter) if match: readonly_file = filename break if readonly_file is not None: try: os.unlink(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e os.symlink(readonly_file, myfile_path) continue # this message is shown only after we know that # the file is not already fetched if not has_space: writemsg(_("!!! Insufficient space to store %s in %s\n") % \ (myfile, mysettings["DISTDIR"]), noiselevel=-1) if has_space_superuser: writemsg(_("!!! Insufficient privileges to use " "remaining space.\n"), noiselevel=-1) if userfetch: writemsg(_("!!! You may set FEATURES=\"-userfetch\"" " in /etc/portage/make.conf in order to fetch with\n" "!!! superuser privileges.\n"), noiselevel=-1) if fsmirrors and not os.path.exists(myfile_path) and has_space: for mydir in fsmirrors: mirror_file = os.path.join(mydir, myfile) try: shutil.copyfile(mirror_file, download_path) writemsg(_("Local mirror has file: %s\n") % myfile) break except (IOError, OSError) as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e try: mystat = os.stat(download_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e else: # Skip permission adjustment for symlinks, since we don't # want to modify anything outside of the primary DISTDIR, # and symlinks typically point to PORTAGE_RO_DISTDIRS. if not os.path.islink(download_path): try: apply_secpass_permissions(download_path, gid=portage_gid, mode=0o664, mask=0o2, stat_cached=mystat) except PortageException as e: if not os.access(download_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % (e,), noiselevel=-1) # If the file is empty then it's obviously invalid. Remove # the empty file and try to download if possible. if mystat.st_size == 0: if distdir_writable: try: os.unlink(download_path) except EnvironmentError: pass elif not orig_digests: # We don't have a digest, but the file exists. We must # assume that it is fully downloaded. if not force: continue else: if (mydigests[myfile].get("size") is not None and mystat.st_size < mydigests[myfile]["size"] and not restrict_fetch): fetched = 1 # Try to resume this download. elif parallel_fetchonly and \ mystat.st_size == mydigests[myfile]["size"]: eout = EOutput() eout.quiet = \ mysettings.get("PORTAGE_QUIET") == "1" eout.ebegin( "%s size ;-)" % (myfile, )) eout.eend(0) continue else: digests = _filter_unaccelarated_hashes(mydigests[myfile]) if hash_filter is not None: digests = _apply_hash_filter(digests, hash_filter) verified_ok, reason = verify_all(download_path, digests) if not verified_ok: writemsg(_("!!! Previously fetched" " file: '%s'\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n" "!!! 
Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if reason[0] == _("Insufficient data for checksum verification"): return 0 if distdir_writable: temp_filename = \ _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], os.path.basename(download_path)) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) else: if not fetch_to_ro: _movefile(download_path, myfile_path, mysettings=mysettings) eout = EOutput() eout.quiet = \ mysettings.get("PORTAGE_QUIET", None) == "1" if digests: digests = list(digests) digests.sort() eout.ebegin( "%s %s ;-)" % (myfile, " ".join(digests))) eout.eend(0) continue # fetch any remaining files # Create a reversed list since that is optimal for list.pop(). uri_list = filedict[myfile][:] uri_list.reverse() checksum_failure_count = 0 tried_locations = set() while uri_list: loc = uri_list.pop() if isinstance(loc, functools.partial): loc = loc() # Eliminate duplicates here in case we've switched to # "primaryuri" mode on the fly due to a checksum failure. if loc in tried_locations: continue tried_locations.add(loc) if listonly: writemsg_stdout(loc+" ", noiselevel=-1) continue # allow different fetchcommands per protocol protocol = loc[0:loc.find("://")] global_config_path = GLOBAL_CONFIG_PATH if portage.const.EPREFIX: global_config_path = os.path.join(portage.const.EPREFIX, GLOBAL_CONFIG_PATH.lstrip(os.sep)) missing_file_param = False fetchcommand_var = "FETCHCOMMAND_" + protocol.upper() fetchcommand = mysettings.get(fetchcommand_var) if fetchcommand is None: fetchcommand_var = "FETCHCOMMAND" fetchcommand = mysettings.get(fetchcommand_var) if fetchcommand is None: writemsg_level( _("!!! %s is unset. It should " "have been defined in\n!!! %s/make.globals.\n") \ % (fetchcommand_var, global_config_path), level=logging.ERROR, noiselevel=-1) return 0 if "${FILE}" not in fetchcommand: writemsg_level( _("!!! %s does not contain the required ${FILE}" " parameter.\n") % fetchcommand_var, level=logging.ERROR, noiselevel=-1) missing_file_param = True resumecommand_var = "RESUMECOMMAND_" + protocol.upper() resumecommand = mysettings.get(resumecommand_var) if resumecommand is None: resumecommand_var = "RESUMECOMMAND" resumecommand = mysettings.get(resumecommand_var) if resumecommand is None: writemsg_level( _("!!! %s is unset. It should " "have been defined in\n!!! %s/make.globals.\n") \ % (resumecommand_var, global_config_path), level=logging.ERROR, noiselevel=-1) return 0 if "${FILE}" not in resumecommand: writemsg_level( _("!!! %s does not contain the required ${FILE}" " parameter.\n") % resumecommand_var, level=logging.ERROR, noiselevel=-1) missing_file_param = True if missing_file_param: writemsg_level( _("!!! Refer to the make.conf(5) man page for " "information about how to\n!!! correctly specify " "FETCHCOMMAND and RESUMECOMMAND.\n"), level=logging.ERROR, noiselevel=-1) if myfile != os.path.basename(loc): return 0 if not can_fetch: if fetched != 2: try: mysize = os.stat(download_path).st_size except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e mysize = 0 if mysize == 0: writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile, noiselevel=-1) elif size is None or size > mysize: writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile, noiselevel=-1) else: writemsg(_("!!! 
File %s is incorrect size, " "but unable to retry.\n") % myfile, noiselevel=-1) return 0 else: continue if fetched != 2 and has_space: #we either need to resume or start the download if fetched == 1: try: mystat = os.stat(download_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 else: if distdir_writable and mystat.st_size < fetch_resume_size: writemsg(_(">>> Deleting distfile with size " "%d (smaller than " "PORTAGE_FETCH_RESU" "ME_MIN_SIZE)\n") % mystat.st_size) try: os.unlink(download_path) except OSError as e: if e.errno not in \ (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 if fetched == 1: #resume mode: writemsg(_(">>> Resuming download...\n")) locfetch=resumecommand command_var = resumecommand_var else: #normal mode: locfetch=fetchcommand command_var = fetchcommand_var writemsg_stdout(_(">>> Downloading '%s'\n") % \ _hide_url_passwd(loc)) variables = { "URI": loc, "FILE": os.path.basename(download_path) } for k in ("DISTDIR", "PORTAGE_SSH_OPTS"): v = mysettings.get(k) if v is not None: variables[k] = v myfetch = shlex_split(locfetch) myfetch = [varexpand(x, mydict=variables) for x in myfetch] myret = -1 try: myret = _spawn_fetch(mysettings, myfetch) finally: try: apply_secpass_permissions(download_path, gid=portage_gid, mode=0o664, mask=0o2) except FileNotFound: pass except PortageException as e: if not os.access(download_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % str(e), noiselevel=-1) del e # If the file is empty then it's obviously invalid. Don't # trust the return value from the fetcher. Remove the # empty file and try to download again. try: mystat = os.lstat(download_path) if mystat.st_size == 0 or (stat.S_ISLNK(mystat.st_mode) and not os.path.exists(download_path)): os.unlink(download_path) fetched = 0 continue except EnvironmentError: pass if mydigests is not None and myfile in mydigests: try: mystat = os.stat(download_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 else: if stat.S_ISDIR(mystat.st_mode): # This can happen if FETCHCOMMAND erroneously # contains wget's -P option where it should # instead have -O. writemsg_level( _("!!! The command specified in the " "%s variable appears to have\n!!! " "created a directory instead of a " "normal file.\n") % command_var, level=logging.ERROR, noiselevel=-1) writemsg_level( _("!!! Refer to the make.conf(5) " "man page for information about how " "to\n!!! correctly specify " "FETCHCOMMAND and RESUMECOMMAND.\n"), level=logging.ERROR, noiselevel=-1) return 0 # no exception? file exists. let digestcheck() report # an appropriately for size or checksum errors # If the fetcher reported success and the file is # too small, it's probably because the digest is # bad (upstream changed the distfile). In this # case we don't want to attempt to resume. Show a # digest verification failure to that the user gets # a clue about what just happened. if myret != os.EX_OK and \ mystat.st_size < mydigests[myfile]["size"]: # Fetch failed... Try the next one... Kill 404 files though. if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")): html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M) with io.open( _unicode_encode(download_path, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace' ) as f: if html404.search(f.read()): try: os.unlink(download_path) writemsg(_(">>> Deleting invalid distfile. 
(Improper 404 redirect from server.)\n")) fetched = 0 continue except (IOError, OSError): pass fetched = 1 continue if True: # File is the correct size--check the checksums for the fetched # file NOW, for those users who don't have a stable/continuous # net connection. This way we have a chance to try to download # from another mirror... digests = _filter_unaccelarated_hashes(mydigests[myfile]) if hash_filter is not None: digests = _apply_hash_filter(digests, hash_filter) verified_ok, reason = verify_all(download_path, digests) if not verified_ok: writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if reason[0] == _("Insufficient data for checksum verification"): return 0 if distdir_writable: temp_filename = \ _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], os.path.basename(download_path)) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) fetched=0 checksum_failure_count += 1 if checksum_failure_count == \ checksum_failure_primaryuri: # Switch to "primaryuri" mode in order # to increase the probablility of # of success. primaryuris = \ primaryuri_dict.get(myfile) if primaryuris: uri_list.extend( reversed(primaryuris)) if checksum_failure_count >= \ checksum_failure_max_tries: break else: if not fetch_to_ro: _movefile(download_path, myfile_path, mysettings=mysettings) eout = EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1" if digests: eout.ebegin("%s %s ;-)" % \ (myfile, " ".join(sorted(digests)))) eout.eend(0) fetched=2 break else: # no digests available if not myret: if not fetch_to_ro: _movefile(download_path, myfile_path, mysettings=mysettings) fetched=2 break elif mydigests!=None: writemsg(_("No digest file available and download failed.\n\n"), noiselevel=-1) finally: if use_locks and file_lock: unlockfile(file_lock) file_lock = None if listonly: writemsg_stdout("\n", noiselevel=-1) if fetched != 2: if restrict_fetch and not restrict_fetch_msg: restrict_fetch_msg = True msg = _("\n!!! %s/%s" " has fetch restriction turned on.\n" "!!! This probably means that this " "ebuild's files must be downloaded\n" "!!! manually. See the comments in" " the ebuild for more information.\n\n") % \ (mysettings["CATEGORY"], mysettings["PF"]) writemsg_level(msg, level=logging.ERROR, noiselevel=-1) elif restrict_fetch: pass elif listonly: pass elif not filedict[myfile]: writemsg(_("Warning: No mirrors available for file" " '%s'\n") % (myfile), noiselevel=-1) else: writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile, noiselevel=-1) if listonly: failed_files.add(myfile) continue elif fetchonly: failed_files.add(myfile) continue return 0 if failed_files: return 0 return 1
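fetch() interprets PORTAGE_FETCH_RESUME_MIN_SIZE values such as "350K" by matching a number with an optional size suffix and scaling it by the corresponding power of two; partial downloads smaller than that threshold are deleted or renamed instead of resumed. A small sketch of that conversion, assuming a suffix map covering K/M/G (the real _fetch_resume_size_re and _size_suffix_map are defined elsewhere in this module and may accept more suffixes):

import re

# Assumed stand-ins for _fetch_resume_size_re and _size_suffix_map.
_resume_size_re = re.compile(r"^(\d+)([KMG]?)$", re.IGNORECASE)
_suffix_exponent = {"": 0, "K": 10, "M": 20, "G": 30}

def parse_resume_min_size(value, default="350K"):
    """Convert a PORTAGE_FETCH_RESUME_MIN_SIZE-style value to bytes.

    Whitespace is stripped and an unrecognized format falls back to the
    default (the surrounding fetch() code also prints a warning first).
    """
    value = "".join(value.split()) or default
    match = _resume_size_re.match(value)
    if match is None:
        match = _resume_size_re.match(default)
    return int(match.group(1)) * 2 ** _suffix_exponent[match.group(2).upper()]

# parse_resume_min_size("350K") == 358400
# parse_resume_min_size("2M")   == 2097152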
def _start(self): tar_options = "" if "xattr" in self.features: process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = process.communicate()[0] if b"--xattrs" in output: tar_options = ["--xattrs", "--xattrs-include='*'"] for x in portage.util.shlex_split(self.env.get("PORTAGE_XATTR_EXCLUDE", "")): tar_options.append(portage._shell_quote("--xattrs-exclude=%s" % x)) tar_options = " ".join(tar_options) decomp = _compressors.get(compression_probe(self.pkg_path)) if decomp is not None: decomp_cmd = decomp.get("decompress") else: decomp_cmd = None if decomp_cmd is None: self.scheduler.output("!!! %s\n" % _("File compression header unrecognized: %s") % self.pkg_path, log_path=self.logfile, background=self.background, level=logging.ERROR) self.returncode = 1 self._async_wait() return try: decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0] except IndexError: decompression_binary = "" if find_binary(decompression_binary) is None: # Try alternative command if it exists if _compressors.get(compression_probe(self.pkg_path)).get("decompress_alt"): decomp_cmd = _compressors.get( compression_probe(self.pkg_path)).get("decompress_alt") try: decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0] except IndexError: decompression_binary = "" if find_binary(decompression_binary) is None: missing_package = _compressors.get(compression_probe(self.pkg_path)).get("package") self.scheduler.output("!!! %s\n" % _("File compression unsupported %s.\n Command was: %s.\n Maybe missing package: %s") % (self.pkg_path, varexpand(decomp_cmd, mydict=self.env), missing_package), log_path=self.logfile, background=self.background, level=logging.ERROR) self.returncode = 1 self._async_wait() return pkg_xpak = portage.xpak.tbz2(self.pkg_path) pkg_xpak.scan() # SIGPIPE handling (128 + SIGPIPE) should be compatible with # assert_sigpipe_ok() that's used by the ebuild unpack() helper. self.args = [self._shell_binary, "-c", ("cmd0=(head -c %d -- %s) cmd1=(%s) cmd2=(tar -xp %s -C %s -f -); " + \ '"${cmd0[@]}" | "${cmd1[@]}" | "${cmd2[@]}"; ' + \ "p=(${PIPESTATUS[@]}) ; for i in {0..2}; do " + \ "if [[ ${p[$i]} != 0 && ${p[$i]} != %d ]] ; then " + \ "echo command $(eval \"echo \\\"'\\${cmd$i[*]}'\\\"\") " + \ "failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; done; " + \ "if [ ${p[$i]} != 0 ] ; then " + \ "echo command $(eval \"echo \\\"'\\${cmd$i[*]}'\\\"\") " + \ "failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; " + \ "exit 0 ;") % \ (pkg_xpak.filestat.st_size - pkg_xpak.xpaksize, portage._shell_quote(self.pkg_path), decomp_cmd, tar_options, portage._shell_quote(self.image_dir), 128 + signal.SIGPIPE)] SpawnProcess._start(self)
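Each extractor starts by probing whether the installed tar understands --xattrs (by scanning the output of tar --help) and, when it does, appends shell-quoted --xattrs-exclude options built from PORTAGE_XATTR_EXCLUDE. A standalone sketch of that probe, with shlex.quote standing in for portage._shell_quote:

import shlex
import subprocess

def xattr_tar_options(xattr_exclude=""):
    """Return extra tar options for xattr preservation, or "" if unsupported.

    Probes "tar --help" the same way the extractors above do; the
    exclude patterns come from a PORTAGE_XATTR_EXCLUDE-style string.
    """
    try:
        output = subprocess.run(["tar", "--help"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE).stdout
    except OSError:
        return ""
    if b"--xattrs" not in output:
        return ""
    options = ["--xattrs", "--xattrs-include='*'"]
    for pattern in shlex.split(xattr_exclude):
        options.append(shlex.quote("--xattrs-exclude=%s" % pattern))
    return " ".join(options)

# Example:
# xattr_tar_options("security.* user.private.*")
# -> "--xattrs --xattrs-include='*' '--xattrs-exclude=security.*' ..."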