def commit(self):
    if not self.filename:
        return
    d = {}
    d.update(self)
    # Only commit if the internal state has changed.
    if d != self._clean_data:
        d["version"] = str(portage.VERSION)
        try:
            f = atomic_ofstream(self.filename, mode="wb")
        except EnvironmentError:
            pass
        else:
            if self._json_write:
                f.write(
                    _unicode_encode(
                        json.dumps(d, **self._json_write_opts),
                        encoding=_encodings["repo.content"],
                        errors="strict",
                    )
                )
            else:
                pickle.dump(d, f, protocol=2)
            f.close()
            apply_secpass_permissions(
                self.filename, uid=uid, gid=portage_gid, mode=0o644
            )
            self._clean_data = copy.deepcopy(d)
def store(self): """ Store the registry data to the file. The existing inode will be replaced atomically, so if that inode is currently being used for a lock then that lock will be rendered useless. Therefore, it is important not to call this method until the current lock is ready to be immediately released. """ if os.environ.get("SANDBOX_ON") == "1" or \ self._data == self._data_orig: return try: f = atomic_ofstream(self._filename, 'wb') if self._json_write: f.write(_unicode_encode( json.dumps(self._data, **self._json_write_opts), encoding=_encodings['repo.content'], errors='strict')) else: pickle.dump(self._data, f, protocol=2) f.close() except EnvironmentError as e: if e.errno != PermissionDenied.errno: writemsg_level("!!! %s %s\n" % (e, self._filename), level=logging.ERROR, noiselevel=-1) else: self._data_orig = self._data.copy()
def store(self): """ Store the registry data to the file. The existing inode will be replaced atomically, so if that inode is currently being used for a lock then that lock will be rendered useless. Therefore, it is important not to call this method until the current lock is ready to be immediately released. """ if os.environ.get( "SANDBOX_ON") == "1" or self._data == self._data_orig: return try: f = atomic_ofstream(self._filename, "wb") if self._json_write: f.write( _unicode_encode( json.dumps(self._data, **self._json_write_opts), encoding=_encodings["repo.content"], errors="strict", )) else: pickle.dump(self._data, f, protocol=2) f.close() except EnvironmentError as e: if e.errno != PermissionDenied.errno: writemsg_level( "!!! %s %s\n" % (e, self._filename), level=logging.ERROR, noiselevel=-1, ) else: self._data_orig = self._data.copy()
def initialize(self, timestamp):
    f = atomic_ofstream(self._vardb._cache_delta_filename, 'w',
        encoding=_encodings['repo.content'], errors='strict')
    json.dump({
        "version": self._format_version,
        "timestamp": timestamp
    }, f, ensure_ascii=False)
    f.close()
def initialize(self, timestamp):
    with atomic_ofstream(
        self._vardb._cache_delta_filename,
        "w",
        encoding=_encodings["repo.content"],
        errors="strict",
    ) as f:
        json.dump(
            {"version": self._format_version, "timestamp": timestamp},
            f,
            ensure_ascii=False,
        )
def recordEvent(self, event, cpv, slot, counter):
    self._vardb.lock()
    try:
        deltas_obj = self.load()

        if deltas_obj is None:
            # We can't record meaningful deltas without
            # a pre-existing state.
            return

        delta_node = {
            "event": event,
            "package": cpv.cp,
            "version": cpv.version,
            "slot": slot,
            "counter": "%s" % counter
        }

        deltas_obj["deltas"].append(delta_node)

        # Eliminate earlier nodes cancelled out by later nodes
        # that have identical package and slot attributes.
        filtered_list = []
        slot_keys = set()
        version_keys = set()
        for delta_node in reversed(deltas_obj["deltas"]):
            slot_key = (delta_node["package"], delta_node["slot"])
            version_key = (delta_node["package"], delta_node["version"])
            if not (slot_key in slot_keys or \
                    version_key in version_keys):
                filtered_list.append(delta_node)
                slot_keys.add(slot_key)
                version_keys.add(version_key)

        filtered_list.reverse()
        deltas_obj["deltas"] = filtered_list

        f = atomic_ofstream(self._vardb._cache_delta_filename, mode='w',
            encoding=_encodings['repo.content'])
        json.dump(deltas_obj, f, ensure_ascii=False)
        f.close()
    finally:
        self._vardb.unlock()
def _strip_sig(manifest_path):
    """
    Strip an existing signature from a Manifest file.
    """
    line_re = ManifestTask._manifest_line_re
    lines = grablines(manifest_path)
    f = None
    try:
        f = atomic_ofstream(manifest_path)
        for line in lines:
            if line_re.match(line) is not None:
                f.write(line)
        f.close()
        f = None
    finally:
        if f is not None:
            f.abort()
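# A hedged sketch of the commit/abort contract _strip_sig() above depends
# on: atomic_ofstream.close() commits the new content atomically, while
# abort() deletes the temporary file and leaves the original untouched.
# The helper rewrite_filtered and its keep_line predicate are illustrative
# names, not portage APIs.
from portage.util import atomic_ofstream

def rewrite_filtered(path, keep_line):
    f = atomic_ofstream(path)
    try:
        # Reading the original is safe here because atomic_ofstream writes
        # to a separate temporary file until close() is called.
        with open(path) as src:
            for line in src:
                if keep_line(line):
                    f.write(line)
    except BaseException:
        f.abort()  # the original file remains unchanged
        raise
    else:
        f.close()  # atomically replaces the original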
def store(self): """ Store the registry data to file. No need to call this if autocommit was enabled. """ if os.environ.get("SANDBOX_ON") == "1" or \ self._data == self._data_orig: return try: f = atomic_ofstream(self._filename, 'wb') pickle.dump(self._data, f, protocol=2) f.close() except EnvironmentError as e: if e.errno != PermissionDenied.errno: writemsg_level("!!! %s %s\n" % (e, self._filename), level=logging.ERROR, noiselevel=-1) else: self._data_orig = self._data.copy()
def commit(self):
    if not self.filename:
        return
    d = {}
    d.update(self)
    # Only commit if the internal state has changed.
    if d != self._clean_data:
        d["version"] = str(portage.VERSION)
        try:
            f = atomic_ofstream(self.filename, mode='wb')
        except EnvironmentError:
            pass
        else:
            pickle.dump(d, f, protocol=2)
            f.close()
            apply_secpass_permissions(self.filename,
                uid=uid, gid=portage_gid, mode=0o644)
            self._clean_data = copy.deepcopy(d)
def commit(self):
    if not self.filename:
        return
    d = {}
    d.update(self)
    # Only commit if the internal state has changed.
    if d != self._clean_data:
        d["version"] = VERSION
        try:
            f = atomic_ofstream(self.filename, mode='wb')
        except EnvironmentError:
            pass
        else:
            pickle.dump(d, f, protocol=2)
            f.close()
            apply_secpass_permissions(self.filename,
                uid=uid, gid=portage_gid, mode=0o644)
            self._clean_data = copy.deepcopy(d)
def commit(self):
    if not self.filename:
        return
    d = {}
    d.update(self)
    # Only commit if the internal state has changed.
    if d != self._clean_data:
        d["version"] = str(portage.VERSION)
        try:
            f = atomic_ofstream(self.filename, mode='wb')
        except EnvironmentError:
            pass
        else:
            if self._json_write:
                f.write(_unicode_encode(
                    json.dumps(d, **self._json_write_opts),
                    encoding=_encodings['repo.content'], errors='strict'))
            else:
                pickle.dump(d, f, protocol=2)
            f.close()
            apply_secpass_permissions(self.filename,
                uid=uid, gid=portage_gid, mode=0o644)
            self._clean_data = copy.deepcopy(d)
def __write_to_disk(self, d):
    """Private method used by the ``commit`` method."""
    d["version"] = str(portage.VERSION)
    try:
        f = atomic_ofstream(self.filename, mode="wb")
    except EnvironmentError:
        pass
    else:
        if self._json_write:
            f.write(
                _unicode_encode(
                    json.dumps(d, **self._json_write_opts),
                    encoding=_encodings["repo.content"],
                    errors="strict",
                )
            )
        else:
            pickle.dump(d, f, protocol=2)
        f.close()
        apply_secpass_permissions(
            self.filename, uid=uid, gid=portage_gid, mode=0o644
        )
        self._clean_data = copy.deepcopy(d)
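# The commit()/__write_to_disk() variants above persist the same dict as
# either JSON or a protocol-2 pickle, depending on self._json_write. A
# loader that accepts either on-disk format could look like the sketch
# below; the function name _load_either and the try-JSON-first order are
# assumptions for illustration, not the actual loader.
import json
import pickle

def _load_either(path):
    with open(path, "rb") as f:
        content = f.read()
    try:
        # Output from json.dumps() is always valid UTF-8 text.
        return json.loads(content.decode("utf-8"))
    except (UnicodeDecodeError, ValueError):
        # Fall back to the legacy pickle format.
        return pickle.loads(content)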
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
        writemsg_level):
    if writemsg_level is None:
        writemsg_level = portage.util.writemsg_level
    if target_root is None:
        target_root = portage.settings["ROOT"]
    if prev_mtimes is None:
        prev_mtimes = portage.mtimedb["ldpath"]
    if env is None:
        settings = portage.settings
    else:
        settings = env

    eprefix = settings.get("EPREFIX", "")
    eprefix_lstrip = eprefix.lstrip(os.sep)
    envd_dir = os.path.join(target_root, eprefix_lstrip, "etc", "env.d")
    ensure_dirs(envd_dir, mode=0o755)
    fns = listdir(envd_dir, EmptyOnError=1)
    fns.sort()
    templist = []
    for x in fns:
        if len(x) < 3:
            continue
        if not x[0].isdigit() or not x[1].isdigit():
            continue
        if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
            continue
        templist.append(x)
    fns = templist
    del templist

    space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
    colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
        "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
        "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
        "PYTHONPATH", "ROOTPATH"])

    config_list = []

    for x in fns:
        file_path = os.path.join(envd_dir, x)
        try:
            myconfig = getconfig(file_path, expand=False)
        except ParseError as e:
            writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
            del e
            continue
        if myconfig is None:
            # broken symlink or file removed by a concurrent process
            writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
            continue

        config_list.append(myconfig)
        if "SPACE_SEPARATED" in myconfig:
            space_separated.update(myconfig["SPACE_SEPARATED"].split())
            del myconfig["SPACE_SEPARATED"]
        if "COLON_SEPARATED" in myconfig:
            colon_separated.update(myconfig["COLON_SEPARATED"].split())
            del myconfig["COLON_SEPARATED"]

    env = {}
    specials = {}
    for var in space_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split():
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = " ".join(mylist)
        specials[var] = mylist

    for var in colon_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split(":"):
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = ":".join(mylist)
        specials[var] = mylist

    for myconfig in config_list:
        # Cumulative variables have already been deleted from myconfig
        # so that they won't be overwritten by this dict.update call.
        env.update(myconfig)

    ldsoconf_path = os.path.join(
        target_root, eprefix_lstrip, "etc", "ld.so.conf")
    try:
        myld = io.open(_unicode_encode(ldsoconf_path,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='replace')
        myldlines = myld.readlines()
        myld.close()
        oldld = []
        for x in myldlines:
            # each line has at least one char (a newline)
            if x[:1] == "#":
                continue
            oldld.append(x[:-1])
    except (IOError, OSError) as e:
        if e.errno != errno.ENOENT:
            raise
        oldld = None

    newld = specials["LDPATH"]
    if (oldld != newld):
        # ld.so.conf needs updating and ldconfig needs to be run
        myfd = atomic_ofstream(ldsoconf_path)
        myfd.write("# ld.so.conf autogenerated by env-update; "
            "make all changes to\n")
        myfd.write("# contents of /etc/env.d directory\n")
        for x in specials["LDPATH"]:
            myfd.write(x + "\n")
        myfd.close()

    # Update prelink.conf if we are prelink-enabled
    if prelink_capable:
        newprelink = atomic_ofstream(os.path.join(
            target_root, eprefix_lstrip, "etc", "prelink.conf"))
        newprelink.write("# prelink.conf autogenerated by env-update; "
            "make all changes to\n")
        newprelink.write("# contents of /etc/env.d directory\n")

        for x in ["/bin", "/sbin", "/usr/bin", "/usr/sbin", "/lib", "/usr/lib"]:
            newprelink.write("-l %s\n" % (x,))
        prelink_paths = []
        prelink_paths += specials.get("LDPATH", [])
        prelink_paths += specials.get("PATH", [])
        prelink_paths += specials.get("PRELINK_PATH", [])
        prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
        for x in prelink_paths:
            if not x:
                continue
            if x[-1:] != '/':
                x += "/"
            plmasked = 0
            for y in prelink_path_mask:
                if not y:
                    continue
                if y[-1] != '/':
                    y += "/"
                if y == x[0:len(y)]:
                    plmasked = 1
                    break
            if not plmasked:
                newprelink.write("-h %s\n" % (x,))
        for x in prelink_path_mask:
            newprelink.write("-b %s\n" % (x,))
        newprelink.close()

    current_time = long(time.time())
    mtime_changed = False
    lib_dirs = set()
    for lib_dir in set(specials["LDPATH"] + \
            ['usr/lib', 'usr/lib64', 'usr/lib32', 'lib', 'lib64', 'lib32']):
        x = os.path.join(target_root, eprefix_lstrip, lib_dir.lstrip(os.sep))
        try:
            newldpathtime = os.stat(x)[stat.ST_MTIME]
            lib_dirs.add(normalize_path(x))
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                try:
                    del prev_mtimes[x]
                except KeyError:
                    pass
                # ignore this path because it doesn't exist
                continue
            raise
        if newldpathtime == current_time:
            # Reset mtime to avoid the potential ambiguity of times that
            # differ by less than 1 second.
            newldpathtime -= 1
            os.utime(x, (newldpathtime, newldpathtime))
            prev_mtimes[x] = newldpathtime
            mtime_changed = True
        elif x in prev_mtimes:
            if prev_mtimes[x] == newldpathtime:
                pass
            else:
                prev_mtimes[x] = newldpathtime
                mtime_changed = True
        else:
            prev_mtimes[x] = newldpathtime
            mtime_changed = True

    if makelinks and \
            not mtime_changed and \
            contents is not None:
        libdir_contents_changed = False
        for mypath, mydata in contents.items():
            if mydata[0] not in ("obj", "sym"):
                continue
            head, tail = os.path.split(mypath)
            if head in lib_dirs:
                libdir_contents_changed = True
                break
        if not libdir_contents_changed:
            makelinks = False

    ldconfig = "/sbin/ldconfig"
    if "CHOST" in settings and "CBUILD" in settings and \
            settings["CHOST"] != settings["CBUILD"]:
        ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])

    # Only run ldconfig as needed
    if makelinks and ldconfig and not eprefix:
        # ldconfig has very different behaviour between FreeBSD and Linux
        if ostype == "Linux" or ostype.lower().endswith("gnu"):
            # We can't update links if we haven't cleaned other versions
            # first, as an older package installed ON TOP of a newer version
            # will cause ldconfig to overwrite the symlinks we just made.
            # -X means no links. After 'clean' we can safely create links.
            writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
                (target_root,))
            os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
        elif ostype in ("FreeBSD", "DragonFly"):
            writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
                target_root)
            os.system(("cd / ; %s -elf -i " + \
                "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
                (ldconfig, target_root, target_root))

    del specials["LDPATH"]

    penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
    penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
    cenvnotice = penvnotice[:]
    penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
    cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

    # create /etc/profile.env for bash support
    outfile = atomic_ofstream(os.path.join(
        target_root, eprefix_lstrip, "etc", "profile.env"))
    outfile.write(penvnotice)

    env_keys = [x for x in env if x != "LDPATH"]
    env_keys.sort()
    for k in env_keys:
        v = env[k]
        if v.startswith('$') and not v.startswith('${'):
            outfile.write("export %s=$'%s'\n" % (k, v[1:]))
        else:
            outfile.write("export %s='%s'\n" % (k, v))
    outfile.close()

    # create /etc/csh.env for (t)csh support
    outfile = atomic_ofstream(os.path.join(
        target_root, eprefix_lstrip, "etc", "csh.env"))
    outfile.write(cenvnotice)
    for x in env_keys:
        outfile.write("setenv %s '%s'\n" % (x, env[x]))
    outfile.close()
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
        writemsg_level):
    if writemsg_level is None:
        writemsg_level = portage.util.writemsg_level
    if target_root is None:
        target_root = portage.settings["ROOT"]
    if prev_mtimes is None:
        prev_mtimes = portage.mtimedb["ldpath"]
    if env is None:
        settings = portage.settings
    else:
        settings = env

    eprefix = settings.get("EPREFIX", "")
    eprefix_lstrip = eprefix.lstrip(os.sep)
    eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(
        os.sep) + os.sep
    envd_dir = os.path.join(eroot, "etc", "env.d")
    ensure_dirs(envd_dir, mode=0o755)
    fns = listdir(envd_dir, EmptyOnError=1)
    fns.sort()
    templist = []
    for x in fns:
        if len(x) < 3:
            continue
        if not x[0].isdigit() or not x[1].isdigit():
            continue
        if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
            continue
        templist.append(x)
    fns = templist
    del templist

    space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
    colon_separated = set([
        "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH", "CLASSPATH", "INFODIR",
        "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH", "PATH", "PKG_CONFIG_PATH",
        "PRELINK_PATH", "PRELINK_PATH_MASK", "PYTHONPATH", "ROOTPATH"
    ])

    config_list = []

    for x in fns:
        file_path = os.path.join(envd_dir, x)
        try:
            myconfig = getconfig(file_path, expand=False)
        except ParseError as e:
            writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
            del e
            continue
        if myconfig is None:
            # broken symlink or file removed by a concurrent process
            writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
            continue

        config_list.append(myconfig)
        if "SPACE_SEPARATED" in myconfig:
            space_separated.update(myconfig["SPACE_SEPARATED"].split())
            del myconfig["SPACE_SEPARATED"]
        if "COLON_SEPARATED" in myconfig:
            colon_separated.update(myconfig["COLON_SEPARATED"].split())
            del myconfig["COLON_SEPARATED"]

    env = {}
    specials = {}
    for var in space_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split():
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = " ".join(mylist)
        specials[var] = mylist

    for var in colon_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split(":"):
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = ":".join(mylist)
        specials[var] = mylist

    for myconfig in config_list:
        # Cumulative variables have already been deleted from myconfig
        # so that they won't be overwritten by this dict.update call.
        env.update(myconfig)

    ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
    try:
        myld = io.open(_unicode_encode(ldsoconf_path,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='replace')
        myldlines = myld.readlines()
        myld.close()
        oldld = []
        for x in myldlines:
            # each line has at least one char (a newline)
            if x[:1] == "#":
                continue
            oldld.append(x[:-1])
    except (IOError, OSError) as e:
        if e.errno != errno.ENOENT:
            raise
        oldld = None

    newld = specials["LDPATH"]
    if (oldld != newld):
        # ld.so.conf needs updating and ldconfig needs to be run
        myfd = atomic_ofstream(ldsoconf_path)
        myfd.write(
            "# ld.so.conf autogenerated by env-update; make all changes to\n")
        myfd.write("# contents of /etc/env.d directory\n")
        for x in specials["LDPATH"]:
            myfd.write(x + "\n")
        myfd.close()

    potential_lib_dirs = set()
    for lib_dir_glob in ('usr/lib*', 'lib*'):
        x = os.path.join(eroot, lib_dir_glob)
        for y in glob.glob(_unicode_encode(x,
                encoding=_encodings['fs'], errors='strict')):
            try:
                y = _unicode_decode(y,
                    encoding=_encodings['fs'], errors='strict')
            except UnicodeDecodeError:
                continue
            if os.path.basename(y) != 'libexec':
                potential_lib_dirs.add(y[len(eroot):])

    # Update prelink.conf if we are prelink-enabled
    if prelink_capable:
        prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
        ensure_dirs(prelink_d)
        newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
        newprelink.write(
            "# prelink.conf autogenerated by env-update; make all changes to\n")
        newprelink.write("# contents of /etc/env.d directory\n")

        for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
            newprelink.write('-l /%s\n' % (x,))
        prelink_paths = set()
        prelink_paths |= set(specials.get('LDPATH', []))
        prelink_paths |= set(specials.get('PATH', []))
        prelink_paths |= set(specials.get('PRELINK_PATH', []))
        prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
        for x in prelink_paths:
            if not x:
                continue
            if x[-1:] != '/':
                x += "/"
            plmasked = 0
            for y in prelink_path_mask:
                if not y:
                    continue
                if y[-1] != '/':
                    y += "/"
                if y == x[0:len(y)]:
                    plmasked = 1
                    break
            if not plmasked:
                newprelink.write("-h %s\n" % (x,))
        for x in prelink_path_mask:
            newprelink.write("-b %s\n" % (x,))
        newprelink.close()

        # Migration code path. If /etc/prelink.conf was generated by us, then
        # point it to the new stuff until the prelink package re-installs.
        prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
        try:
            with open(_unicode_encode(prelink_conf,
                    encoding=_encodings['fs'], errors='strict'), 'rb') as f:
                if f.readline() == b'# prelink.conf autogenerated by env-update; make all changes to\n':
                    f = atomic_ofstream(prelink_conf)
                    f.write('-c /etc/prelink.conf.d/*.conf\n')
                    f.close()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise

    current_time = long(time.time())
    mtime_changed = False

    lib_dirs = set()
    for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
        x = os.path.join(eroot, lib_dir.lstrip(os.sep))
        try:
            newldpathtime = os.stat(x)[stat.ST_MTIME]
            lib_dirs.add(normalize_path(x))
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                try:
                    del prev_mtimes[x]
                except KeyError:
                    pass
                # ignore this path because it doesn't exist
                continue
            raise
        if newldpathtime == current_time:
            # Reset mtime to avoid the potential ambiguity of times that
            # differ by less than 1 second.
            newldpathtime -= 1
            os.utime(x, (newldpathtime, newldpathtime))
            prev_mtimes[x] = newldpathtime
            mtime_changed = True
        elif x in prev_mtimes:
            if prev_mtimes[x] == newldpathtime:
                pass
            else:
                prev_mtimes[x] = newldpathtime
                mtime_changed = True
        else:
            prev_mtimes[x] = newldpathtime
            mtime_changed = True

    if makelinks and \
            not mtime_changed and \
            contents is not None:
        libdir_contents_changed = False
        for mypath, mydata in contents.items():
            if mydata[0] not in ("obj", "sym"):
                continue
            head, tail = os.path.split(mypath)
            if head in lib_dirs:
                libdir_contents_changed = True
                break
        if not libdir_contents_changed:
            makelinks = False

    ldconfig = "/sbin/ldconfig"
    if "CHOST" in settings and "CBUILD" in settings and \
            settings["CHOST"] != settings["CBUILD"]:
        ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])

    # Only run ldconfig as needed
    if makelinks and ldconfig and not eprefix:
        # ldconfig has very different behaviour between FreeBSD and Linux
        if ostype == "Linux" or ostype.lower().endswith("gnu"):
            # We can't update links if we haven't cleaned other versions
            # first, as an older package installed ON TOP of a newer version
            # will cause ldconfig to overwrite the symlinks we just made.
            # -X means no links. After 'clean' we can safely create links.
            writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
                (target_root,))
            os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
        elif ostype in ("FreeBSD", "DragonFly"):
            writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
                target_root)
            os.system(("cd / ; %s -elf -i " + \
                "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
                (ldconfig, target_root, target_root))

    del specials["LDPATH"]

    penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
    penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
    cenvnotice = penvnotice[:]
    penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
    cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

    # create /etc/profile.env for bash support
    outfile = atomic_ofstream(os.path.join(eroot, "etc", "profile.env"))
    outfile.write(penvnotice)

    env_keys = [x for x in env if x != "LDPATH"]
    env_keys.sort()
    for k in env_keys:
        v = env[k]
        if v.startswith('$') and not v.startswith('${'):
            outfile.write("export %s=$'%s'\n" % (k, v[1:]))
        else:
            outfile.write("export %s='%s'\n" % (k, v))
    outfile.close()

    # create /etc/csh.env for (t)csh support
    outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
    outfile.write(cenvnotice)
    for x in env_keys:
        outfile.write("setenv %s '%s'\n" % (x, env[x]))
    outfile.close()
def _populate(self, getbinpkgs=0):
    if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
        return 0

    # Clear all caches in case populate is called multiple times
    # as may be the case when _global_updates calls populate()
    # prior to performing package moves since it only wants to
    # operate on local packages (getbinpkgs=0).
    self._remotepkgs = None
    self.dbapi._clear_cache()
    self.dbapi._aux_cache.clear()

    if True:
        pkg_paths = {}
        self._pkg_paths = pkg_paths
        dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
        if "All" in dirs:
            dirs.remove("All")
        dirs.sort()
        dirs.insert(0, "All")
        pkgindex = self._load_pkgindex()
        pf_index = None
        if not self._pkgindex_version_supported(pkgindex):
            pkgindex = self._new_pkgindex()
        header = pkgindex.header
        metadata = {}
        for d in pkgindex.packages:
            metadata[d["CPV"]] = d
        update_pkgindex = False
        for mydir in dirs:
            for myfile in listdir(os.path.join(self.pkgdir, mydir)):
                if not myfile.endswith(".tbz2"):
                    continue
                mypath = os.path.join(mydir, myfile)
                full_path = os.path.join(self.pkgdir, mypath)
                s = os.lstat(full_path)
                if stat.S_ISLNK(s.st_mode):
                    continue

                # Validate data from the package index and try to avoid
                # reading the xpak if possible.
                if mydir != "All":
                    possibilities = None
                    d = metadata.get(mydir + "/" + myfile[:-5])
                    if d:
                        possibilities = [d]
                else:
                    if pf_index is None:
                        pf_index = {}
                        for mycpv in metadata:
                            mycat, mypf = catsplit(mycpv)
                            pf_index.setdefault(
                                mypf, []).append(metadata[mycpv])
                    possibilities = pf_index.get(myfile[:-5])
                if possibilities:
                    match = None
                    for d in possibilities:
                        try:
                            if long(d["MTIME"]) != s[stat.ST_MTIME]:
                                continue
                        except (KeyError, ValueError):
                            continue
                        try:
                            if long(d["SIZE"]) != long(s.st_size):
                                continue
                        except (KeyError, ValueError):
                            continue
                        if not self._pkgindex_keys.difference(d):
                            match = d
                            break
                    if match:
                        mycpv = match["CPV"]
                        if mycpv in pkg_paths:
                            # discard duplicates (All/ is preferred)
                            continue
                        pkg_paths[mycpv] = mypath
                        # update the path if the package has been moved
                        oldpath = d.get("PATH")
                        if oldpath and oldpath != mypath:
                            update_pkgindex = True
                        if mypath != mycpv + ".tbz2":
                            d["PATH"] = mypath
                            if not oldpath:
                                update_pkgindex = True
                        else:
                            d.pop("PATH", None)
                            if oldpath:
                                update_pkgindex = True
                        self.dbapi.cpv_inject(mycpv)
                        if not self.dbapi._aux_cache_keys.difference(d):
                            aux_cache = self.dbapi._aux_cache_slot_dict()
                            for k in self.dbapi._aux_cache_keys:
                                aux_cache[k] = d[k]
                            self.dbapi._aux_cache[mycpv] = aux_cache
                        continue
                if not os.access(full_path, os.R_OK):
                    writemsg(_("!!! Permission denied to read " \
                        "binary package: '%s'\n") % full_path,
                        noiselevel=-1)
                    self.invalids.append(myfile[:-5])
                    continue
                metadata_bytes = portage.xpak.tbz2(full_path).get_data()
                mycat = _unicode_decode(metadata_bytes.get(
                    _unicode_encode("CATEGORY",
                    encoding=_encodings['repo.content']), ""),
                    encoding=_encodings['repo.content'], errors='replace')
                mypf = _unicode_decode(metadata_bytes.get(
                    _unicode_encode("PF",
                    encoding=_encodings['repo.content']), ""),
                    encoding=_encodings['repo.content'], errors='replace')
                slot = _unicode_decode(metadata_bytes.get(
                    _unicode_encode("SLOT",
                    encoding=_encodings['repo.content']), ""),
                    encoding=_encodings['repo.content'], errors='replace')
                mypkg = myfile[:-5]
                if not mycat or not mypf or not slot:
                    # old-style or corrupt package
                    writemsg(_("\n!!! Invalid binary package: '%s'\n") % \
                        full_path, noiselevel=-1)
                    missing_keys = []
                    if not mycat:
                        missing_keys.append("CATEGORY")
                    if not mypf:
                        missing_keys.append("PF")
                    if not slot:
                        missing_keys.append("SLOT")
                    msg = []
                    if missing_keys:
                        missing_keys.sort()
                        msg.append(_("Missing metadata key(s): %s.") % \
                            ", ".join(missing_keys))
                    msg.append(_(" This binary package is not " \
                        "recoverable and should be deleted."))
                    from textwrap import wrap
                    for line in wrap("".join(msg), 72):
                        writemsg("!!! %s\n" % line, noiselevel=-1)
                    self.invalids.append(mypkg)
                    continue
                mycat = mycat.strip()
                slot = slot.strip()
                if mycat != mydir and mydir != "All":
                    continue
                if mypkg != mypf.strip():
                    continue
                mycpv = mycat + "/" + mypkg
                if mycpv in pkg_paths:
                    # All is first, so it's preferred.
                    continue
                if not self.dbapi._category_re.match(mycat):
                    writemsg(_("!!! Binary package has an " \
                        "unrecognized category: '%s'\n") % full_path,
                        noiselevel=-1)
                    writemsg(_("!!! '%s' has a category that is not" \
                        " listed in %setc/portage/categories\n") % \
                        (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
                        noiselevel=-1)
                    continue
                pkg_paths[mycpv] = mypath
                self.dbapi.cpv_inject(mycpv)
                update_pkgindex = True
                d = metadata.get(mycpv, {})
                if d:
                    try:
                        if long(d["MTIME"]) != s[stat.ST_MTIME]:
                            d.clear()
                    except (KeyError, ValueError):
                        d.clear()
                if d:
                    try:
                        if long(d["SIZE"]) != long(s.st_size):
                            d.clear()
                    except (KeyError, ValueError):
                        d.clear()
                d["CPV"] = mycpv
                d["SLOT"] = slot
                d["MTIME"] = str(s[stat.ST_MTIME])
                d["SIZE"] = str(s.st_size)
                d.update(zip(self._pkgindex_aux_keys,
                    self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
                try:
                    self._eval_use_flags(mycpv, d)
                except portage.exception.InvalidDependString:
                    writemsg(_("!!! Invalid binary package: '%s'\n") % \
                        self.getname(mycpv), noiselevel=-1)
                    self.dbapi.cpv_remove(mycpv)
                    del pkg_paths[mycpv]

                # record location if it's non-default
                if mypath != mycpv + ".tbz2":
                    d["PATH"] = mypath
                else:
                    d.pop("PATH", None)
                metadata[mycpv] = d
                if not self.dbapi._aux_cache_keys.difference(d):
                    aux_cache = self.dbapi._aux_cache_slot_dict()
                    for k in self.dbapi._aux_cache_keys:
                        aux_cache[k] = d[k]
                    self.dbapi._aux_cache[mycpv] = aux_cache

        for cpv in list(metadata):
            if cpv not in pkg_paths:
                del metadata[cpv]

        # Do not bother to write the Packages index if $PKGDIR/All/ exists
        # since it will provide no benefit due to the need to read CATEGORY
        # from xpak.
        if update_pkgindex and os.access(self.pkgdir, os.W_OK):
            del pkgindex.packages[:]
            pkgindex.packages.extend(iter(metadata.values()))
            self._update_pkgindex_header(pkgindex.header)
            from portage.util import atomic_ofstream
            f = atomic_ofstream(self._pkgindex_file)
            try:
                pkgindex.write(f)
            finally:
                f.close()

    if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
        writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
            noiselevel=-1)

    if getbinpkgs and 'PORTAGE_BINHOST' in self.settings:
        base_url = self.settings["PORTAGE_BINHOST"]
        from portage.const import CACHE_PATH
        try:
            from urllib.parse import urlparse
        except ImportError:
            from urlparse import urlparse
        urldata = urlparse(base_url)
        pkgindex_file = os.path.join(self.settings["ROOT"], CACHE_PATH,
            "binhost", urldata[1] + urldata[2], "Packages")
        pkgindex = self._new_pkgindex()
        try:
            f = codecs.open(_unicode_encode(pkgindex_file,
                encoding=_encodings['fs'], errors='strict'),
                mode='r', encoding=_encodings['repo.content'],
                errors='replace')
            try:
                pkgindex.read(f)
            finally:
                f.close()
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
        local_timestamp = pkgindex.header.get("TIMESTAMP", None)
        try:
            from urllib.request import urlopen as urllib_request_urlopen
        except ImportError:
            from urllib import urlopen as urllib_request_urlopen
        rmt_idx = self._new_pkgindex()
        try:
            # urlparse.urljoin() only works correctly with recognized
            # protocols and requires the base url to have a trailing
            # slash, so join manually...
            f = urllib_request_urlopen(base_url.rstrip("/") + "/Packages")
            f_dec = codecs.iterdecode(f,
                _encodings['repo.content'], errors='replace')
            try:
                rmt_idx.readHeader(f_dec)
                remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
                if not remote_timestamp:
                    # no timestamp in the header, something's wrong
                    pkgindex = None
                else:
                    if not self._pkgindex_version_supported(rmt_idx):
                        writemsg(_("\n\n!!! Binhost package index version" \
                            " is not supported: '%s'\n") % \
                            rmt_idx.header.get("VERSION"), noiselevel=-1)
                        pkgindex = None
                    elif local_timestamp != remote_timestamp:
                        rmt_idx.readBody(f_dec)
                        pkgindex = rmt_idx
            finally:
                f.close()
        except EnvironmentError as e:
            writemsg(_("\n\n!!! Error fetching binhost package" \
                " info from '%s'\n") % base_url)
            writemsg("!!! %s\n\n" % str(e))
            del e
            pkgindex = None
        if pkgindex is rmt_idx:
            pkgindex.modified = False  # don't update the header
            from portage.util import atomic_ofstream, ensure_dirs
            try:
                ensure_dirs(os.path.dirname(pkgindex_file))
                f = atomic_ofstream(pkgindex_file)
                pkgindex.write(f)
                f.close()
            except PortageException:
                if os.access(os.path.join(
                        self.settings["ROOT"], CACHE_PATH), os.W_OK):
                    raise
                # The current user doesn't have permission to cache the
                # file, but that's alright.
        if pkgindex:
            self._remotepkgs = {}
            for d in pkgindex.packages:
                self._remotepkgs[d["CPV"]] = d
            self._remote_has_index = True
            self._remote_base_uri = pkgindex.header.get("URI", base_url)
            self.__remotepkgs = {}
            for cpv in self._remotepkgs:
                self.dbapi.cpv_inject(cpv)
            self.populated = 1
            if True:
                # Remote package instances override local package
                # if they are not identical.
                hash_names = ["SIZE"] + self._pkgindex_hashes
                for cpv, local_metadata in metadata.items():
                    remote_metadata = self._remotepkgs.get(cpv)
                    if remote_metadata is None:
                        continue
                    # Use digests to compare identity.
                    identical = True
                    for hash_name in hash_names:
                        local_value = local_metadata.get(hash_name)
                        if local_value is None:
                            continue
                        remote_value = remote_metadata.get(hash_name)
                        if remote_value is None:
                            continue
                        if local_value != remote_value:
                            identical = False
                            break
                    if identical:
                        del self._remotepkgs[cpv]
                    else:
                        # Override the local package in the aux_get cache.
                        self.dbapi._aux_cache[cpv] = remote_metadata
            else:
                # Local package instances override remote instances.
                for cpv in metadata:
                    self._remotepkgs.pop(cpv, None)
            return

        self._remotepkgs = {}
        try:
            chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
            if chunk_size < 8:
                chunk_size = 8
        except (ValueError, KeyError):
            chunk_size = 3000
        writemsg_stdout("\n")
        writemsg_stdout(
            colorize("GOOD", _("Fetching bininfo from ")) + \
            re.sub(r'//(.+):.+@(.+)/', r'//\1:*password*@\2/', base_url) + "\n")
        self.__remotepkgs = portage.getbinpkg.dir_get_metadata(
            self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
        # writemsg(green(" -- DONE!\n\n"))

        for mypkg in list(self.__remotepkgs):
            if "CATEGORY" not in self.__remotepkgs[mypkg]:
                # old-style or corrupt package
                writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
                    noiselevel=-1)
                del self.__remotepkgs[mypkg]
                continue
            mycat = self.__remotepkgs[mypkg]["CATEGORY"].strip()
            fullpkg = mycat + "/" + mypkg[:-5]

            if fullpkg in metadata:
                # When using this old protocol, comparison with the remote
                # package isn't supported, so the local package is always
                # preferred even if getbinpkgsonly is enabled.
                continue

            if not self.dbapi._category_re.match(mycat):
                writemsg(_("!!! Remote binary package has an " \
                    "unrecognized category: '%s'\n") % fullpkg,
                    noiselevel=-1)
                writemsg(_("!!! '%s' has a category that is not" \
                    " listed in %setc/portage/categories\n") % \
                    (fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
                    noiselevel=-1)
                continue
            mykey = portage.cpv_getkey(fullpkg)
            try:
                # invalid tbz2's can hurt things.
                self.dbapi.cpv_inject(fullpkg)
                remote_metadata = self.__remotepkgs[mypkg]
                for k, v in remote_metadata.items():
                    remote_metadata[k] = v.strip()

                # Eliminate metadata values with names that digestCheck
                # uses, since they are not valid when using the old
                # protocol. Typically this is needed for SIZE metadata
                # which corresponds to the size of the unpacked files
                # rather than the binpkg file size, triggering digest
                # verification failures as reported in bug #303211.
                remote_metadata.pop('SIZE', None)
                for k in portage.checksum.hashfunc_map:
                    remote_metadata.pop(k, None)

                self._remotepkgs[fullpkg] = remote_metadata
            except SystemExit as e:
                raise
            except:
                writemsg(_("!!! Failed to inject remote binary " \
                    "package: %s\n") % fullpkg, noiselevel=-1)
                del self.__remotepkgs[mypkg]
                continue
    self.populated = 1
def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
        env=None, writemsg_level=None):
    """
    Parse /etc/env.d and use it to generate /etc/profile.env, csh.env,
    ld.so.conf, and prelink.conf. Finally, run ldconfig.
    """
    if writemsg_level is None:
        writemsg_level = portage.util.writemsg_level
    if target_root is None:
        target_root = portage.settings["ROOT"]
    if prev_mtimes is None:
        prev_mtimes = portage.mtimedb["ldpath"]
    if env is None:
        env = os.environ
    envd_dir = os.path.join(target_root, "etc", "env.d")
    ensure_dirs(envd_dir, mode=0o755)
    fns = listdir(envd_dir, EmptyOnError=1)
    fns.sort()
    templist = []
    for x in fns:
        if len(x) < 3:
            continue
        if not x[0].isdigit() or not x[1].isdigit():
            continue
        if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
            continue
        templist.append(x)
    fns = templist
    del templist

    space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
    colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
        "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
        "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
        "PYTHONPATH", "ROOTPATH"])

    config_list = []

    for x in fns:
        file_path = os.path.join(envd_dir, x)
        try:
            myconfig = getconfig(file_path, expand=False)
        except ParseError as e:
            writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
            del e
            continue
        if myconfig is None:
            # broken symlink or file removed by a concurrent process
            writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
            continue

        config_list.append(myconfig)
        if "SPACE_SEPARATED" in myconfig:
            space_separated.update(myconfig["SPACE_SEPARATED"].split())
            del myconfig["SPACE_SEPARATED"]
        if "COLON_SEPARATED" in myconfig:
            colon_separated.update(myconfig["COLON_SEPARATED"].split())
            del myconfig["COLON_SEPARATED"]

    env = {}
    specials = {}
    for var in space_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split():
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = " ".join(mylist)
        specials[var] = mylist

    for var in colon_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split(":"):
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = ":".join(mylist)
        specials[var] = mylist

    for myconfig in config_list:
        # Cumulative variables have already been deleted from myconfig
        # so that they won't be overwritten by this dict.update call.
        env.update(myconfig)

    ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
    try:
        myld = codecs.open(_unicode_encode(ldsoconf_path,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['content'], errors='replace')
        myldlines = myld.readlines()
        myld.close()
        oldld = []
        for x in myldlines:
            # each line has at least one char (a newline)
            if x[:1] == "#":
                continue
            oldld.append(x[:-1])
    except (IOError, OSError) as e:
        if e.errno != errno.ENOENT:
            raise
        oldld = None

    ld_cache_update = False

    newld = specials["LDPATH"]
    if (oldld != newld):
        # ld.so.conf needs updating and ldconfig needs to be run
        myfd = atomic_ofstream(ldsoconf_path)
        myfd.write("# ld.so.conf autogenerated by env-update; "
            "make all changes to\n")
        myfd.write("# contents of /etc/env.d directory\n")
        for x in specials["LDPATH"]:
            myfd.write(x + "\n")
        myfd.close()
        ld_cache_update = True

    # Update prelink.conf if we are prelink-enabled
    if prelink_capable:
        newprelink = atomic_ofstream(
            os.path.join(target_root, "etc", "prelink.conf"))
        newprelink.write("# prelink.conf autogenerated by env-update; "
            "make all changes to\n")
        newprelink.write("# contents of /etc/env.d directory\n")

        for x in ["/bin", "/sbin", "/usr/bin", "/usr/sbin", "/lib", "/usr/lib"]:
            newprelink.write("-l %s\n" % (x,))
        prelink_paths = []
        prelink_paths += specials.get("LDPATH", [])
        prelink_paths += specials.get("PATH", [])
        prelink_paths += specials.get("PRELINK_PATH", [])
        prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
        for x in prelink_paths:
            if not x:
                continue
            if x[-1:] != '/':
                x += "/"
            plmasked = 0
            for y in prelink_path_mask:
                if not y:
                    continue
                if y[-1] != '/':
                    y += "/"
                if y == x[0:len(y)]:
                    plmasked = 1
                    break
            if not plmasked:
                newprelink.write("-h %s\n" % (x,))
        for x in prelink_path_mask:
            newprelink.write("-b %s\n" % (x,))
        newprelink.close()

    # Portage stores mtimes with 1 second granularity but in >=python-2.5
    # finer granularity is possible. In order to avoid the potential
    # ambiguity of mtimes that differ by less than 1 second, sleep here if
    # any of the directories have been modified during the current second.
    sleep_for_mtime_granularity = False
    current_time = long(time.time())
    mtime_changed = False
    lib_dirs = set()
    for lib_dir in set(specials["LDPATH"] + \
            ['usr/lib', 'usr/lib64', 'usr/lib32', 'lib', 'lib64', 'lib32']):
        x = os.path.join(target_root, lib_dir.lstrip(os.sep))
        try:
            newldpathtime = os.stat(x)[stat.ST_MTIME]
            lib_dirs.add(normalize_path(x))
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                try:
                    del prev_mtimes[x]
                except KeyError:
                    pass
                # ignore this path because it doesn't exist
                continue
            raise
        if newldpathtime == current_time:
            sleep_for_mtime_granularity = True
        if x in prev_mtimes:
            if prev_mtimes[x] == newldpathtime:
                pass
            else:
                prev_mtimes[x] = newldpathtime
                mtime_changed = True
        else:
            prev_mtimes[x] = newldpathtime
            mtime_changed = True

    if mtime_changed:
        ld_cache_update = True

    if makelinks and \
            not ld_cache_update and \
            contents is not None:
        libdir_contents_changed = False
        for mypath, mydata in contents.items():
            if mydata[0] not in ("obj", "sym"):
                continue
            head, tail = os.path.split(mypath)
            if head in lib_dirs:
                libdir_contents_changed = True
                break
        if not libdir_contents_changed:
            makelinks = False

    ldconfig = "/sbin/ldconfig"
    if "CHOST" in env and "CBUILD" in env and \
            env["CHOST"] != env["CBUILD"]:
        ldconfig = find_binary("%s-ldconfig" % env["CHOST"])

    # Only run ldconfig as needed
    if (ld_cache_update or makelinks) and ldconfig:
        # ldconfig has very different behaviour between FreeBSD and Linux
        if ostype == "Linux" or ostype.lower().endswith("gnu"):
            # We can't update links if we haven't cleaned other versions
            # first, as an older package installed ON TOP of a newer version
            # will cause ldconfig to overwrite the symlinks we just made.
            # -X means no links. After 'clean' we can safely create links.
            writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
                (target_root,))
            if makelinks:
                os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
            else:
                os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
        elif ostype in ("FreeBSD", "DragonFly"):
            writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
                target_root)
            os.system(("cd / ; %s -elf -i " + \
                "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
                (ldconfig, target_root, target_root))

    del specials["LDPATH"]

    penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
    penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
    cenvnotice = penvnotice[:]
    penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
    cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

    # create /etc/profile.env for bash support
    outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
    outfile.write(penvnotice)

    env_keys = [x for x in env if x != "LDPATH"]
    env_keys.sort()
    for k in env_keys:
        v = env[k]
        if v.startswith('$') and not v.startswith('${'):
            outfile.write("export %s=$'%s'\n" % (k, v[1:]))
        else:
            outfile.write("export %s='%s'\n" % (k, v))
    outfile.close()

    # create /etc/csh.env for (t)csh support
    outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
    outfile.write(cenvnotice)
    for x in env_keys:
        outfile.write("setenv %s '%s'\n" % (x, env[x]))
    outfile.close()

    if sleep_for_mtime_granularity:
        while current_time == long(time.time()):
            time.sleep(1)
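# Both env_update() variants work around the 1-second granularity of the
# mtimes portage records: a directory modified during the *current* second
# is ambiguous, so env_update() sleeps until the clock ticks over, while
# _env_update() instead rewinds the directory's mtime by one second. A
# small sketch of the rewind approach, with an illustrative function name:
import os
import stat
import time

def disambiguate_mtime(path):
    mtime = os.stat(path)[stat.ST_MTIME]
    if mtime == int(time.time()):
        # Modified during the current second; rewind so later comparisons
        # against stored whole-second mtimes are unambiguous.
        mtime -= 1
        os.utime(path, (mtime, mtime))
    return mtime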
def inject(self, cpv, filename=None):
    """Add a freshly built package to the database.  This updates
    $PKGDIR/Packages with the new package metadata (including MD5).
    @param cpv: The cpv of the new package to inject
    @type cpv: string
    @param filename: File path of the package to inject, or None if it's
        already in the location returned by getname()
    @type filename: string
    @rtype: None
    """
    mycat, mypkg = catsplit(cpv)
    if not self.populated:
        self.populate()
    if filename is None:
        full_path = self.getname(cpv)
    else:
        full_path = filename
    try:
        s = os.stat(full_path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        del e
        writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
            noiselevel=-1)
        return
    mytbz2 = portage.xpak.tbz2(full_path)
    slot = mytbz2.getfile("SLOT")
    if slot is None:
        writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
            noiselevel=-1)
        return
    slot = slot.strip()
    self.dbapi.cpv_inject(cpv)

    # Reread the Packages index (in case it's been changed by another
    # process) and then update it, all while holding a lock.
    from portage.locks import lockfile, unlockfile
    pkgindex_lock = None
    created_symlink = False
    try:
        pkgindex_lock = lockfile(self._pkgindex_file,
            wantnewlockfile=1)
        if filename is not None:
            new_filename = self.getname(cpv)
            self._ensure_dir(os.path.dirname(new_filename))
            _movefile(filename, new_filename, mysettings=self.settings)
        if self._all_directory and \
                self.getname(cpv).split(os.path.sep)[-2] == "All":
            self._create_symlink(cpv)
            created_symlink = True
        pkgindex = self._load_pkgindex()

        if not self._pkgindex_version_supported(pkgindex):
            pkgindex = self._new_pkgindex()

        try:
            d = self._pkgindex_entry(cpv)
        except portage.exception.InvalidDependString:
            writemsg(_("!!! Invalid binary package: '%s'\n") % \
                self.getname(cpv), noiselevel=-1)
            self.dbapi.cpv_remove(cpv)
            del self._pkg_paths[cpv]
            return

        # If found, remove package(s) with duplicate path.
        path = d.get("PATH", "")
        for i in range(len(pkgindex.packages) - 1, -1, -1):
            d2 = pkgindex.packages[i]
            if path and path == d2.get("PATH"):
                # Handle path collisions in $PKGDIR/All
                # when CPV is not identical.
                del pkgindex.packages[i]
            elif cpv == d2.get("CPV"):
                if path == d2.get("PATH", ""):
                    del pkgindex.packages[i]
                elif created_symlink and not d2.get("PATH", ""):
                    # Delete entry for the package that was just
                    # overwritten by a symlink to this package.
                    del pkgindex.packages[i]

        pkgindex.packages.append(d)

        self._update_pkgindex_header(pkgindex.header)
        from portage.util import atomic_ofstream
        f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
        try:
            pkgindex.write(f)
        finally:
            f.close()
    finally:
        if pkgindex_lock:
            unlockfile(pkgindex_lock)

    if self._remotepkgs is not None:
        # When a remote package is downloaded and injected,
        # update state so self.isremote() returns False.
        self._remotepkgs.pop(cpv, None)
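# A condensed sketch of the locked read-modify-write sequence inject() uses
# for the Packages index: take the lock, reread the index (another process
# may have changed it), mutate it, and commit atomically. lockfile(),
# unlockfile(), and atomic_ofstream() are the portage APIs used above;
# load_index and update_index are hypothetical callables standing in for
# self._load_pkgindex() and the entry manipulation.
from portage.locks import lockfile, unlockfile
from portage.util import atomic_ofstream

def locked_pkgindex_update(pkgindex_file, load_index, update_index):
    pkgindex_lock = lockfile(pkgindex_file, wantnewlockfile=1)
    try:
        pkgindex = load_index()   # reread under the lock
        update_index(pkgindex)    # add/remove entries
        f = atomic_ofstream(pkgindex_file)
        try:
            pkgindex.write(f)     # commit the new index atomically
        finally:
            f.close()
    finally:
        unlockfile(pkgindex_lock)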