def write(self, sign=False, force=False):
    """ Write Manifest instance to disk, optionally signing it. Returns
    True if the Manifest is actually written, and False if the write
    is skipped due to existing Manifest being identical."""
    rval = False
    if not self.allow_create:
        return rval
    self.checkIntegrity()
    try:
        myentries = list(self._createManifestEntries())
        update_manifest = True
        existing_st = None
        if myentries and not force:
            try:
                f = io.open(_unicode_encode(self.getFullname(),
                    encoding=_encodings['fs'], errors='strict'),
                    mode='r', encoding=_encodings['repo.content'],
                    errors='replace')
                oldentries = list(self._parseManifestLines(f))
                existing_st = os.fstat(f.fileno())
                f.close()
                if len(oldentries) == len(myentries):
                    update_manifest = False
                    for i in range(len(oldentries)):
                        if oldentries[i] != myentries[i]:
                            update_manifest = True
                            break
            except (IOError, OSError) as e:
                if e.errno == errno.ENOENT:
                    pass
                else:
                    raise

        if update_manifest:
            if myentries or not (self.thin or self.allow_missing):
                # If myentries is empty, don't write an empty manifest
                # when thin or allow_missing is enabled. Except for
                # thin manifests with no DIST entries, myentries is
                # non-empty for all currently known use cases.
                write_atomic(self.getFullname(), "".join(
                    "%s\n" % _unicode(myentry) for myentry in myentries))
                self._apply_max_mtime(existing_st, myentries)
                rval = True
            else:
                # With thin manifest, there's no need to have
                # a Manifest file if there are no DIST entries.
                try:
                    os.unlink(self.getFullname())
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                rval = True

        if sign:
            self.sign()
    except (IOError, OSError) as e:
        if e.errno == errno.EACCES:
            raise PermissionDenied(str(e))
        raise
    return rval

def load(self):
    """ Reload the registry data from file """
    self._data = None
    f = None
    try:
        f = open(_unicode_encode(self._filename,
            encoding=_encodings['fs'], errors='strict'), 'rb')
        if os.fstat(f.fileno()).st_size == 0:
            # ignore empty lock file
            pass
        else:
            self._data = pickle.load(f)
    except (AttributeError, EOFError, ValueError, pickle.UnpicklingError) as e:
        writemsg_level(_("!!! Error loading '%s': %s\n") %
            (self._filename, e), level=logging.ERROR, noiselevel=-1)
    except EnvironmentError as e:
        if not hasattr(e, 'errno'):
            raise
        elif e.errno == errno.ENOENT:
            pass
        elif e.errno == PermissionDenied.errno:
            raise PermissionDenied(self._filename)
        else:
            raise
    finally:
        if f is not None:
            f.close()
    if self._data is None:
        self._data = {}
    self._data_orig = self._data.copy()
    self.pruneNonExisting()

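# The defensive pattern above (treat a missing, empty, or corrupt pickle as an
# empty registry) can be illustrated with a minimal, self-contained sketch.
# The names below (load_registry, path) are hypothetical and not part of the
# code above; the sketch only demonstrates the same fstat-based empty-file
# check and the same exception handling, using only the standard library.
import errno
import os
import pickle


def load_registry(path):
    """Return the pickled dict at path, or {} if the file is absent,
    empty, or unreadable as a pickle."""
    try:
        with open(path, "rb") as f:
            # An empty file is valid here; it simply means "no data yet".
            if os.fstat(f.fileno()).st_size == 0:
                return {}
            return pickle.load(f)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return {}
        raise
    except (EOFError, ValueError, pickle.UnpicklingError):
        # Corrupt contents are treated the same as no data.
        return {}
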
def _fstat_nlink(fd):
    """
    @param fd: an open file descriptor
    @type fd: Integer
    @rtype: Integer
    @return: the current number of hardlinks to the file
    """
    try:
        return os.fstat(fd).st_nlink
    except EnvironmentError as e:
        if e.errno in (errno.ENOENT, errno.ESTALE):
            # Some filesystems such as CIFS return
            # ENOENT which means st_nlink == 0.
            return 0
        raise

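# A minimal sketch of how an st_nlink check like the helper above is typically
# used in a hardlink-based lock: link a private name to the shared lock file
# and consider the lock acquired only if the resulting link count is exactly
# 2. The names (try_hardlink_lock, lock_path, private_path) are illustrative
# and not part of the function above.
import os


def try_hardlink_lock(lock_path, private_path):
    """Return True if a private hardlink to lock_path was created and the
    resulting link count is 2 (i.e. no competing link exists)."""
    os.link(lock_path, private_path)
    fd = os.open(private_path, os.O_RDONLY)
    try:
        if os.fstat(fd).st_nlink == 2:
            return True
    finally:
        os.close(fd)
    # Lost the race; drop our link and let the caller retry.
    os.unlink(private_path)
    return False
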
def _getitem(self, cpv):
    d = {}
    try:
        myf = codecs.open(_unicode_encode(
            os.path.join(self.location, cpv),
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['repo.content'],
            errors='replace')
        for k, v in zip(self.auxdbkey_order, myf):
            d[k] = v.rstrip("\n")
    except (OSError, IOError) as e:
        if errno.ENOENT == e.errno:
            raise KeyError(cpv)
        raise cache_errors.CacheCorruption(cpv, e)

    try:
        d["_mtime_"] = os.fstat(myf.fileno())[stat.ST_MTIME]
    except OSError as e:
        myf.close()
        raise cache_errors.CacheCorruption(cpv, e)
    myf.close()
    return d

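# The read side above relies on a fixed key order: line i of the cache file is
# the value for auxdbkey_order[i], and the file's mtime is stored under the
# "_mtime_" key. A tiny standalone sketch of that parsing convention (function
# and parameter names are illustrative only):
import os
import stat


def read_flat_cache(path, key_order):
    """Parse a line-per-key cache file into a dict and record its mtime."""
    d = {}
    with open(path, "r", encoding="utf-8", errors="replace") as f:
        for k, line in zip(key_order, f):
            d[k] = line.rstrip("\n")
        d["_mtime_"] = os.fstat(f.fileno())[stat.ST_MTIME]
    return d
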
def _test_lock_fn(
    lock_fn: typing.Callable[[str, int, int], typing.Callable[[], None]]
) -> bool:
    def _test_lock(fd, lock_path):
        os.close(fd)
        try:
            with open(lock_path, "a") as f:
                lock_fn(lock_path, f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except (TryAgain, EnvironmentError) as e:
            if isinstance(e, TryAgain) or e.errno == errno.EAGAIN:
                # Parent process holds lock, as expected.
                sys.exit(0)

        # Something went wrong.
        sys.exit(1)

    fd, lock_path = tempfile.mkstemp()
    unlock_fn = None
    try:
        try:
            unlock_fn = lock_fn(lock_path, fd, fcntl.LOCK_EX)
        except (TryAgain, EnvironmentError):
            pass
        else:
            _lock_manager(fd, os.fstat(fd), lock_path)
            proc = multiprocessing.Process(target=_test_lock, args=(fd, lock_path))
            proc.start()
            proc.join()
            if proc.exitcode == os.EX_OK:
                # the test passed
                return True
    finally:
        try:
            os.unlink(lock_path)
        except OSError:
            pass
        if unlock_fn is not None:
            unlock_fn()
    return False

def hardlink_lockfile(lockfilename, max_wait=DeprecationWarning,
    waiting_msg=None, flags=0):
    """Does the NFS, hardlink shuffle to ensure locking on the disk.
    We create a PRIVATE hardlink to the real lockfile, that is just a
    placeholder on the disk.
    If our file has 2 references, then we have the lock. :)
    Otherwise we lather, rinse, and repeat.
    """

    if max_wait is not DeprecationWarning:
        warnings.warn("The 'max_wait' parameter of "
            "portage.locks.hardlink_lockfile() is now unused. Use "
            "flags=os.O_NONBLOCK instead.",
            DeprecationWarning, stacklevel=2)

    global _quiet
    out = None
    displayed_waiting_msg = False
    preexisting = os.path.exists(lockfilename)
    myhardlock = hardlock_name(lockfilename)

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # myhardlock must not exist prior to our link() call, and we can
    # safely unlink it since its file name is unique to our PID
    try:
        os.unlink(myhardlock)
    except OSError as e:
        if e.errno in (errno.ENOENT, errno.ESTALE):
            pass
        else:
            func_call = "unlink('%s')" % myhardlock
            if e.errno == OperationNotPermitted.errno:
                raise OperationNotPermitted(func_call)
            elif e.errno == PermissionDenied.errno:
                raise PermissionDenied(func_call)
            else:
                raise

    while True:
        # create lockfilename if it doesn't exist yet
        try:
            myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
        except OSError as e:
            func_call = "open('%s')" % lockfilename
            if e.errno == OperationNotPermitted.errno:
                raise OperationNotPermitted(func_call)
            elif e.errno == PermissionDenied.errno:
                raise PermissionDenied(func_call)
            elif e.errno == ReadOnlyFileSystem.errno:
                raise ReadOnlyFileSystem(func_call)
            else:
                raise
        else:
            myfd_st = None
            try:
                myfd_st = os.fstat(myfd)
                if not preexisting:
                    # Don't chown the file if it is preexisting, since we
                    # want to preserve existing permissions in that case.
                    if myfd_st.st_gid != portage_gid:
                        os.fchown(myfd, -1, portage_gid)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    writemsg("%s: fchown('%s', -1, %d)\n" %
                        (e, lockfilename, portage_gid), noiselevel=-1)
                    writemsg(_("Cannot chown a lockfile: '%s'\n") %
                        lockfilename, noiselevel=-1)
                    writemsg(_("Group IDs of current user: %s\n") %
                        " ".join(str(n) for n in os.getgroups()),
                        noiselevel=-1)
                else:
                    # another process has removed the file, so we'll have
                    # to create it again
                    continue
            finally:
                os.close(myfd)

        # If fstat shows more than one hardlink, then it's extremely
        # unlikely that the following link call will result in a lock,
        # so optimize away the wasteful link call and sleep or raise
        # TryAgain.
        if myfd_st is not None and myfd_st.st_nlink < 2:
            try:
                os.link(lockfilename, myhardlock)
            except OSError as e:
                func_call = "link('%s', '%s')" % (lockfilename, myhardlock)
                if e.errno == OperationNotPermitted.errno:
                    raise OperationNotPermitted(func_call)
                elif e.errno == PermissionDenied.errno:
                    raise PermissionDenied(func_call)
                elif e.errno in (errno.ESTALE, errno.ENOENT):
                    # another process has removed the file, so we'll have
                    # to create it again
                    continue
                else:
                    raise
            else:
                if hardlink_is_mine(myhardlock, lockfilename):
                    if out is not None:
                        out.eend(os.EX_OK)
                    break

                try:
                    os.unlink(myhardlock)
                except OSError as e:
                    # This should not happen, since the file name of
                    # myhardlock is unique to our host and PID,
                    # and the above link() call succeeded.
                    if e.errno not in (errno.ENOENT, errno.ESTALE):
                        raise
                    raise FileNotFound(myhardlock)

        if flags & os.O_NONBLOCK:
            raise TryAgain(lockfilename)

        if out is None and not _quiet:
            out = portage.output.EOutput()
        if out is not None and not displayed_waiting_msg:
            displayed_waiting_msg = True
            if waiting_msg is None:
                waiting_msg = _("waiting for lock on %s\n") % lockfilename
            out.ebegin(waiting_msg)

        time.sleep(_HARDLINK_POLL_LATENCY)

    return True

def _setitem(self, cpv, values):
    if "_eclasses_" in values:
        values = ProtectedDict(values)
        values["INHERITED"] = ' '.join(sorted(values["_eclasses_"]))

    new_content = []
    for k in self.auxdbkey_order:
        new_content.append(values.get(k, ''))
        new_content.append('\n')
    for i in range(magic_line_count - len(self.auxdbkey_order)):
        new_content.append('\n')
    new_content = ''.join(new_content)
    new_content = _unicode_encode(new_content,
        _encodings['repo.content'], errors='backslashreplace')

    new_fp = os.path.join(self.location, cpv)
    try:
        f = open(_unicode_encode(new_fp,
            encoding=_encodings['fs'], errors='strict'), 'rb')
    except EnvironmentError:
        pass
    else:
        try:
            try:
                existing_st = os.fstat(f.fileno())
                existing_content = f.read()
            finally:
                f.close()
        except EnvironmentError:
            pass
        else:
            existing_mtime = existing_st[stat.ST_MTIME]
            if values['_mtime_'] == existing_mtime and \
                existing_content == new_content:
                return

            if self.raise_stat_collision and \
                values['_mtime_'] == existing_mtime and \
                len(new_content) == existing_st.st_size:
                raise cache_errors.StatCollision(cpv, new_fp,
                    existing_mtime, existing_st.st_size)

    s = cpv.rfind("/")
    fp = os.path.join(self.location, cpv[:s],
        ".update.%i.%s" % (os.getpid(), cpv[s + 1:]))
    try:
        myf = open(_unicode_encode(fp,
            encoding=_encodings['fs'], errors='strict'), 'wb')
    except EnvironmentError as e:
        if errno.ENOENT == e.errno:
            try:
                self._ensure_dirs(cpv)
                myf = open(_unicode_encode(fp,
                    encoding=_encodings['fs'], errors='strict'), 'wb')
            except EnvironmentError as e:
                raise cache_errors.CacheCorruption(cpv, e)
        else:
            raise cache_errors.CacheCorruption(cpv, e)

    try:
        myf.write(new_content)
    finally:
        myf.close()
    self._ensure_access(fp, mtime=values["_mtime_"])

    try:
        os.rename(fp, new_fp)
    except EnvironmentError as e:
        try:
            os.unlink(fp)
        except EnvironmentError:
            pass
        raise cache_errors.CacheCorruption(cpv, e)

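# The method above combines two ideas: skip the write when the on-disk content
# and mtime already match, and otherwise replace the file atomically via a
# temporary file plus rename(). A minimal standalone sketch of that pattern
# follows; the function and variable names are illustrative only.
import os
import tempfile


def atomic_update(path, new_content: bytes):
    """Write new_content to path atomically, skipping the write entirely
    if the file already holds exactly this content."""
    try:
        with open(path, "rb") as f:
            if f.read() == new_content:
                return False  # nothing to do
    except FileNotFoundError:
        pass

    # Write to a temporary file in the same directory so the final rename()
    # stays on one filesystem and is therefore atomic.
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    try:
        with os.fdopen(fd, "wb") as f:
            f.write(new_content)
        os.rename(tmp_path, path)
        return True
    except BaseException:
        os.unlink(tmp_path)
        raise
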
def hardlink_lockfile(lockfilename, max_wait=DeprecationWarning,
    waiting_msg=None, flags=0):
    """Does the NFS, hardlink shuffle to ensure locking on the disk.
    We create a PRIVATE hardlink to the real lockfile, that is just a
    placeholder on the disk.
    If our file has 2 references, then we have the lock. :)
    Otherwise we lather, rinse, and repeat.
    """

    if max_wait is not DeprecationWarning:
        warnings.warn(
            "The 'max_wait' parameter of "
            "portage.locks.hardlink_lockfile() is now unused. Use "
            "flags=os.O_NONBLOCK instead.",
            DeprecationWarning,
            stacklevel=2)

    global _quiet
    out = None
    displayed_waiting_msg = False
    preexisting = os.path.exists(lockfilename)
    myhardlock = hardlock_name(lockfilename)

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # myhardlock must not exist prior to our link() call, and we can
    # safely unlink it since its file name is unique to our PID
    try:
        os.unlink(myhardlock)
    except OSError as e:
        if e.errno in (errno.ENOENT, errno.ESTALE):
            pass
        else:
            func_call = "unlink('%s')" % myhardlock
            if e.errno == OperationNotPermitted.errno:
                raise OperationNotPermitted(func_call)
            elif e.errno == PermissionDenied.errno:
                raise PermissionDenied(func_call)
            else:
                raise

    while True:
        # create lockfilename if it doesn't exist yet
        try:
            myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
        except OSError as e:
            func_call = "open('%s')" % lockfilename
            if e.errno == OperationNotPermitted.errno:
                raise OperationNotPermitted(func_call)
            elif e.errno == PermissionDenied.errno:
                raise PermissionDenied(func_call)
            elif e.errno == ReadOnlyFileSystem.errno:
                raise ReadOnlyFileSystem(func_call)
            else:
                raise
        else:
            myfd_st = None
            try:
                myfd_st = os.fstat(myfd)
                if not preexisting:
                    # Don't chown the file if it is preexisting, since we
                    # want to preserve existing permissions in that case.
                    if portage.data.secpass >= 1 and myfd_st.st_gid != portage_gid:
                        os.fchown(myfd, -1, portage_gid)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    writemsg("%s: fchown('%s', -1, %d)\n" %
                        (e, lockfilename, portage_gid), noiselevel=-1)
                    writemsg(_("Cannot chown a lockfile: '%s'\n") %
                        lockfilename, noiselevel=-1)
                    writemsg(_("Group IDs of current user: %s\n") %
                        " ".join(str(n) for n in os.getgroups()),
                        noiselevel=-1)
                else:
                    # another process has removed the file, so we'll have
                    # to create it again
                    continue
            finally:
                os.close(myfd)

        # If fstat shows more than one hardlink, then it's extremely
        # unlikely that the following link call will result in a lock,
        # so optimize away the wasteful link call and sleep or raise
        # TryAgain.
        if myfd_st is not None and myfd_st.st_nlink < 2:
            try:
                os.link(lockfilename, myhardlock)
            except OSError as e:
                func_call = "link('%s', '%s')" % (lockfilename, myhardlock)
                if e.errno == OperationNotPermitted.errno:
                    raise OperationNotPermitted(func_call)
                elif e.errno == PermissionDenied.errno:
                    raise PermissionDenied(func_call)
                elif e.errno in (errno.ESTALE, errno.ENOENT):
                    # another process has removed the file, so we'll have
                    # to create it again
                    continue
                else:
                    raise
            else:
                if hardlink_is_mine(myhardlock, lockfilename):
                    if out is not None:
                        out.eend(os.EX_OK)
                    break

                try:
                    os.unlink(myhardlock)
                except OSError as e:
                    # This should not happen, since the file name of
                    # myhardlock is unique to our host and PID,
                    # and the above link() call succeeded.
                    if e.errno not in (errno.ENOENT, errno.ESTALE):
                        raise
                    raise FileNotFound(myhardlock)

        if flags & os.O_NONBLOCK:
            raise TryAgain(lockfilename)

        if out is None and not _quiet:
            out = portage.output.EOutput()
        if out is not None and not displayed_waiting_msg:
            displayed_waiting_msg = True
            if waiting_msg is None:
                waiting_msg = _("waiting for lock on %s\n") % lockfilename
            out.ebegin(waiting_msg)

        time.sleep(_HARDLINK_POLL_LATENCY)

    return True

def _lockfile_was_removed(lock_fd, lock_path):
    """
    Check if lock_fd still refers to a file located at lock_path, since
    the file may have been removed by a concurrent process that held the
    lock earlier. This implementation includes support for NFS, where
    stat is not reliable for removed files due to the default file
    attribute cache behavior ('ac' mount option).

    @param lock_fd: an open file descriptor for a lock file
    @type lock_fd: int
    @param lock_path: path of lock file
    @type lock_path: str
    @rtype: bool
    @return: True if lock_path has been removed or no longer corresponds
        to lock_fd, False otherwise
    """
    try:
        fstat_st = os.fstat(lock_fd)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ESTALE):
            _raise_exc(e)
        return True

    # Since stat is not reliable for removed files on NFS with the default
    # file attribute cache behavior ('ac' mount option), create a temporary
    # hardlink in order to prove that the file path exists on the NFS server.
    hardlink_path = hardlock_name(lock_path)
    try:
        os.unlink(hardlink_path)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ESTALE):
            _raise_exc(e)
    try:
        try:
            os.link(lock_path, hardlink_path)
        except OSError as e:
            if e.errno not in (errno.ENOENT, errno.ESTALE):
                _raise_exc(e)
            return True

        hardlink_stat = os.stat(hardlink_path)
        if hardlink_stat.st_ino != fstat_st.st_ino or \
            hardlink_stat.st_dev != fstat_st.st_dev:
            # Create another hardlink in order to detect whether or not
            # hardlink inode numbers are expected to match. For example,
            # inode numbers are not expected to match for sshfs.
            inode_test = hardlink_path + '-inode-test'
            try:
                os.unlink(inode_test)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    _raise_exc(e)
            try:
                os.link(hardlink_path, inode_test)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    _raise_exc(e)
                return True
            else:
                if not os.path.samefile(hardlink_path, inode_test):
                    # This implies that inode numbers are not expected
                    # to match for this file system, so use a simple
                    # stat call to detect if lock_path has been removed.
                    return not os.path.exists(lock_path)
            finally:
                try:
                    os.unlink(inode_test)
                except OSError as e:
                    if e.errno not in (errno.ENOENT, errno.ESTALE):
                        _raise_exc(e)
            return True
    finally:
        try:
            os.unlink(hardlink_path)
        except OSError as e:
            if e.errno not in (errno.ENOENT, errno.ESTALE):
                _raise_exc(e)
    return False

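# On local filesystems the same question ("does this fd still correspond to
# this path?") can be answered with a plain stat comparison; the hardlink
# dance above is only needed to defeat the NFS attribute cache. A minimal
# sketch of the simple case follows; the helper name is illustrative.
import os


def fd_matches_path(fd, path):
    """Return True if fd and path refer to the same inode on the same
    device, False if the path was removed or replaced."""
    try:
        path_st = os.stat(path)
    except FileNotFoundError:
        return False
    fd_st = os.fstat(fd)
    return (fd_st.st_dev, fd_st.st_ino) == (path_st.st_dev, path_st.st_ino)
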
def _lockfile_iteration(mypath, wantnewlockfile=False, unlinkfile=False,
    waiting_msg=None, flags=0):
    """
    Acquire a lock on mypath, without retry. Return None if the lockfile
    was removed by previous lock holder (caller must retry).

    @param mypath: lock file path
    @type mypath: str
    @param wantnewlockfile: use a separate new lock file
    @type wantnewlockfile: bool
    @param unlinkfile: remove lock file prior to unlock
    @type unlinkfile: bool
    @param waiting_msg: message to show before blocking
    @type waiting_msg: str
    @param flags: lock flags (only supports os.O_NONBLOCK)
    @type flags: int
    @rtype: tuple or None
    @return: unlockfile tuple on success, None if retry is needed
    """
    if not mypath:
        raise InvalidData(_("Empty path given"))

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # Support for file object or integer file descriptor parameters is
    # deprecated due to ambiguity in whether or not it's safe to close
    # the file descriptor, making it prone to "Bad file descriptor" errors
    # or file descriptor leaks.
    if isinstance(mypath, str) and mypath[-1] == "/":
        mypath = mypath[:-1]

    lockfilename_path = mypath
    if hasattr(mypath, "fileno"):
        warnings.warn(
            "portage.locks.lockfile() support for "
            "file object parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        lockfilename_path = getattr(mypath, "name", None)
        mypath = mypath.fileno()
    if isinstance(mypath, int):
        warnings.warn(
            "portage.locks.lockfile() support for integer file "
            "descriptor parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        lockfilename = mypath
        wantnewlockfile = 0
        unlinkfile = 0
    elif wantnewlockfile:
        base, tail = os.path.split(mypath)
        lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
        lockfilename_path = lockfilename
        unlinkfile = 1
    else:
        lockfilename = mypath

    if isinstance(mypath, str):
        if not os.path.exists(os.path.dirname(mypath)):
            raise DirectoryNotFound(os.path.dirname(mypath))
        preexisting = os.path.exists(lockfilename)
        old_mask = os.umask(000)
        try:
            while True:
                try:
                    myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
                except OSError as e:
                    if e.errno in (errno.ENOENT, errno.ESTALE) and os.path.isdir(
                        os.path.dirname(lockfilename)
                    ):
                        # Retry required for NFS (see bug 636798).
                        continue
                    else:
                        _raise_exc(e)
                else:
                    break

            if not preexisting:
                try:
                    if (
                        portage.data.secpass >= 1
                        and os.stat(lockfilename).st_gid != portage_gid
                    ):
                        os.chown(lockfilename, -1, portage_gid)
                except OSError as e:
                    if e.errno in (errno.ENOENT, errno.ESTALE):
                        os.close(myfd)
                        return None

                    writemsg(
                        "%s: chown('%s', -1, %d)\n" % (e, lockfilename, portage_gid),
                        noiselevel=-1,
                    )
                    writemsg(
                        _("Cannot chown a lockfile: '%s'\n") % lockfilename,
                        noiselevel=-1,
                    )
                    writemsg(
                        _("Group IDs of current user: %s\n")
                        % " ".join(str(n) for n in os.getgroups()),
                        noiselevel=-1,
                    )
        finally:
            os.umask(old_mask)
    elif isinstance(mypath, int):
        myfd = mypath
    else:
        raise ValueError(
            _("Unknown type passed in '%s': '%s'") % (type(mypath), mypath)
        )

    # try for a non-blocking lock, if it's held, throw a message
    # we're waiting on lockfile and use a blocking attempt.
    locking_method = portage._eintr_func_wrapper(_get_lock_fn())
    try:
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            raise IOError(errno.ENOSYS, "Function not implemented")
        locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if not hasattr(e, "errno"):
            raise
        if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
            # resource temp unavailable; eg, someone beat us to the lock.
            if flags & os.O_NONBLOCK:
                os.close(myfd)
                raise TryAgain(mypath)

            global _quiet
            if _quiet:
                out = None
            else:
                out = portage.output.EOutput()
            if waiting_msg is None:
                if isinstance(mypath, int):
                    waiting_msg = _("waiting for lock on fd %i") % myfd
                else:
                    waiting_msg = _("waiting for lock on %s") % lockfilename
            if out is not None:
                out.ebegin(waiting_msg)

            # try for the exclusive lock now.
            enolock_msg_shown = False
            while True:
                try:
                    locking_method(myfd, fcntl.LOCK_EX)
                except EnvironmentError as e:
                    if e.errno == errno.ENOLCK:
                        # This is known to occur on Solaris NFS (see
                        # bug #462694). Assume that the error is due
                        # to temporary exhaustion of record locks,
                        # and loop until one becomes available.
                        if not enolock_msg_shown:
                            enolock_msg_shown = True
                            if isinstance(mypath, int):
                                context_desc = (
                                    _("Error while waiting to lock fd %i") % myfd
                                )
                            else:
                                context_desc = (
                                    _("Error while waiting to lock '%s'")
                                    % lockfilename
                                )
                            writemsg(
                                "\n!!! %s: %s\n" % (context_desc, e), noiselevel=-1
                            )

                        time.sleep(_HARDLINK_POLL_LATENCY)
                        continue

                    if out is not None:
                        out.eend(1, str(e))
                    raise
                else:
                    break

            if out is not None:
                out.eend(os.EX_OK)
        elif e.errno in (errno.ENOSYS,):
            # We're not allowed to lock on this FS.
            if not isinstance(lockfilename, int):
                # If a file object was passed in, it's not safe
                # to close the file descriptor because it may
                # still be in use.
                os.close(myfd)
            lockfilename_path = _unicode_decode(
                lockfilename_path, encoding=_encodings["fs"], errors="strict"
            )
            if not isinstance(lockfilename_path, str):
                raise
            link_success = hardlink_lockfile(
                lockfilename_path, waiting_msg=waiting_msg, flags=flags
            )
            if not link_success:
                raise
            lockfilename = lockfilename_path
            locking_method = None
            myfd = HARDLINK_FD
        else:
            raise

    fstat_result = None
    if isinstance(lockfilename, str) and myfd != HARDLINK_FD and unlinkfile:
        try:
            (removed, fstat_result) = _lockfile_was_removed(myfd, lockfilename)
        except Exception:
            # Do not leak the file descriptor here.
            os.close(myfd)
            raise
        else:
            if removed:
                # Removed by previous lock holder... Caller will retry...
                os.close(myfd)
                return None

    if myfd != HARDLINK_FD:
        _lock_manager(
            myfd, os.fstat(myfd) if fstat_result is None else fstat_result, mypath
        )

    writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
    return (lockfilename, myfd, unlinkfile, locking_method)

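# The core locking step above is an fcntl advisory lock: try LOCK_EX|LOCK_NB
# first so a busy lock can be reported to the user, then fall back to a
# blocking LOCK_EX. A minimal standalone sketch of that two-step acquisition
# (names are illustrative and none of the portage helpers are used):
import errno
import fcntl
import os


def acquire_lock(path, nonblock_only=False):
    """Open path and take an exclusive fcntl lock on it. Returns the locked
    file descriptor; re-raises the EAGAIN/EACCES error if the lock is held
    elsewhere and nonblock_only is True."""
    fd = os.open(path, os.O_CREAT | os.O_RDWR, 0o660)
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError as e:
        if e.errno not in (errno.EACCES, errno.EAGAIN):
            os.close(fd)
            raise
        if nonblock_only:
            os.close(fd)
            raise
        # Someone else holds the lock; wait for it with a blocking request.
        fcntl.lockf(fd, fcntl.LOCK_EX)
    return fd
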
def write(self, sign=False, force=False):
    """ Write Manifest instance to disk, optionally signing it. Returns
    True if the Manifest is actually written, and False if the write
    is skipped due to existing Manifest being identical."""
    rval = False
    if not self.allow_create:
        return rval
    self.checkIntegrity()
    try:
        myentries = list(self._createManifestEntries())
        update_manifest = True
        preserved_stats = {}
        preserved_stats[self.pkgdir.rstrip(os.sep)] = os.stat(self.pkgdir)
        if myentries and not force:
            try:
                f = io.open(_unicode_encode(self.getFullname(),
                    encoding=_encodings['fs'], errors='strict'),
                    mode='r', encoding=_encodings['repo.content'],
                    errors='replace')
                oldentries = list(self._parseManifestLines(f))
                preserved_stats[self.getFullname()] = os.fstat(f.fileno())
                f.close()
                if len(oldentries) == len(myentries):
                    update_manifest = False
                    for i in range(len(oldentries)):
                        if oldentries[i] != myentries[i]:
                            update_manifest = True
                            break
            except (IOError, OSError) as e:
                if e.errno == errno.ENOENT:
                    pass
                else:
                    raise

        if update_manifest:
            if myentries or not (self.thin or self.allow_missing):
                # If myentries is empty, don't write an empty manifest
                # when thin or allow_missing is enabled. Except for
                # thin manifests with no DIST entries, myentries is
                # non-empty for all currently known use cases.
                write_atomic(self.getFullname(), "".join(
                    "%s\n" % str(myentry) for myentry in myentries))
                self._apply_max_mtime(preserved_stats, myentries)
                rval = True
            else:
                # With thin manifest, there's no need to have
                # a Manifest file if there are no DIST entries.
                try:
                    os.unlink(self.getFullname())
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                rval = True

        if sign:
            self.sign()
    except (IOError, OSError) as e:
        if e.errno == errno.EACCES:
            raise PermissionDenied(str(e))
        raise
    return rval
