def _unlinkpaths(paths):
    """Silently attempt to unlink every path; failures are ignored."""
    for target in paths:
        try:
            util.unlink(target)
        except OSError:
            # Best-effort cleanup: a missing or inaccessible file is fine.
            pass
def _cleanuptemppacks(ui, packpath):
    """Remove stale temporary pack files that needlessly consume disk space.

    In some situations temporary pack files are left around, wasting disk
    space; cases with 170GB+ of such files have been observed. Sweep away
    anything in ``packpath`` that is not a known pack/index file and is old
    enough to be deleted safely.
    """
    knownsuffixes = {
        datapack.PACKSUFFIX,
        datapack.INDEXSUFFIX,
        historypack.PACKSUFFIX,
        historypack.INDEXSUFFIX,
    }

    def _keep(path):
        """Return True when *path* must not be removed.

        Directories, the repack lock, files we cannot stat, and files
        accessed within the last 24 hours are all kept — a newish file
        could still be in use by another running command.
        """
        if os.path.isdir(path) or os.path.basename(path) == "repacklock":
            return True
        try:
            st = os.lstat(path)
        except OSError:
            # The file is either being removed or we lack access to it;
            # either way there is nothing we can do, so leave it alone.
            return True
        # struct_time tuples compare lexicographically, so this is a
        # "accessed less than 24h ago" check.
        return time.gmtime(st.st_atime + 24 * 3600) > time.gmtime()

    with progress.spinner(ui, _("cleaning old temporary files")):
        try:
            for entry in os.listdir(packpath):
                fullpath = os.path.join(packpath, entry)
                if _keep(fullpath):
                    continue
                if os.path.splitext(fullpath)[1] in knownsuffixes:
                    # A legitimate pack or index file — not temporary.
                    continue
                try:
                    util.unlink(fullpath)
                except Exception:
                    # Best-effort removal only.
                    pass
        except OSError as ex:
            # A vanished packpath is fine; anything else is a real error.
            if ex.errno != errno.ENOENT:
                raise
def _cleanupoldpacks(ui, packpath, limit):
    """Enforce a size limit on the cache.

    Packfiles are removed oldest first, on the assumption that old packfiles
    contain less useful data than new ones.
    """
    with progress.spinner(ui, _("cleaning old packs")):

        def _mtime(f):
            stat = util.lstat(f)
            return stat.st_mtime

        def _listpackfiles(path):
            """Return the full paths of all pack files directly under *path*."""
            packs = []
            try:
                for f in os.listdir(path):
                    _, ext = os.path.splitext(f)
                    if ext.endswith("pack"):
                        # Fix: join against *path* (the parameter), not the
                        # enclosing packpath, so the helper honors its own
                        # argument instead of silently depending on closure
                        # state.
                        packs.append(os.path.join(path, f))
            except OSError as ex:
                # A missing directory simply means no packs.
                if ex.errno != errno.ENOENT:
                    raise
            return packs

        # Newest first, so files.pop() below yields the oldest pack.
        files = sorted(_listpackfiles(packpath), key=_mtime, reverse=True)

        # Total cache size; use util.lstat consistently with the rest of
        # this function (the original mixed os.lstat and util.lstat).
        cachesize = 0
        for f in files:
            stat = util.lstat(f)
            cachesize += stat.st_size

        while cachesize > limit:
            f = files.pop()
            stat = util.lstat(f)

            # Don't remove files that are newer than 10 minutes. This avoids
            # a race condition where mercurial downloads files from the
            # network and expects these to be present on disk. If the 'limit'
            # is properly set, we should have removed enough files that this
            # condition won't matter.
            if time.gmtime(stat.st_mtime + 10 * 60) > time.gmtime():
                return

            root, ext = os.path.splitext(f)
            try:
                # Remove the matching index file alongside the pack file.
                if ext == datapack.PACKSUFFIX:
                    util.unlink(root + datapack.INDEXSUFFIX)
                else:
                    util.unlink(root + historypack.INDEXSUFFIX)
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    raise

            try:
                util.unlink(f)
            except OSError as ex:
                if ex.errno != errno.ENOENT:
                    raise

            cachesize -= stat.st_size
def checkouttosnapshotmetadata(ui, repo, snapmetadata, clean=True):
    """Write a snapshot's recorded files back into the working copy.

    Deletes files the snapshot marks as deleted, recreates the untracked
    files, and restores the merge-state files under the working-copy lock.
    """

    def _restorefile(store, f, vfs, overwrite):
        # Refuse to clobber an existing file unless a clean checkout was
        # requested.
        if not overwrite and vfs.exists(f.path):
            ui.note(_("skip adding %s, it exists\n") % f.path)
        else:
            ui.note(_("will add %s\n") % f.path)
            vfs.write(f.path, f.getcontent(store))

    # Delete files that should be missing.
    for f in snapmetadata.deleted:
        try:
            ui.note(_("will delete %s\n") % f.path)
            util.unlink(repo.wjoin(f.path))
        except OSError:
            ui.warn(_("%s cannot be removed\n") % f.path)

    # Recreate the untracked files.
    for f in snapmetadata.unknown:
        _restorefile(repo.svfs.snapshotstore, f, repo.wvfs, clean)

    # Restore the merge state, holding the working-copy lock.
    with repo.wlock():
        for f in snapmetadata.localvfsfiles:
            _restorefile(repo.svfs.snapshotstore, f, repo.localvfs, clean)
def applytomirrors(repo, status, sourcepath, mirrors, action):
    """Applies the changes that are in the sourcepath to all the mirrors.

    ``action`` is a single-character code ("m" modify, "a" add, "r" remove,
    judging from the status messages below). Returns the set of mirror paths
    that were considered (whether or not a change was applied to them).
    """
    mirroredfiles = set()

    # Detect which mirror this file comes from (the first configured mirror
    # that is a prefix of sourcepath).
    sourcemirror = None
    for mirror in mirrors:
        if sourcepath.startswith(mirror):
            sourcemirror = mirror
            break
    if not sourcemirror:
        raise error.Abort(
            _("unable to detect source mirror of '%s'") % (sourcepath, ))

    # Path of the file relative to its mirror root.
    relpath = sourcepath[len(sourcemirror):]

    # Apply the change to each mirror one by one
    allchanges = set(status.modified + status.removed + status.added)
    for mirror in mirrors:
        if mirror == sourcemirror:
            continue

        mirrorpath = mirror + relpath
        mirroredfiles.add(mirrorpath)
        if mirrorpath in allchanges:
            # The target already has a pending change of its own; only
            # proceed silently if it is equivalent to the source change.
            wctx = repo[None]
            if (sourcepath not in wctx and mirrorpath not in wctx
                    and sourcepath in status.removed
                    and mirrorpath in status.removed):
                # Both sides already removed: nothing to mirror.
                if repo.ui.verbose:
                    repo.ui.status(
                        _("not mirroring remove of '%s' to '%s';"
                          " it is already removed\n") % (sourcepath, mirrorpath))
                continue

            # Both files exist and have identical contents: nothing to do.
            if wctx[sourcepath].data() == wctx[mirrorpath].data():
                if repo.ui.verbose:
                    repo.ui.status(
                        _("not mirroring '%s' to '%s'; it already "
                          "matches\n") % (sourcepath, mirrorpath))
                continue

            # Conflicting pending change on the mirror target: abort rather
            # than clobber the user's work.
            raise error.Abort(
                _("path '%s' needs to be mirrored to '%s', but "
                  "the target already has pending changes") %
                (sourcepath, mirrorpath))

        fullsource = repo.wjoin(sourcepath)
        fulltarget = repo.wjoin(mirrorpath)

        dirstate = repo.dirstate
        if action == "m" or action == "a":
            # Ensure the target directory exists, then copy the file over.
            mirrorpathdir, unused = util.split(mirrorpath)
            util.makedirs(repo.wjoin(mirrorpathdir))

            util.copyfile(fullsource, fulltarget)
            # Schedule the target for add if dirstate shows it as unknown
            # ('?') or removed ('r') — presumably the usual Mercurial
            # dirstate state characters; confirm against dirstate docs.
            if dirstate[mirrorpath] in "?r":
                dirstate.add(mirrorpath)

            if action == "a":
                # For adds, detect copy data as well
                copysource = dirstate.copied(sourcepath)
                if copysource and copysource.startswith(sourcemirror):
                    # Translate the copy source into the target mirror so
                    # copy tracing is preserved there too.
                    mirrorcopysource = mirror + copysource[len(sourcemirror):]
                    dirstate.copy(mirrorcopysource, mirrorpath)
                    repo.ui.status(
                        _("mirrored copy '%s -> %s' to '%s -> %s'\n") %
                        (copysource, sourcepath, mirrorcopysource,
                         mirrorpath))
                else:
                    repo.ui.status(
                        _("mirrored adding '%s' to '%s'\n") %
                        (sourcepath, mirrorpath))
            else:
                repo.ui.status(
                    _("mirrored changes in '%s' to '%s'\n") %
                    (sourcepath, mirrorpath))
        elif action == "r":
            try:
                util.unlink(fulltarget)
            except OSError as e:
                # Already gone on disk: report and leave dirstate untouched.
                if e.errno == errno.ENOENT:
                    repo.ui.status(
                        _("not mirroring remove of '%s' to '%s'; it "
                          "is already removed\n") % (sourcepath, mirrorpath))
                else:
                    raise
            else:
                # Unlink succeeded: record the removal in the dirstate.
                dirstate.remove(mirrorpath)
                repo.ui.status(
                    _("mirrored remove of '%s' to '%s'\n") %
                    (sourcepath, mirrorpath))

    return mirroredfiles