def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                     repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone. It also gives us an easy
    # way to forcibly rebuild largefiles state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(lfile):
                    lfdirstate.normal(lfile)
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
        lfdirstate.write()
def _move_pending(ui, repo, bfdirstate, ctx, standin, filename):
    '''
    Update bfiles administrative area (.hg/bfiles) to reflect a commit
    that affects the big file filename (tracked by standin).
    Specifically:
      - if the big file was added/modified by this changeset, move the
        pending revision from .hg/bfiles/pending to .hg/bfiles/committed
        (reflects state change from pending/uncommitted to
        pending/committed)
      - if the big file was removed by this changeset, remove it from
        .hg/bfiles/dirstate
    '''
    try:
        fctx = ctx[standin]
    except error.LookupError:
        # Standin file not in this changeset: it was removed. Make
        # sure the bfiles dirstate no longer tracks it.
        dirstate_drop(bfdirstate, _split_standin(standin))
        return

    hash = fctx.data()[0:40]
    pending = repo.join(os.path.join('bfiles', 'pending', filename, hash))
    if os.path.exists(pending):
        committed = repo.join(os.path.join(
            'bfiles', 'committed', filename, hash))
        util.makedirs(os.path.dirname(committed))
        ui.debug('moving %s -> %s\n' % (pending, committed))
        os.rename(pending, committed)
        try:
            os.removedirs(os.path.dirname(pending))
        except OSError:
            # probably not empty, so ignore it
            pass
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                     repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                if err.errno != errno.ENOENT:
                    raise
def putlfile(repo, proto, sha):
    '''Server command for putting a largefile into a repository's local
    store and into the user cache.'''
    with proto.mayberedirectstdio() as output:
        path = lfutil.storepath(repo, sha)
        util.makedirs(os.path.dirname(path))
        tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)

        try:
            for p in proto.getpayload():
                tmpfp.write(p)
            tmpfp._fp.seek(0)
            if sha != lfutil.hexsha1(tmpfp._fp):
                raise IOError(0, _(b'largefile contents do not match hash'))
            tmpfp.close()
            lfutil.linktousercache(repo, sha)
        except IOError as e:
            repo.ui.warn(
                _(b'largefiles: failed to put %s into store: %s\n')
                % (sha, e.strerror))
            return wireprototypes.pushres(
                1, output.getvalue() if output else b'')
        finally:
            tmpfp.discard()

    return wireprototypes.pushres(0, output.getvalue() if output else b'')
def putlfile(repo, proto, sha):
    '''Server command for putting a largefile into a repository's local
    store and into the user cache.'''
    proto.redirect()

    path = lfutil.storepath(repo, sha)
    util.makedirs(os.path.dirname(path))
    tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)

    try:
        proto.getfile(tmpfp)
        tmpfp._fp.seek(0)
        if sha != lfutil.hexsha1(tmpfp._fp):
            raise IOError(0, _('largefile contents do not match hash'))
        tmpfp.close()
        lfutil.linktousercache(repo, sha)
    except IOError as e:
        repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
                     (sha, e.strerror))
        return wireproto.pushres(1)
    finally:
        tmpfp.discard()

    return wireproto.pushres(0)
def _gethash(self, filename, hash):
    """Get file with the provided hash and store it in the local repo's
    store and in the usercache.
    filename is for informational messages only.
    """
    util.makedirs(lfutil.storepath(self.repo, ''))
    storefilename = lfutil.storepath(self.repo, hash)

    tmpname = storefilename + '.tmp'
    with util.atomictempfile(
            tmpname, createmode=self.repo.store.createmode) as tmpfile:
        try:
            gothash = self._getfile(tmpfile, filename, hash)
        except StoreError as err:
            self.ui.warn(err.longmessage())
            gothash = ""

    if gothash != hash:
        if gothash != "":
            self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                         % (filename, hash, gothash))
        util.unlink(tmpname)
        return False

    util.rename(tmpname, storefilename)
    lfutil.linktousercache(self.repo, hash)
    return True
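# A minimal, self-contained sketch (stdlib only, hypothetical names, not the
# Mercurial API) of the pattern the _gethash() variants above rely on: write
# the download to a temporary file in the destination directory, verify its
# SHA-1 against the expected hash, and only then rename it into place, so the
# store never contains a partially written or corrupt file.
import hashlib
import os
import tempfile


def fetch_verified(fetch, storefilename, expected_sha1):
    """fetch is any callable that writes the payload into a file object;
    install the file under storefilename only if its SHA-1 matches."""
    dirname = os.path.dirname(storefilename) or '.'
    fd, tmpname = tempfile.mkstemp(dir=dirname, suffix='.tmp')
    try:
        with os.fdopen(fd, 'wb') as tmpfile:
            fetch(tmpfile)
        with open(tmpname, 'rb') as tmpfile:
            gothash = hashlib.sha1(tmpfile.read()).hexdigest()
        if gothash != expected_sha1:
            os.unlink(tmpname)  # corrupt download: discard, keep store clean
            return False
        os.rename(tmpname, storefilename)  # atomic on POSIX filesystems
        return True
    except Exception:
        if os.path.exists(tmpname):
            os.unlink(tmpname)
        raise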
def putlfile(repo, proto, sha):
    '''Put a largefile into a repository's local store and into the
    user cache.'''
    proto.redirect()

    path = lfutil.storepath(repo, sha)
    util.makedirs(os.path.dirname(path))
    tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)

    try:
        try:
            proto.getfile(tmpfp)
            tmpfp._fp.seek(0)
            if sha != lfutil.hexsha1(tmpfp._fp):
                raise IOError(0, _('largefile contents do not match hash'))
            tmpfp.close()
            lfutil.linktousercache(repo, sha)
        except IOError, e:
            repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
                         (sha, e.strerror))
            return wireproto.pushres(1)
    finally:
        tmpfp.discard()

    return wireproto.pushres(0)
def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
    """context needed to perform (fast) annotate on a file

    an annotatecontext of a single file consists of two structures: the
    linelog and the revmap. this function takes care of locking. only 1
    process is allowed to write that file's linelog and revmap at a time.

    when something goes wrong, this function will assume the linelog and
    the revmap are in a bad state, and remove them from disk.

    use this function in the following way:

        with annotatecontext(...) as actx:
            actx. ....
    """
    helper = pathhelper(repo, path, opts)
    util.makedirs(helper.dirname)
    revmappath = helper.revmappath
    linelogpath = helper.linelogpath
    actx = None
    try:
        with helper.lock():
            actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
            if rebuild:
                actx.rebuild()
            yield actx
    except Exception:
        if actx is not None:
            actx.rebuild()
        repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
        raise
    finally:
        if actx is not None:
            actx.close()
def _gethash(self, filename, hash):
    """Get file with the provided hash and store it in the local repo's
    store and in the usercache.
    filename is for informational messages only.
    """
    util.makedirs(lfutil.storepath(self.repo, ''))
    storefilename = lfutil.storepath(self.repo, hash)

    tmpname = storefilename + '.tmp'
    tmpfile = util.atomictempfile(tmpname,
                                  createmode=self.repo.store.createmode)

    try:
        gothash = self._getfile(tmpfile, filename, hash)
    except StoreError as err:
        self.ui.warn(err.longmessage())
        gothash = ""
    tmpfile.close()

    if gothash != hash:
        if gothash != "":
            self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                         % (filename, hash, gothash))
        util.unlink(tmpname)
        return False

    util.rename(tmpname, storefilename)
    lfutil.linktousercache(self.repo, hash)
    return True
def _updatebigrepo(ui, repo, files, brepo, bigfiles, ds):
    for file in files:
        f = repo.wjoin(file)
        hash = accelerated_hash(repo, file, os.lstat(f), ds)
        bigfiles[file] = hash
        rf = "%s/%s.%s" % (brepo, file, hash)
        util.makedirs(os.path.dirname(rf))
        try:
            ext = f.split('.')[-1]
            dont_pack = ['gz', 'zip', 'tgz', '7z', 'jpg', 'jpeg', 'gif',
                         'mpg', 'mpeg', 'avi', 'rar', 'cab']
            if ext in dont_pack:
                util.copyfile(f, rf)
            else:
                fo = open(f, 'rb')
                rfo_fileobj = open(rf + '.gz', 'wb')
                rfo = gzip.GzipFile(file + '.' + hash, 'wb', 9, rfo_fileobj)
                def read10Mb():
                    return fo.read(1024 * 1024 * 10)
                for chunk in iter(read10Mb, ''):
                    rfo.write(chunk)
                fo.close()
                rfo.close()
                rfo_fileobj.close()
        except:
            ui.write(_('failed to store %s\n') % f)
def copytostoreabsolute(repo, file, hash):
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        shutil.copyfile(file, storepath(repo, hash))
        os.chmod(storepath(repo, hash), os.stat(file).st_mode)
    linktousercache(repo, hash)
def get(self, files):
    '''Get the specified big files from the store and write to local
    files under repo.root.  files is a list of (filename, hash)
    tuples.  Return (success, missing), lists of files successfully
    downloaded and those not found in the store.  success is a list
    of (filename, hash) tuples; missing is a list of filenames that
    we could not get.  (The detailed error message will already have
    been presented to the user, so missing is just supplied as a
    summary.)'''
    success = []
    missing = []
    ui = self.ui

    at = 0
    for filename, hash in files:
        ui.progress(_('Getting kbfiles'), at, unit='kbfile',
                    total=len(files))
        at += 1
        ui.note(_('getting %s\n') % filename)
        outfilename = self.repo.wjoin(filename)
        destdir = os.path.dirname(outfilename)
        util.makedirs(destdir)
        if not os.path.isdir(destdir):
            self.abort(error.RepoError(_('cannot create dest directory %s')
                                       % destdir))

        # No need to pass mode='wb' to fdopen(), since mkstemp() already
        # opened the file in binary mode.
        (tmpfd, tmpfilename) = tempfile.mkstemp(
            dir=destdir, prefix=os.path.basename(filename))
        tmpfile = os.fdopen(tmpfd, 'w')

        try:
            bhash = self._getfile(tmpfile, filename, hash)
        except StoreError, err:
            ui.warn(err.longmessage())
            os.remove(tmpfilename)
            missing.append(filename)
            continue

        hhash = binascii.hexlify(bhash)
        if hhash != hash:
            ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                    % (filename, hash, hhash))
            os.remove(tmpfilename)
            missing.append(filename)
        else:
            if os.path.exists(outfilename):  # for windows
                os.remove(outfilename)
            os.rename(tmpfilename, outfilename)
            bfutil.copy_to_cache(self.repo, self.repo['.'].node(), filename,
                                 True)
            success.append((filename, hhash))
def put(self, source, filename, hash):
    destdir = os.path.join(self.url, filename)
    dest = os.path.join(destdir, hash)
    if os.path.exists(dest):
        # No big deal: this could happen if someone restores a big
        # file to a previous revision.
        return
    util.makedirs(destdir)
    shutil.copy(source, dest)
def get(self, files):
    '''Get the specified largefiles from the store and write to local
    files under repo.root.  files is a list of (filename, hash)
    tuples.  Return (success, missing), lists of files successfully
    downloaded and those not found in the store.  success is a list
    of (filename, hash) tuples; missing is a list of filenames that
    we could not get.  (The detailed error message will already have
    been presented to the user, so missing is just supplied as a
    summary.)'''
    success = []
    missing = []
    ui = self.ui

    util.makedirs(lfutil.storepath(self.repo, ''))

    at = 0
    available = self.exists(set(hash for (_filename, hash) in files))
    for filename, hash in files:
        ui.progress(_('getting largefiles'), at, unit='lfile',
                    total=len(files))
        at += 1
        ui.note(_('getting %s:%s\n') % (filename, hash))

        if not available.get(hash):
            ui.warn(
                _('%s: largefile %s not available from %s\n')
                % (filename, hash, self.url))
            missing.append(filename)
            continue

        storefilename = lfutil.storepath(self.repo, hash)
        tmpfile = util.atomictempfile(
            storefilename + '.tmp',
            createmode=self.repo.store.createmode)

        try:
            hhash = self._getfile(tmpfile, filename, hash)
        except StoreError, err:
            ui.warn(err.longmessage())
            hhash = ""
        tmpfile.close()

        if hhash != hash:
            if hhash != "":
                ui.warn(
                    _('%s: data corruption (expected %s, got %s)\n')
                    % (filename, hash, hhash))
            util.unlink(storefilename + '.tmp')
            missing.append(filename)
            continue

        util.rename(storefilename + '.tmp', storefilename)
        lfutil.linktousercache(self.repo, hash)
        success.append((filename, hhash))
def get(self, files):
    '''Get the specified big files from the store and write to local
    files under repo.root.  files is a list of (filename, hash)
    tuples.  Return (success, missing), lists of files successfully
    downloaded and those not found in the store.  success is a list
    of (filename, hash) tuples; missing is a list of filenames that
    we could not get.  (The detailed error message will already have
    been presented to the user, so missing is just supplied as a
    summary.)'''
    success = []
    missing = []
    ui = self.ui

    at = 0
    for filename, hash in files:
        ui.progress(_('getting kbfiles'), at, unit='kbfile',
                    total=len(files))
        at += 1
        ui.note(_('getting %s\n') % filename)
        outfilename = self.repo.wjoin(filename)
        destdir = os.path.dirname(outfilename)
        util.makedirs(destdir)
        if not os.path.isdir(destdir):
            self.abort(error.RepoError(_('cannot create dest directory %s')
                                       % destdir))

        # No need to pass mode='wb' to fdopen(), since mkstemp() already
        # opened the file in binary mode.
        (tmpfd, tmpfilename) = tempfile.mkstemp(
            dir=destdir, prefix=os.path.basename(filename))
        tmpfile = os.fdopen(tmpfd, 'w')

        try:
            bhash = self._getfile(tmpfile, filename, hash)
        except StoreError, err:
            tmpfile.close()
            ui.warn(err.longmessage())
            os.remove(tmpfilename)
            missing.append(filename)
            continue

        hhash = binascii.hexlify(bhash)
        if hhash != hash:
            ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                    % (filename, hash, hhash))
            os.remove(tmpfilename)
            missing.append(filename)
        else:
            if os.path.exists(outfilename):  # for windows
                os.remove(outfilename)
            os.rename(tmpfilename, outfilename)
            bfutil.copytocache(self.repo, self.repo['.'].node(), filename,
                               True)
            success.append((filename, hhash))
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
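# Stdlib-only sketch (hypothetical helper, not the Mercurial API) of the same
# hardlink-or-copy idea used by the link() variants above: os.link() lets the
# store and the user cache share one inode when both live on one filesystem,
# and a plain copy is the fallback when hardlinks are unsupported (for
# example across devices or on some network filesystems).
import os
import shutil


def link_or_copy(src, dest):
    os.makedirs(os.path.dirname(dest) or '.', exist_ok=True)
    try:
        os.link(src, dest)  # cheap: both names point at the same data
    except OSError:
        shutil.copyfile(src, dest)  # fallback: real copy of the content
        shutil.copymode(src, dest)  # preserve the permission bits too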
def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
def findfile(repo, hash):
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
        return storepath(repo, hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
def link(src, dest):
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(
                    storepath(repo, hash),
                    createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
def copytostoreabsolute(repo, file, hash):
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = util.atomictempfile(storepath(repo, hash))
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        util.copymode(file, storepath(repo, hash))
    linktousercache(repo, hash)
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen: this
    is meant to be called only after ensuring that the needed largefile
    exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    shutil.copy(path, repo.wjoin(filename))
    return True
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, 'rb') as srcf:
            with util.atomictempfile(dest) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
def writehash(hash, filename, executable):
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')

    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()

    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))
def _lockflock(self):
    """the same as 'lock' but use flock instead of lockmod.lock, to avoid
    creating temporary symlinks."""
    import fcntl
    lockpath = self.linelogpath
    util.makedirs(os.path.dirname(lockpath))
    lockfd = os.open(lockpath, os.O_RDONLY | os.O_CREAT, 0o664)
    fcntl.flock(lockfd, fcntl.LOCK_EX)
    try:
        yield
    finally:
        fcntl.flock(lockfd, fcntl.LOCK_UN)
        os.close(lockfd)
def get(self, files):
    '''Get the specified largefiles from the store and write to local
    files under repo.root.  files is a list of (filename, hash)
    tuples.  Return (success, missing), lists of files successfully
    downloaded and those not found in the store.  success is a list
    of (filename, hash) tuples; missing is a list of filenames that
    we could not get.  (The detailed error message will already have
    been presented to the user, so missing is just supplied as a
    summary.)'''
    success = []
    missing = []
    ui = self.ui

    util.makedirs(lfutil.storepath(self.repo, ''))

    at = 0
    available = self.exists(set(hash for (_filename, hash) in files))
    for filename, hash in files:
        ui.progress(_('getting largefiles'), at, unit='lfile',
                    total=len(files))
        at += 1
        ui.note(_('getting %s:%s\n') % (filename, hash))

        if not available.get(hash):
            ui.warn(_('%s: largefile %s not available from %s\n')
                    % (filename, hash, self.url))
            missing.append(filename)
            continue

        storefilename = lfutil.storepath(self.repo, hash)
        tmpfile = util.atomictempfile(storefilename + '.tmp',
                                      createmode=self.repo.store.createmode)

        try:
            hhash = self._getfile(tmpfile, filename, hash)
        except StoreError, err:
            ui.warn(err.longmessage())
            hhash = ""
        tmpfile.close()

        if hhash != hash:
            if hhash != "":
                ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                        % (filename, hash, hhash))
            util.unlink(storefilename + '.tmp')
            missing.append(filename)
            continue

        util.rename(storefilename + '.tmp', storefilename)
        lfutil.linktousercache(self.repo, hash)
        success.append((filename, hhash))
def _gethash(self, filename, hash):
    """Get file with the provided hash and store it in the local repo's
    store and in the usercache.
    filename is for informational messages only.
    """
    util.makedirs(lfutil.storepath(self.repo, ""))
    storefilename = lfutil.storepath(self.repo, hash)

    tmpname = storefilename + ".tmp"
    tmpfile = util.atomictempfile(tmpname,
                                  createmode=self.repo.store.createmode)

    try:
        gothash = self._getfile(tmpfile, filename, hash)
    except StoreError, err:
        self.ui.warn(err.longmessage())
        gothash = ""
def openbfdirstate(ui, repo):
    """
    Return a dirstate object that tracks big files: i.e. its root is
    the repo root, but it is saved in .hg/bfiles/dirstate.
    """
    admin = repo.join(longname)
    try:
        # Mercurial >= 1.9
        opener = scmutil.opener(admin)
    except ImportError:
        # Mercurial <= 1.8
        opener = util.opener(admin)
    if hasattr(repo.dirstate, "_validate"):
        bfdirstate = dirstate.dirstate(opener, ui, repo.root,
                                       repo.dirstate._validate)
    else:
        bfdirstate = dirstate.dirstate(opener, ui, repo.root)

    # If the bfiles dirstate does not exist, populate and create it.  This
    # ensures that we create it on the first meaningful bfiles operation in
    # a new clone.  It also gives us an easy way to forcibly rebuild bfiles
    # state:
    #   rm .hg/bfiles/dirstate && hg bfstatus
    # Or even, if things are really messed up:
    #   rm -rf .hg/bfiles && hg bfstatus
    # (although that can lose data, e.g. pending big file revisions in
    # .hg/bfiles/{pending,committed}).
    if not os.path.exists(os.path.join(admin, "dirstate")):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            bigfile = splitstandin(standin)
            hash = readstandin(repo, standin)
            try:
                curhash = hashfile(bigfile)
            except IOError, err:
                if err.errno == errno.ENOENT:
                    dirstate_normaldirty(bfdirstate, bigfile)
                else:
                    raise
            else:
                if curhash == hash:
                    bfdirstate.normal(unixpath(bigfile))
                else:
                    dirstate_normaldirty(bfdirstate, bigfile)
        bfdirstate.write()
def _gethash(self, filename, hash):
    """Get file with the provided hash and store it in the local repo's
    store and in the usercache.
    filename is for informational messages only.
    """
    util.makedirs(lfutil.storepath(self.repo, ''))
    storefilename = lfutil.storepath(self.repo, hash)

    tmpname = storefilename + '.tmp'
    tmpfile = util.atomictempfile(tmpname,
                                  createmode=self.repo.store.createmode)

    try:
        gothash = self._getfile(tmpfile, filename, hash)
    except StoreError, err:
        self.ui.warn(err.longmessage())
        gothash = ""
def _write_hash(hhash, fn, mode=None):
    if mode is None:
        mode = 0666
    util.makedirs(os.path.dirname(fn))
    if os.path.exists(fn):
        os.unlink(fn)
    if os.name == 'posix':
        # Yuck: on Unix, go through open(2) to ensure that the caller's
        # mode is filtered by umask() in the kernel, where it's supposed
        # to be done.
        wfile = os.fdopen(os.open(fn, os.O_WRONLY | os.O_CREAT, mode), 'wb')
    elif os.name == 'nt':
        # But on Windows, use open() directly, since passing mode='wb'
        # to os.fdopen() does not work.  (Python bug?)
        wfile = open(fn, 'wb')
    wfile.write(hhash)
    wfile.write('\n')
    wfile.close()
def do_bfput(self):
    """respond to the bfput command: send a file"""
    key, fname = self.getarg()
    self.log('do_bfput: key=%r, fname=%r', key, fname)
    if os.path.exists(fname):
        self.log('dest file exists: returning early')
        self.respond('skip')
        return
    destdir = os.path.dirname(fname)
    try:
        self.log('opening dest file %r', fname)
        util.makedirs(destdir)
        fd = open(fname, "wb")
    except (OSError, IOError), err:
        self.log('error opening dest file: %s', err)
        self.respond('cannot create file: %s' % err)
        return
def writehash(hash, filename, executable):
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    if os.name == "posix":
        # Yuck: on Unix, go through open(2) to ensure that the caller's
        # mode is filtered by umask() in the kernel, where it's supposed
        # to be done.
        wfile = os.fdopen(os.open(filename, os.O_WRONLY | os.O_CREAT,
                                  getmode(executable)), "wb")
    else:
        # But on Windows, use open() directly, since passing mode='wb'
        # to os.fdopen() does not work.  (Python bug?)
        wfile = open(filename, "wb")

    try:
        wfile.write(hash)
        wfile.write("\n")
    finally:
        wfile.close()
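# Stdlib sketch (illustrative names only) of the open(2) trick the writehash
# and _write_hash variants above use on POSIX: passing the mode to os.open()
# at creation time lets the kernel filter it through the process umask,
# whereas creating the file first and os.chmod()ing it afterwards would
# install the mode bits verbatim, ignoring the umask.
import os


def create_with_umask(path, data, mode=0o666):
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode)
    with os.fdopen(fd, 'wb') as wfile:
        # resulting permissions on a fresh file: mode & ~umask
        wfile.write(data)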
def flock(lockpath, description, timeout=-1):
    """A flock based lock object. Currently it is always non-blocking.

    Note that since it is flock based, you can accidentally take it multiple
    times within one process and the first one to be released will release all
    of them. So the caller needs to be careful to not create more than one
    instance per lock.
    """

    # best effort lightweight lock
    try:
        import fcntl
        fcntl.flock
    except ImportError:
        # fallback to Mercurial lock
        vfs = vfsmod.vfs(os.path.dirname(lockpath))
        with lockmod.lock(vfs, os.path.basename(lockpath), timeout=timeout):
            yield
        return
    # make sure lock file exists
    util.makedirs(os.path.dirname(lockpath))
    with open(lockpath, 'a'):
        pass
    lockfd = os.open(lockpath, os.O_RDONLY, 0o664)
    start = time.time()
    while True:
        try:
            fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError as ex:
            if ex.errno == errno.EAGAIN:
                if timeout != -1 and time.time() - start > timeout:
                    raise error.LockHeld(errno.EAGAIN, lockpath, description,
                                         '')
                else:
                    time.sleep(0.05)
                    continue
            raise

    try:
        yield
    finally:
        fcntl.flock(lockfd, fcntl.LOCK_UN)
        os.close(lockfd)
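# Minimal fcntl.flock sketch (POSIX only, hypothetical names, no Mercurial
# dependencies) showing the locking discipline flock() above wraps: take an
# exclusive non-blocking lock on a dedicated lock file, retry with a short
# sleep until a deadline, and always unlock and close in a finally block.
import contextlib
import errno
import fcntl
import os
import time


@contextlib.contextmanager
def simple_flock(lockpath, timeout=5.0):
    os.makedirs(os.path.dirname(lockpath) or '.', exist_ok=True)
    lockfd = os.open(lockpath, os.O_RDONLY | os.O_CREAT, 0o664)
    deadline = time.time() + timeout
    try:
        while True:
            try:
                fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except OSError as ex:
                if ex.errno != errno.EAGAIN or time.time() > deadline:
                    raise
                time.sleep(0.05)  # lock held elsewhere: retry until deadline
        yield
    finally:
        fcntl.flock(lockfd, fcntl.LOCK_UN)
        os.close(lockfd)


# usage: with simple_flock('/tmp/example.lock'): do_exclusive_work()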
def _open_bfdirstate(ui, repo, correct=True):
    '''
    Return a dirstate object that tracks big files: i.e. its root is
    the repo root, but it is saved in .hg/bfiles/dirstate.
    '''
    admin = repo.join('bfiles')
    opener = scmutil.opener(admin)
    bfdirstate = open_dirstate(opener, ui, repo.root)

    # If the bfiles dirstate does not exist, populate and create it.  This
    # ensures that we create it on the first meaningful bfiles operation in
    # a new clone.  It also gives us an easy way to forcibly rebuild bfiles
    # state:
    #   rm .hg/bfiles/dirstate && hg bfstatus
    # Or even, if things are really messed up:
    #   rm -rf .hg/bfiles && hg bfstatus
    # (although that can lose data, e.g. pending big file revisions in
    # .hg/bfiles/{pending,committed}).
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = _get_standin_matcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            bigfile = _split_standin(standin)
            state = repo.dirstate[standin]
            if state == 'r':
                bfdirstate.remove(bigfile)
                continue
            hash = _read_standin(repo, standin)
            try:
                curhash = _hashfile(open(bigfile, 'rb'))
            except IOError, err:
                if err.errno == errno.ENOENT:
                    bfdirstate.normallookup(bigfile)
                else:
                    raise
            else:
                if curhash == hash:
                    bfdirstate.normal(bigfile)
                else:
                    bfdirstate.normallookup(bigfile)
        bfdirstate.write()
def open_bfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks big files: i.e. its root is
    the repo root, but it is saved in .hg/bfiles/dirstate.
    '''
    admin = repo.join(long_name)
    opener = util.opener(admin)
    if hasattr(repo.dirstate, '_validate'):
        bfdirstate = dirstate.dirstate(opener, ui, repo.root,
                                       repo.dirstate._validate)
    else:
        bfdirstate = dirstate.dirstate(opener, ui, repo.root)

    # If the bfiles dirstate does not exist, populate and create it.  This
    # ensures that we create it on the first meaningful bfiles operation in
    # a new clone.  It also gives us an easy way to forcibly rebuild bfiles
    # state:
    #   rm .hg/bfiles/dirstate && hg bfstatus
    # Or even, if things are really messed up:
    #   rm -rf .hg/bfiles && hg bfstatus
    # (although that can lose data, e.g. pending big file revisions in
    # .hg/bfiles/{pending,committed}).
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = get_standin_matcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            bigfile = split_standin(standin)
            hash = read_standin(repo, standin)
            try:
                curhash = hashfile(bigfile)
            except IOError, err:
                if err.errno == errno.ENOENT:
                    dirstate_normaldirty(bfdirstate, bigfile)
                else:
                    raise
            else:
                if curhash == hash:
                    bfdirstate.normal(unixpath(bigfile))
                else:
                    dirstate_normaldirty(bfdirstate, bigfile)
        bfdirstate.write()
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        util.makedirs(lfstoredir)
        matcher = getstandinmatcher(repo)
        for standin in repo.dirstate.walk(matcher, [], False, False):
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
def write_hash(hash, filename, executable):
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    if os.name == 'posix':
        # Yuck: on Unix, go through open(2) to ensure that the caller's
        # mode is filtered by umask() in the kernel, where it's supposed
        # to be done.
        wfile = os.fdopen(
            os.open(filename, os.O_WRONLY | os.O_CREAT,
                    get_mode(executable)),
            'wb')
    else:
        # But on Windows, use open() directly, since passing mode='wb'
        # to os.fdopen() does not work.  (Python bug?)
        wfile = open(filename, 'wb')

    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen: this
    is meant to be called only after ensuring that the needed largefile
    exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    dest = repo.wjoin(filename)
    with open(path, 'rb') as srcfd:
        with open(dest, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        util.unlink(dest)
        return False
    return True
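# copyandhash() itself is not shown in this collection; a plausible stdlib
# equivalent (an assumption, not the Mercurial implementation) streams the
# source in chunks while folding each chunk into SHA-1, so the copy and the
# integrity check happen in a single pass over the data.
import hashlib


def copy_and_hash(srcf, destf, chunksize=128 * 1024):
    hasher = hashlib.sha1()
    while True:
        chunk = srcf.read(chunksize)
        if not chunk:
            break
        hasher.update(chunk)  # hash the chunk...
        destf.write(chunk)    # ...and write it out in the same pass
    return hasher.hexdigest()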
def bigupdate(ui, repo, *pats, **opts):
    '''fetch files from versions directory as recorded in '.bigfiles'.
    Also complain about necessary files missing in the version directory'''
    ds = read_bigfiledirstate(ui, repo)
    bigfiles = parse_bigfiles(repo)
    tracked_gotbig, added_big, modified, removed, gotsmall, \
        missinginrepo = _bigstatus(ui, repo, pats, opts, ds, bigfiles)

    brepo = bigfiles_repo(ui)
    tocopy = removed
    if opts['clean']:
        tocopy = tocopy + modified
    for file in tocopy:
        f = repo.wjoin(file)
        hash = bigfiles[file]
        rf = "%s/%s.%s" % (brepo, file, hash)
        ui.write(_("fetching %s\n") % rf)
        if not opts['dry_run']:
            util.makedirs(os.path.dirname(f))
            if os.path.exists(f):
                util.unlink(f)
            if os.path.exists(rf):
                util.copyfile(rf, f)
            else:
                fo = open(f, 'wb')
                rfo = gzip.open(rf + '.gz', 'rb')
                def read10Mb():
                    return rfo.read(1024 * 1024 * 10)
                for chunk in iter(read10Mb, ''):
                    fo.write(chunk)
                fo.close()
                rfo.close()

    if missinginrepo:
        ui.write(_("\nNeeded files missing in bigrepo %s:\n") % brepo)
        for file in missinginrepo:
            hash = bigfiles[file]
            ui.write("%s.%s\n" % (file, hash))

    write_bigfiledirstate(ui, repo, ds)
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen: this
    is meant to be called only after ensuring that the needed largefile
    exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    dest = repo.wjoin(filename)
    with open(path, 'rb') as srcfd:
        with open(dest, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        repo.ui.warn(
            _('%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash))
        util.unlink(dest)
        return False
    return True
def writehash(hash, filename, executable):
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
def linktousercache(repo, hash):
    util.makedirs(os.path.dirname(usercachepath(repo.ui, hash)))
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
def mkstemp(repo, prefix):
    '''Returns a file descriptor and a filename corresponding to a temporary
    file in the repo's largefiles store.'''
    path = repo.join(longname)
    util.makedirs(path)
    return tempfile.mkstemp(prefix=prefix, dir=path)
def linktousercache(repo, hash):
    path = usercachepath(repo.ui, hash)
    if path:
        util.makedirs(os.path.dirname(path))
        link(storepath(repo, hash), path)
def put(self, source, hash):
    util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash)))
    if lfutil.instore(self.remote, hash):
        return
    lfutil.link(lfutil.storepath(self.repo, hash),
                lfutil.storepath(self.remote, hash))
def update_bfiles(ui, repo):
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()),
                              [], False, False, False)
        (unsure, modified, added, removed, missing, unknown,
         ignored, clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []
        at = 0
        updated = 0
        removed = 0
        printed = False
        if bfiles:
            ui.status(_('Getting changed bfiles\n'))
            printed = True

        for bfile in bfiles:
            at += 1
            if os.path.exists(repo.wjoin(bfile)) and not os.path.exists(
                    repo.wjoin(bfutil.standin(bfile))):
                os.unlink(repo.wjoin(bfile))
                removed += 1
                bfdirstate.forget(bfutil.unixpath(bfile))
                continue
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if not os.path.exists(repo.wjoin(bfile)) or \
                    expectedhash != bfutil.hashfile(repo.wjoin(bfile)):
                path = bfutil.find_file(repo, expectedhash)
                if not path:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    bfdirstate.normal(bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and \
                    mode != os.stat(repo.wjoin(bfile)).st_mode:
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                bfdirstate.normal(bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            bfdirstate.normal(bfutil.unixpath(filename))

        for bfile in bfdirstate:
            if bfile not in bfiles:
                if os.path.exists(repo.wjoin(bfile)):
                    if not printed:
                        ui.status(_('Getting changed bfiles\n'))
                        printed = True
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                    bfdirstate.forget(bfutil.unixpath(bfile))

        bfdirstate.write()
        if printed:
            ui.status(_('%d big files updated, %d removed\n')
                      % (updated, removed))
    finally:
        wlock.release()
def revert_bfiles(ui, repo):
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()),
                              [], False, False, False)
        (unsure, modified, added, removed, missing, unknown,
         ignored, clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []
        at = 0
        updated = 0
        for bfile in bfiles:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                bfdirstate.remove(bfile)
                continue
            if os.path.exists(repo.wjoin(
                    bfutil.standin(os.path.join(bfile + '.orig')))):
                shutil.copyfile(repo.wjoin(bfile),
                                repo.wjoin(bfile + '.orig'))
            at += 1
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if not os.path.exists(repo.wjoin(bfile)) or \
                    expectedhash != bfutil.hashfile(repo.wjoin(bfile)):
                path = bfutil.find_file(repo, expectedhash)
                if path is None:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    if bfutil.standin(bfile) not in repo['.']:
                        bfdirstate.add(bfutil.unixpath(bfile))
                    elif expectedhash == \
                            repo['.'][bfutil.standin(bfile)].data().strip():
                        bfdirstate.normal(bfutil.unixpath(bfile))
                    else:
                        bfutil.dirstate_normaldirty(bfdirstate,
                                                    bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and \
                    mode != os.stat(repo.wjoin(bfile)).st_mode:
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                if bfutil.standin(bfile) not in repo['.']:
                    bfdirstate.add(bfutil.unixpath(bfile))
                elif expectedhash == \
                        repo['.'][bfutil.standin(bfile)].data().strip():
                    bfdirstate.normal(bfutil.unixpath(bfile))
                else:
                    bfutil.dirstate_normaldirty(bfdirstate,
                                                bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            if bfutil.standin(filename) not in repo['.']:
                bfdirstate.add(bfutil.unixpath(filename))
            elif hash == repo['.'][bfutil.standin(filename)].data().strip():
                bfdirstate.normal(bfutil.unixpath(filename))
            else:
                bfutil.dirstate_normaldirty(bfdirstate,
                                            bfutil.unixpath(filename))

        removed = 0
        for bfile in bfdirstate:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                if os.path.exists(repo.wjoin(bfile)):
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                if bfutil.standin(bfile) in repo['.']:
                    bfdirstate.remove(bfutil.unixpath(bfile))
                else:
                    bfdirstate.forget(bfutil.unixpath(bfile))
            else:
                state = repo.dirstate[bfutil.standin(bfile)]
                if state == 'n':
                    bfdirstate.normal(bfile)
                elif state == 'r':
                    bfdirstate.remove(bfile)
                elif state == 'a':
                    bfdirstate.add(bfile)
                elif state == '?':
                    bfdirstate.forget(bfile)
        bfdirstate.write()
    finally:
        wlock.release()