# Imports assumed by the largefiles snippets below (hedged: this mirrors the
# typical header of the historic hgext/largefiles modules; the exact module
# layout varies across Mercurial releases):
import os
import shutil

from mercurial import cmdutil, scmutil, util
from mercurial.i18n import _

import basestore
import lfutil


def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing).  cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    toget = []

    for lfile in lfiles:
        # If we are mid-merge, then we have to trust the standin that is in the
        # working copy to have the correct hash value.  This is because the
        # original hg.merge() already updated the standin as part of the normal
        # merge process -- we just have to update the largefile to match.
        if (getattr(repo, "_ismerging", False) and
                os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
            expectedhash = lfutil.readstandin(repo, lfile)
        else:
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()

        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'.  in this case,
        # it will not be in any store, so don't look for it.
        if ((not os.path.exists(repo.wjoin(lfile)) or
             expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
                not lfutil.findfile(repo, expectedhash)):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])

def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        if os.path.exists(absstandin + '.orig') and os.path.exists(abslfile):
            shutil.copyfile(abslfile, abslfile + '.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfilesrepo.status() into
                # recognition that such cache missing files are REMOVED.
                if lfile not in repo[None]: # not switched to normal file
                    util.unlinkpath(abslfile, ignoremissing=True)
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            else:
                # Synchronize largefile dirstate to the last modified time of
                # the file
                lfdirstate.normal(lfile)
            ret = 1
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if (os.path.exists(abslfile) and
            repo.dirstate.normalize(lfile) not in repo[None]):
            util.unlinkpath(abslfile)
            ret = -1
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        # When rebasing, we need to synchronize the standin and the largefile,
        # because otherwise the largefile will get reverted.  But for commit's
        # sake, we have to mark the file as unclean.
        if getattr(repo, "_isrebasing", False):
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret

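# The function below is NOT part of the original extension: it is a minimal,
# hypothetical driver loop added here only to illustrate _updatelfile()'s
# return-code contract (1 = updated, -1 = removed, 0 = unchanged,
# None = largefile missing from the cache).
def _applyupdates_sketch(repo, lfdirstate, lfiles):
    updated = removed = 0
    missing = []
    for lfile in lfiles:
        ret = _updatelfile(repo, lfdirstate, lfile)
        if ret is None:
            missing.append(lfile)   # could not be fetched from any store
        elif ret == 1:
            updated += 1
        elif ret == -1:
            removed += 1
    return updated, removed, missing
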
def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err

def catlfile(repo, lfile, rev, filename):
    hash = lfutil.readstandin(repo, lfile, rev)
    if not lfutil.inusercache(repo.ui, hash):
        store = basestore._openstore(repo)
        success, missing = store.get([(lfile, hash)])
        if len(success) != 1:
            raise util.Abort(
                _('largefile %s is not in cache and could not be downloaded')
                % lfile)
    path = lfutil.usercachepath(repo.ui, hash)
    fpout = cmdutil.makefileobj(repo, filename)
    fpin = open(path, "rb")
    fpout.write(fpin.read())
    fpout.close()
    fpin.close()
    return 0

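# Note: catlfile() above slurps the whole largefile into memory with
# fpin.read(), whereas overridecat() above streams it with
# util.filechunkiter.  The helper below is a hedged sketch (not original
# code) of the streaming variant of the cache-to-output copy, using the same
# names and the same 128 KiB chunk size as the snippets above.
def _catlfile_streamed_sketch(fpout, path):
    fpin = open(path, "rb")
    try:
        for chunk in util.filechunkiter(fpin, 128 * 1024):
            fpout.write(chunk)
    finally:
        fpin.close()
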
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        if os.path.exists(absstandin + '.orig'):
            shutil.copyfile(abslfile, abslfile + '.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                return None # don't try to set the mode or update the dirstate
            ret = 1
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        if os.path.exists(abslfile):
            os.unlink(abslfile)
            ret = -1
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret

def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
                         removed))
    finally:
        wlock.release()

def updatelfiles(ui, repo, filelist=None, printmessage=True,
                 normallookup=False):
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are REMOVED.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            if printmessage:
                ui.status(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            standin = lfutil.standin(lfile)
            if standin in repo.dirstate:
                stat = repo.dirstate._map[standin]
                state, mtime = stat[0], stat[3]
            else:
                state, mtime = '?', -1
            if state == 'n':
                if normallookup or mtime < 0:
                    # state 'n' doesn't ensure 'clean' in this case
                    lfdirstate.normallookup(lfile)
                else:
                    lfdirstate.normal(lfile)
            elif state == 'm':
                lfdirstate.normallookup(lfile)
            elif state == 'r':
                lfdirstate.remove(lfile)
            elif state == 'a':
                lfdirstate.add(lfile)
            elif state == '?':
                lfdirstate.drop(lfile)

        lfdirstate.write()
        if printmessage and lfiles:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                      removed))
    finally:
        wlock.release()

def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False, checked=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (checked or
                     not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(
                _('%d largefiles updated, %d removed\n') % (updated, removed))
    finally:
        wlock.release()

def updatelfiles(ui, repo, filelist=None, printmessage=True,
                 normallookup=False):
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are REMOVED.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            if printmessage:
                ui.status(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        if filelist is not None:
            # If "local largefile" is chosen at file merging, it is
            # not listed in "filelist" (= dirstate syncing is
            # omitted), because the standin file is not changed before and
            # after merging.
            # But the status of such files may have to be changed by
            # merging. For example, locally modified ("M") largefile
            # has to become re-added("A"), if it is "normal" file in
            # the target revision of linear-merging.
            for lfile in lfdirstate:
                if lfile not in filelist:
                    lfutil.synclfdirstate(repo, lfdirstate, lfile, True)

        lfdirstate.write()
        if printmessage and lfiles:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                      removed))
    finally:
        wlock.release()

def updatelfiles(ui, repo, filelist=None, printmessage=True):
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    shutil.copyfile(abslfile, abslfile + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are REMOVED.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            if printmessage:
                ui.status(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            state = repo.dirstate[lfutil.standin(lfile)]
            if state == 'n':
                # When rebasing, we need to synchronize the standin and the
                # largefile, because otherwise the largefile will get reverted.
                # But for commit's sake, we have to mark the file as unclean.
                if getattr(repo, "_isrebasing", False):
                    lfdirstate.normallookup(lfile)
                else:
                    lfdirstate.normal(lfile)
            elif state == 'r':
                lfdirstate.remove(lfile)
            elif state == 'a':
                lfdirstate.add(lfile)
            elif state == '?':
                lfdirstate.drop(lfile)

        lfdirstate.write()
        if printmessage and lfiles:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                      removed))
    finally:
        wlock.release()

def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        wvfs = repo.wvfs
        for lfile in lfiles:
            rellfile = lfile
            rellfileorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(rellfile)),
                start=repo.root)
            relstandin = lfutil.standin(lfile)
            relstandinorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(relstandin)),
                start=repo.root)
            if wvfs.exists(relstandin):
                if (wvfs.exists(relstandinorig) and
                    wvfs.exists(rellfile)):
                    shutil.copyfile(wvfs.join(rellfile),
                                    wvfs.join(rellfileorig))
                    wvfs.unlinkpath(relstandinorig)
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]: # not switched to normal file
                        wvfs.unlinkpath(rellfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (wvfs.exists(rellfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    wvfs.unlinkpath(rellfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            rellfile = lfile
            relstandin = lfutil.standin(lfile)
            if wvfs.exists(relstandin):
                mode = wvfs.stat(relstandin).st_mode
                if mode != wvfs.stat(rellfile).st_mode:
                    wvfs.chmod(rellfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(
                _('%d largefiles updated, %d removed\n') % (updated, removed))

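# The following driver is NOT part of the extension: it is an illustrative,
# hedged sketch of how updatelfiles() is typically invoked, e.g. to refresh
# largefiles after the standins have changed. The ui/repo setup and the
# module path (hgext.largefiles.lfcommands) follow the historic layout and
# may differ between Mercurial releases.
def refresh_largefiles_sketch(path='.'):
    from mercurial import hg, ui as uimod
    from hgext.largefiles import lfcommands

    # ui.load() exists in newer releases; fall back to the plain constructor.
    u = uimod.ui.load() if hasattr(uimod.ui, 'load') else uimod.ui()
    repo = hg.repository(u, path)
    # Rely on the defaults for message printing and dirstate syncing.
    lfcommands.updatelfiles(u, repo)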