def upload_bfiles(ui, rsrc, rdst, files):
    '''upload big files to the central store

    ui    - ui object for progress/status output
    rsrc  - source (local) repository the bfiles are read from
    rdst  - destination; only remote ('http...') paths trigger an upload
    files - iterable of bfile hashes to upload

    Raises util.Abort if a requested hash cannot be found locally.
    '''
    if not files:
        return
    # Don't upload locally. All bfiles are in the system wide cache
    # so the other repo can just get them from there.
    if not rdst.path.startswith('http'):
        return
    store = basestore._open_store(rsrc, rdst.path, put=True)
    at = 0
    # `fhash` instead of `hash` so the builtin isn't shadowed.
    for fhash in files:
        ui.progress(_('Uploading bfiles'), at, unit='bfile',
                    total=len(files))
        if store.exists(fhash):
            # Already in the remote store; nothing to transfer.
            at += 1
            continue
        source = bfutil.find_file(rsrc, fhash)
        if not source:
            raise util.Abort(_('Missing bfile %s needs to be uploaded')
                             % fhash)
        # XXX check for errors here
        store.put(source, fhash)
        at += 1
    # BUGFIX: use the same translated topic as the calls above --
    # ui.progress keys progress bars on the topic string, so the previous
    # untranslated 'Uploading bfiles' literal failed to close the bar in
    # localized environments.
    ui.progress(_('Uploading bfiles'), None)
def getfilectx(repo, memctx, f):
    """Return a memfilectx for *f* during conversion.

    Standin entries are resolved to the real big-file contents pulled
    from the local cache; ordinary files are copied through, with
    .hgtags node ids rewritten via the closed-over ``revmap``.
    Raises IOError when the file is absent from the manifest (removed
    or renamed) so the memctx machinery drops it.
    """
    standin = bfutil.standin(f)
    if standin in files:
        # Missing manifest entry == removed/renamed: signal with IOError.
        try:
            fctx = ctx.filectx(standin)
        except error.LookupError:
            raise IOError()
        renamed = fctx.renamed()
        if renamed:
            renamed = bfutil.split_standin(renamed[0])
        filehash = fctx.data().strip()
        path = bfutil.find_file(rsrc, filehash)
        ### TODO: What if the file is not cached?
        data = ''
        with open(path, 'rb') as fobj:
            data = fobj.read()
        return context.memfilectx(f, data, 'l' in fctx.flags(),
                                  'x' in fctx.flags(), renamed)

    # Regular (non-standin) file.
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        raise IOError()
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]
    data = fctx.data()
    if f == '.hgtags':
        # Map every tagged node through revmap so tags follow the
        # converted changesets.
        rewritten = []
        for line in data.splitlines():
            tagid, name = line.split(' ', 1)
            rewritten.append('%s %s\n'
                             % (node.hex(revmap[node.bin(tagid)]), name))
        data = ''.join(rewritten)
    return context.memfilectx(f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), renamed)
def update_bfiles(ui, repo):
    '''Synchronize working-copy big files with their standins.

    Copies changed/missing bfiles in from the local cache (fetching the
    rest from the store), removes bfiles whose standin is gone, fixes up
    file modes, and records everything in the bfiles dirstate.
    '''
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()),
                              [], False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored,
         clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []          # (bfile, hash) pairs not in the cache
        at = 0
        updated = 0
        removed = 0         # NOTE: shadows `removed` from the status tuple
        printed = False
        if bfiles:
            ui.status(_('Getting changed bfiles\n'))
            printed = True

        for bfile in bfiles:
            at += 1
            # Standin gone but bfile still present: the big file was
            # removed, so delete the working copy and forget it.
            if os.path.exists(repo.wjoin(bfile)) and not os.path.exists(
                    repo.wjoin(bfutil.standin(bfile))):
                os.unlink(repo.wjoin(bfile))
                removed += 1
                bfdirstate.forget(bfutil.unixpath(bfile))
                continue
            # The standin's content is the expected hash of the big file.
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if not os.path.exists(
                    repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(
                    repo.wjoin(bfile)):
                # Missing or stale: copy from cache, or queue for the store.
                path = bfutil.find_file(repo, expectedhash)
                if not path:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    bfdirstate.normal(bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(
                    repo.wjoin(bfile)).st_mode:
                # Content is current; only the mode differs from the standin.
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                bfdirstate.normal(bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        # Fix up modes and dirstate for files the store just fetched.
        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            bfdirstate.normal(bfutil.unixpath(filename))

        # Drop dirstate entries (and working copies) for bfiles that no
        # longer exist in the repo.
        for bfile in bfdirstate:
            if bfile not in bfiles:
                if os.path.exists(repo.wjoin(bfile)):
                    if not printed:
                        ui.status(_('Getting changed bfiles\n'))
                        printed = True
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                    bfdirstate.forget(bfutil.unixpath(bfile))
        bfdirstate.write()

        if printed:
            ui.status(
                _('%d big files updated, %d removed\n') % (updated,
                                                           removed))
    finally:
        wlock.release()
def revert_bfiles(ui, repo):
    '''Restore working-copy big files to match their (reverted) standins.

    Re-copies bfiles from the cache/store where the hash differs, fixes
    modes, and reconciles the bfiles dirstate with both repo['.'] and the
    main dirstate's standin states.
    '''
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()),
                              [], False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored,
         clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []          # (bfile, hash) pairs not found in the cache
        at = 0
        updated = 0
        for bfile in bfiles:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                bfdirstate.remove(bfile)
                continue
            # If a .orig standin exists, preserve the current working copy
            # as bfile.orig before overwriting it.
            # NOTE(review): os.path.join() with a single argument is a
            # no-op here -- presumably a leftover; verify intent.
            if os.path.exists(
                    repo.wjoin(bfutil.standin(os.path.join(bfile +
                                                           '.orig')))):
                shutil.copyfile(repo.wjoin(bfile),
                                repo.wjoin(bfile + '.orig'))
            at += 1
            # The standin's content is the expected big-file hash.
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if not os.path.exists(
                    repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(
                    repo.wjoin(bfile)):
                # Missing or stale: restore from cache or queue for store.
                path = bfutil.find_file(repo, expectedhash)
                if path is None:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    # Classify against the parent revision: new file,
                    # clean, or modified relative to repo['.'].
                    if bfutil.standin(bfile) not in repo['.']:
                        bfdirstate.add(bfutil.unixpath(bfile))
                    elif expectedhash == repo['.'][bfutil.standin(
                            bfile)].data().strip():
                        bfdirstate.normal(bfutil.unixpath(bfile))
                    else:
                        bfutil.dirstate_normaldirty(bfdirstate,
                                                    bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(
                    repo.wjoin(bfile)).st_mode:
                # Content current; only the mode needs restoring.
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                if bfutil.standin(bfile) not in repo['.']:
                    bfdirstate.add(bfutil.unixpath(bfile))
                elif expectedhash == repo['.'][bfutil.standin(
                        bfile)].data().strip():
                    bfdirstate.normal(bfutil.unixpath(bfile))
                else:
                    bfutil.dirstate_normaldirty(bfdirstate,
                                                bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        # Fix up modes/dirstate for files the store fetched.
        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            if bfutil.standin(filename) not in repo['.']:
                bfdirstate.add(bfutil.unixpath(filename))
            elif hash == repo['.'][bfutil.standin(filename)].data().strip():
                bfdirstate.normal(bfutil.unixpath(filename))
            else:
                bfutil.dirstate_normaldirty(bfdirstate,
                                            bfutil.unixpath(filename))

        removed = 0
        for bfile in bfdirstate:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                # Standin gone: delete any working copy, then mark
                # removed (tracked in parent) or forget (never committed).
                if os.path.exists(repo.wjoin(bfile)):
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                if bfutil.standin(bfile) in repo['.']:
                    bfdirstate.remove(bfutil.unixpath(bfile))
                else:
                    bfdirstate.forget(bfutil.unixpath(bfile))
            else:
                # Mirror the standin's state from the main dirstate.
                state = repo.dirstate[bfutil.standin(bfile)]
                if state == 'n':
                    bfdirstate.normal(bfile)
                elif state == 'r':
                    bfdirstate.remove(bfile)
                elif state == 'a':
                    bfdirstate.add(bfile)
                elif state == '?':
                    bfdirstate.forget(bfile)
        bfdirstate.write()
    finally:
        wlock.release()
def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                     prefix=None, mtime=None, subrepos=None):
    '''Archive a revision, substituting real big-file contents for standins.

    Wraps the stock archive command: standin entries are replaced with
    the cached big-file data before being written to the archive.
    Raises util.Abort for unknown archive kinds.
    '''
    # No need to lock because we are only reading history and bfile caches
    # neither of which are modified
    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    # In Mercurial <= 1.5 the prefix is passed to the archiver so try that
    # if that doesn't work we are probably in Mercurial >= 1.6 where the
    # prefix is not handled by the archiver
    try:
        archiver = archival.archivers[kind](dest, prefix,
                                            mtime or ctx.date()[0])

        # Archiver already applies the prefix (<= 1.5 signature).
        def write(name, mode, islink, getdata):
            if matchfn and not matchfn(name):
                return
            data = getdata()
            if decode:
                data = repo.wwritedata(name, data)
            archiver.addfile(name, mode, islink, data)
    except TypeError:
        # >= 1.6: the archiver takes no prefix, so write() prepends it.
        if kind == 'files':
            if prefix:
                raise util.Abort(
                    _('cannot give prefix when archiving to files'))
        else:
            prefix = archival.tidyprefix(dest, kind, prefix)

        def write(name, mode, islink, getdata):
            if matchfn and not matchfn(name):
                return
            data = getdata()
            if decode:
                data = repo.wwritedata(name, data)
            archiver.addfile(prefix + name, mode, islink, data)

        archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        # Build the .hg_archival.txt metadata file lazily.
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                # No global tag on this rev: record nearest tag + distance
                # by rendering the latesttag template through a buffer.
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t
                               for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if bfutil.is_standin(f):
            # Replace the standin with the cached big-file contents.
            path = bfutil.find_file(repo, getdata().strip())
            ### TODO: What if the file is not cached?
            f = bfutil.split_standin(f)

            # Safe closure over `path`: write() calls getdata()
            # synchronously below, within this same iteration.
            def getdatafn():
                with open(path, 'rb') as fd:
                    return fd.read()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            # Subrepo archive signature also changed across hg versions;
            # probe via TypeError like above.
            try:
                sub.archive(repo.ui, archiver, prefix)
            except TypeError:
                sub.archive(archiver, prefix)

    archiver.done()
def update_bfiles(ui, repo):
    '''Bring working-copy bfiles in line with their standins.

    For each listed bfile: refresh stale/missing contents from the cache
    (or fetch from the store), propagate the standin's mode, delete
    bfiles whose standin disappeared, and update the bfiles dirstate.
    '''
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()),
                              [], False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored,
         clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []          # hashes absent from the cache -> fetch via store
        at = 0
        updated = 0
        removed = 0         # counter; rebinds `removed` from the status tuple
        printed = False
        if bfiles:
            ui.status(_('Getting changed bfiles\n'))
            printed = True

        for bfile in bfiles:
            at += 1
            # bfile present without a standin => it was removed.
            if os.path.exists(repo.wjoin(bfile)) and not os.path.exists(
                    repo.wjoin(bfutil.standin(bfile))):
                os.unlink(repo.wjoin(bfile))
                removed += 1
                bfdirstate.forget(bfutil.unixpath(bfile))
                continue
            # Standin content holds the expected hash; its mode is the
            # mode the big file should carry.
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if not os.path.exists(
                    repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(
                    repo.wjoin(bfile)):
                path = bfutil.find_file(repo, expectedhash)
                if not path:
                    # Not cached locally; defer to the store below.
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    bfdirstate.normal(bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(
                    repo.wjoin(bfile)).st_mode:
                # Hash matches; only the permission bits drifted.
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                bfdirstate.normal(bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        # Apply modes and dirstate entries for store-fetched files.
        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            bfdirstate.normal(bfutil.unixpath(filename))

        # Purge dirstate entries (and any working copies) for bfiles no
        # longer tracked by the repo.
        for bfile in bfdirstate:
            if bfile not in bfiles:
                if os.path.exists(repo.wjoin(bfile)):
                    if not printed:
                        ui.status(_('Getting changed bfiles\n'))
                        printed = True
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                    bfdirstate.forget(bfutil.unixpath(bfile))
        bfdirstate.write()

        if printed:
            ui.status(_('%d big files updated, %d removed\n') % (updated,
                                                                 removed))
    finally:
        wlock.release()
def revert_bfiles(ui, repo):
    '''Re-materialize big files after their standins have been reverted.

    Restores each bfile's content from the cache/store when its hash no
    longer matches the standin, restores modes, and reconciles the bfiles
    dirstate against repo['.'] and the main dirstate.
    '''
    wlock = repo.wlock()
    try:
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        s = bfdirstate.status(match_.always(repo.root, repo.getcwd()),
                              [], False, False, False)
        (unsure, modified, added, removed, missing, unknown, ignored,
         clean) = s

        bfiles = bfutil.list_bfiles(repo)
        toget = []          # cache misses, fetched from the store below
        at = 0
        updated = 0
        for bfile in bfiles:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                bfdirstate.remove(bfile)
                continue
            # A '.orig' standin means revert saved a backup: keep the
            # current bfile as bfile.orig before restoring.
            # NOTE(review): single-argument os.path.join is a no-op --
            # likely vestigial; confirm against the other copy of this fn.
            if os.path.exists(
                    repo.wjoin(bfutil.standin(os.path.join(bfile +
                                                           '.orig')))):
                shutil.copyfile(repo.wjoin(bfile),
                                repo.wjoin(bfile + '.orig'))
            at += 1
            expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
            mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
            if not os.path.exists(
                    repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(
                    repo.wjoin(bfile)):
                path = bfutil.find_file(repo, expectedhash)
                if path is None:
                    toget.append((bfile, expectedhash))
                else:
                    util.makedirs(os.path.dirname(repo.wjoin(bfile)))
                    shutil.copy(path, repo.wjoin(bfile))
                    os.chmod(repo.wjoin(bfile), mode)
                    updated += 1
                    # Decide dirstate status relative to the parent rev:
                    # added (no standin in '.'), clean, or modified.
                    if bfutil.standin(bfile) not in repo['.']:
                        bfdirstate.add(bfutil.unixpath(bfile))
                    elif expectedhash == repo['.'][bfutil.standin(
                            bfile)].data().strip():
                        bfdirstate.normal(bfutil.unixpath(bfile))
                    else:
                        bfutil.dirstate_normaldirty(bfdirstate,
                                                    bfutil.unixpath(bfile))
            elif os.path.exists(repo.wjoin(bfile)) and mode != os.stat(
                    repo.wjoin(bfile)).st_mode:
                # Only permissions changed; same dirstate classification.
                os.chmod(repo.wjoin(bfile), mode)
                updated += 1
                if bfutil.standin(bfile) not in repo['.']:
                    bfdirstate.add(bfutil.unixpath(bfile))
                elif expectedhash == repo['.'][bfutil.standin(
                        bfile)].data().strip():
                    bfdirstate.normal(bfutil.unixpath(bfile))
                else:
                    bfutil.dirstate_normaldirty(bfdirstate,
                                                bfutil.unixpath(bfile))

        if toget:
            store = basestore._open_store(repo)
            (success, missing) = store.get(toget)
        else:
            success, missing = [], []

        for (filename, hash) in success:
            mode = os.stat(repo.wjoin(bfutil.standin(filename))).st_mode
            os.chmod(repo.wjoin(filename), mode)
            updated += 1
            if bfutil.standin(filename) not in repo['.']:
                bfdirstate.add(bfutil.unixpath(filename))
            elif hash == repo['.'][bfutil.standin(filename)].data().strip():
                bfdirstate.normal(bfutil.unixpath(filename))
            else:
                bfutil.dirstate_normaldirty(bfdirstate,
                                            bfutil.unixpath(filename))

        removed = 0
        for bfile in bfdirstate:
            if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                # Standin gone: drop the working copy (if any), then mark
                # removed if it exists in the parent, else just forget.
                if os.path.exists(repo.wjoin(bfile)):
                    os.unlink(repo.wjoin(bfile))
                    removed += 1
                if bfutil.standin(bfile) in repo['.']:
                    bfdirstate.remove(bfutil.unixpath(bfile))
                else:
                    bfdirstate.forget(bfutil.unixpath(bfile))
            else:
                # Copy the standin's state straight from the main dirstate.
                state = repo.dirstate[bfutil.standin(bfile)]
                if state == 'n':
                    bfdirstate.normal(bfile)
                elif state == 'r':
                    bfdirstate.remove(bfile)
                elif state == 'a':
                    bfdirstate.add(bfile)
                elif state == '?':
                    bfdirstate.forget(bfile)
        bfdirstate.write()
    finally:
        wlock.release()