    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)
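All of these examples revolve around the same small set of lfutil helpers (standin, isstandin, splitstandin) and the convention that a standin file's content is the largefile's 40-character hash. As a rough, self-contained orientation, the sketch below mimics the path convention those helpers implement; the '.hglf' prefix and the function names mirror lfutil, but this is an illustrative stand-in, not the extension's actual implementation.

import posixpath

# Illustrative sketch (assumption): largefiles tracks each big file through a
# small "standin" file that lives under the '.hglf' directory and holds the
# file's hash. These helpers only mimic the observable behaviour of
# lfutil.standin/isstandin/splitstandin used throughout the snippets below.
SHORTNAME = '.hglf'

def standin(filename):
    # largefile 'data/big.iso' is tracked via the standin '.hglf/data/big.iso'
    return posixpath.join(SHORTNAME, filename)

def isstandin(filename):
    return filename.startswith(SHORTNAME + '/')

def splitstandin(filename):
    # inverse of standin(); returns None for paths outside the standin tree
    if isstandin(filename):
        return filename[len(SHORTNAME) + 1:]
    return None

assert splitstandin(standin('data/big.iso')) == 'data/big.iso'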
Example No. 2
    def _verifyfile(self, cctx, cset, contents, standin, verified):
        filename = lfutil.splitstandin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        expecthash = fctx.data()[0:40]
        verified.add(key)
        if not lfutil.instore(self.remote, expecthash):
            self.ui.warn(
                _('changeset %s: %s missing\n'
                  '  (looked for hash %s)\n')
                % (cset, filename, expecthash))
            return True                 # failed

        if contents:
            storepath = lfutil.storepath(self.remote, expecthash)
            actualhash = lfutil.hashfile(storepath)
            if actualhash != expecthash:
                self.ui.warn(
                    _('changeset %s: %s: contents differ\n'
                      '  (%s:\n'
                      '  expected hash %s,\n'
                      '  but got %s)\n')
                    % (cset, filename, storepath, expecthash, actualhash))
                return True             # failed
        return False
Example No. 3
    def _verifyfile(self, cctx, cset, contents, standin, verified):
        filename = lfutil.splitstandin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        expecthash = fctx.data()[0:40]
        storepath, exists = lfutil.findstorepath(self.remote, expecthash)
        verified.add(key)
        if not exists:
            self.ui.warn(
                _('changeset %s: %s references missing %s\n')
                % (cset, filename, storepath))
            return True                 # failed

        if contents:
            actualhash = lfutil.hashfile(storepath)
            if actualhash != expecthash:
                self.ui.warn(
                    _('changeset %s: %s references corrupted %s\n')
                    % (cset, filename, storepath))
                return True             # failed
        return False
Example No. 4
    def _verifyfile(self, cctx, cset, contents, standin, verified):
        filename = lfutil.splitstandin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        verified.add(key)

        expecthash = fctx.data()[0:40]
        stat = self._stat([expecthash])[expecthash]
        if not stat:
            return False
        elif stat == 1:
            self.ui.warn(
                _('changeset %s: %s: contents differ\n')
                % (cset, filename))
            return True # failed
        elif stat == 2:
            self.ui.warn(
                _('changeset %s: %s missing\n')
                % (cset, filename))
            return True # failed
        else:
            raise RuntimeError('verify failed: unexpected response from '
                               'statlfile (%r)' % stat)
Example No. 5
 def matchfn(f):
     if lfutil.isstandin(f):
         # We need to keep track of what largefiles are being
         # matched so we know which ones to update later --
         # otherwise we accidentally revert changes to other
         # largefiles. This is repo-specific, so duckpunch the
         # repo object to keep the list of largefiles for us
         # later.
         if orig_matchfn(lfutil.splitstandin(f)) and (f in repo[None] or f in ctx):
             lfileslist = getattr(repo, "_lfilestoupdate", [])
             lfileslist.append(lfutil.splitstandin(f))
             repo._lfilestoupdate = lfileslist
             return True
         else:
             return False
     return orig_matchfn(f)
Example No. 6
    def _verifyfile(self, cctx, cset, contents, standin, verified):
        filename = lfutil.splitstandin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        verified.add(key)

        expecthash = fctx.data()[0:40]
        stat = self._stat([expecthash])[expecthash]
        if not stat:
            return False
        elif stat == 1:
            self.ui.warn(
                _('changeset %s: %s: contents differ\n') % (cset, filename))
            return True  # failed
        elif stat == 2:
            self.ui.warn(_('changeset %s: %s missing\n') % (cset, filename))
            return True  # failed
        else:
            raise RuntimeError('verify failed: unexpected response from '
                               'statlfile (%r)' % stat)
Example No. 7
def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug('    %s\n' % (lfhash))
        else:
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
Example No. 8
    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed; return None to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)

            # If one file is missing, likely all files from this rev are
            if path is None:
                cachelfiles(ui, rsrc, ctx.node())
                path = lfutil.findfile(rsrc, hash)

                if path is None:
                    raise util.Abort(_("missing largefile '%s' from revision %s") % (f, node.hex(ctx.node())))

            data = ""
            fd = None
            try:
                fd = open(path, "rb")
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(repo, f, data, "l" in fctx.flags(), "x" in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)
Example No. 9
    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ""
            fd = None
            try:
                fd = open(path, "rb")
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, "l" in fctx.flags(), "x" in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)
Example No. 10
    def _verifyfile(self, cctx, cset, contents, standin, verified):
        filename = lfutil.splitstandin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        expecthash = fctx.data()[0:40]
        verified.add(key)
        if not lfutil.instore(self.remote, expecthash):
            self.ui.warn(
                _('changeset %s: %s missing\n'
                  '  (looked for hash %s)\n') % (cset, filename, expecthash))
            return True  # failed

        if contents:
            storepath = lfutil.storepath(self.remote, expecthash)
            actualhash = lfutil.hashfile(storepath)
            if actualhash != expecthash:
                self.ui.warn(
                    _('changeset %s: %s: contents differ\n'
                      '  (%s:\n'
                      '  expected hash %s,\n'
                      '  but got %s)\n') %
                    (cset, filename, storepath, expecthash, actualhash))
                return True  # failed
        return False
Example No. 11
 def lfmatchfn(f):
     if origmatchfn(f):
         return True
     lf = lfutil.splitstandin(f)
     if lf is None:
         return False
     notbad.add(lf)
     return origmatchfn(lf)
Example No. 12
 def lfmatchfn(f):
     if origmatchfn(f):
         return True
     lf = lfutil.splitstandin(f)
     if lf is None:
         return False
     notbad.add(lf)
     return origmatchfn(lf)
Example No. 13
 def matchfn(f):
     if lfutil.isstandin(f):
         # We need to keep track of what largefiles are being
         # matched so we know which ones to update later --
         # otherwise we accidentally revert changes to other
         # largefiles. This is repo-specific, so duckpunch the
         # repo object to keep the list of largefiles for us
         # later.
         if origmatchfn(lfutil.splitstandin(f)) and \
                 (f in repo[None] or f in ctx):
             lfileslist = getattr(repo, '_lfilestoupdate', [])
             lfileslist.append(lfutil.splitstandin(f))
             repo._lfilestoupdate = lfileslist
             return True
         else:
             return False
     return origmatchfn(f)
Example No. 14
def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
    repo._get(repo._state + ('hg', ))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    lfcommands.cachelfiles(ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo._repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache') %
                    lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub.archive(ui, archiver,
                    os.path.join(prefix, repo._path) + '/', submatch)
Example No. 15
def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    lfcommands.cachelfiles(ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo._repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
                    submatch)
Example No. 16
        def commitctx(self, *args, **kwargs):
            node = super(lfiles_repo, self).commitctx(*args, **kwargs)
            ctx = self[node]
            for filename in ctx.files():
                if lfutil.isstandin(filename) and filename in ctx.manifest():
                    realfile = lfutil.splitstandin(filename)
                    lfutil.copytostore(self, ctx.node(), realfile)

            return node
Example No. 17
        def commitctx(self, *args, **kwargs):
            node = super(lfiles_repo, self).commitctx(*args, **kwargs)
            ctx = self[node]
            for filename in ctx.files():
                if lfutil.isstandin(filename) and filename in ctx.manifest():
                    realfile = lfutil.splitstandin(filename)
                    lfutil.copytostore(self, ctx.node(), realfile)

            return node
Example No. 18
def add_largefiles(ui, repo, *pats, **opts):
    large = opts.pop("large", None)
    lfsize = lfutil.getminsize(ui, lfutil.islfilesrepo(repo), opts.pop("lfsize", None))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, "patterns", default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, "", list(lfpats))

    lfnames = []
    m = scmutil.match(repo[None], pats, opts)
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_("%s already a largefile\n") % f)
            continue

        if exact or not exists:
            abovemin = lfsize and os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_("adding %s as a largefile\n") % m.rel(f))

    bad = []
    standins = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get("dry_run"):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash="", executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == "r":
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo, standins) if f in m.files()]
    finally:
        wlock.release()
    return bad
Example No. 19
 def override_match(ctx, pats=[], opts={}, globbed=False,
         default='relpath'):
     newpats = []
     # The patterns were previously mangled to add the standin
     # directory; we need to remove that now
     for pat in pats:
         if match_.patkind(pat) is None and lfutil.shortname in pat:
             newpats.append(pat.replace(lfutil.shortname, ''))
         else:
             newpats.append(pat)
     match = oldmatch(ctx, newpats, opts, globbed, default)
     m = copy.copy(match)
     lfile = lambda f: lfutil.standin(f) in manifest
     m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
     m._fmap = set(m._files)
     orig_matchfn = m.matchfn
     m.matchfn = lambda f: (lfutil.isstandin(f) and
                         lfile(lfutil.splitstandin(f)) and
                         orig_matchfn(lfutil.splitstandin(f)) or
                         None)
     return m
Example No. 20
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed; return None to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)

            # If one file is missing, likely all files from this rev are
            if path is None:
                cachelfiles(ui, rsrc, ctx.node())
                path = lfutil.findfile(rsrc, hash)

                if path is None:
                    raise util.Abort(
                        _("missing largefile \'%s\' from revision %s")
                         % (f, node.hex(ctx.node())))

            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
Example No. 21
def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop('large', None):
        toupload = set()
        lfutil.getlfilestoupload(repo, missing,
                                 lambda fn, lfhash: toupload.add(fn))
        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
            ui.status('\n')
Example No. 22
def override_outgoing(orig, ui, repo, dest=None, **opts):
    orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for file in toupload:
                ui.status(lfutil.splitstandin(file) + '\n')
            ui.status('\n')
Example No. 23
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)

            # If one file is missing, likely all files from this rev are
            if path is None:
                cachelfiles(ui, rsrc, ctx.node())
                path = lfutil.findfile(rsrc, hash)

                if path is None:
                    raise util.Abort(
                        _("missing largefile \'%s\' from revision %s") %
                        (f, node.hex(ctx.node())))

            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(repo, f, data, 'l' in fctx.flags(), 'x'
                                      in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
Example No. 24
def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1, ) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo,
                                 opts.get('output'),
                                 ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
Example No. 25
def override_outgoing(orig, ui, repo, dest=None, **opts):
    orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for file in toupload:
                ui.status(lfutil.splitstandin(file) + '\n')
            ui.status('\n')
Example No. 26
def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop('large', None):
        toupload = set()
        lfutil.getlfilestoupload(repo, missing,
                                 lambda fn, lfhash: toupload.add(fn))
        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
            ui.status('\n')
Example No. 27
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest):  # files identical?
            return None

        # backwards, use working dir parent as ancestor
        if fcancestor == fcother:
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            repo.ui.status(
                _("merging %s and %s to %s\n")
                % (lfutil.splitstandin(orig), lfutil.splitstandin(fcother.path()), lfutil.splitstandin(fcdest.path()))
            )
        else:
            repo.ui.status(_("merging %s\n") % lfutil.splitstandin(fcdest.path()))

        if fcancestor.path() != fcother.path() and fcother.data() == fcancestor.data():
            return 0
        if fcancestor.path() != fcdest.path() and fcdest.data() == fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        if (
            repo.ui.promptchoice(
                _("largefile %s has a merge conflict\n" "keep (l)ocal or take (o)ther?") % lfutil.splitstandin(orig),
                (_("&Local"), _("&Other")),
                0,
            )
            == 0
        ):
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
Example No. 28
def overridemanifestmerge(origfn,
                          repo,
                          p1,
                          p2,
                          pa,
                          branchmerge,
                          force,
                          partial,
                          acceptremote=False):
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
                     acceptremote)
    processed = []

    for action in actions:
        if overwrite:
            processed.append(action)
            continue
        f, m, args, msg = action

        choices = (_('&Largefile'), _('&Normal file'))

        splitstandin = lfutil.splitstandin(f)
        if (m == "g" and splitstandin is not None and splitstandin in p1
                and f in p2):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('%s has been turned into a largefile\n'
                    'use (l)argefile or keep as (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r", None, msg))
                processed.append((standin, "g", (p2.flags(standin), ), msg))
            else:
                processed.append((standin, "r", None, msg))
        elif m == "g" and lfutil.standin(f) in p1 and f in p2:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('%s has been turned into a normal file\n'
                    'keep as (l)argefile or use (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r", None, msg))
            else:
                processed.append((standin, "r", None, msg))
                processed.append((lfile, "g", (p2.flags(lfile), ), msg))
        else:
            processed.append(action)

    return processed
Example No. 29
def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
    actions = origfn(repo, p1, p2, pa, overwrite, partial)
    processed = []

    for action in actions:
        if overwrite:
            processed.append(action)
            continue
        f, m = action[:2]

        choices = (_('&Largefile'), _('&Normal file'))
        if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = lfutil.splitstandin(f)
            standin = f
            msg = _('%s has been turned into a largefile\n'
                    'use (l)argefile or keep as (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r"))
                processed.append((standin, "g", p2.flags(standin)))
            else:
                processed.append((standin, "r"))
        elif m == "m" and lfutil.standin(f) in p1 and f in p2:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('%s has been turned into a normal file\n'
                    'keep as (l)argefile or use (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r"))
            else:
                processed.append((standin, "r"))
                processed.append((lfile, "g", p2.flags(lfile)))
        else:
            processed.append(action)

    return processed
Example No. 30
def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
    actions = origfn(repo, p1, p2, pa, overwrite, partial)
    processed = []

    for action in actions:
        if overwrite:
            processed.append(action)
            continue
        f, m = action[:2]

        choices = (_('&Largefile'), _('&Normal file'))
        if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = lfutil.splitstandin(f)
            standin = f
            msg = _('%s has been turned into a largefile\n'
                    'use (l)argefile or keep as (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r"))
                processed.append((standin, "g", p2.flags(standin)))
            else:
                processed.append((standin, "r"))
        elif m == "m" and lfutil.standin(f) in p1 and f in p2:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('%s has been turned into a normal file\n'
                    'keep as (l)argefile or use (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r"))
            else:
                processed.append((standin, "r"))
                processed.append((lfile, "g", p2.flags(lfile)))
        else:
            processed.append(action)

    return processed
Example No. 31
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest):  # files identical?
            return None

        # backwards, use working dir parent as ancestor
        if fcancestor == fcother:
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            repo.ui.status(
                _('merging %s and %s to %s\n') %
                (lfutil.splitstandin(orig), lfutil.splitstandin(
                    fcother.path()), lfutil.splitstandin(fcdest.path())))
        else:
            repo.ui.status(
                _('merging %s\n') % lfutil.splitstandin(fcdest.path()))

        if fcancestor.path() != fcother.path() and fcother.data() == \
                fcancestor.data():
            return 0
        if fcancestor.path() != fcdest.path() and fcdest.data() == \
                fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        if repo.ui.promptchoice(
                _('largefile %s has a merge conflict\n'
                  'keep (l)ocal or take (o)ther?') % lfutil.splitstandin(orig),
            (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
Example No. 32
 def override_match(ctx,
                    pats=[],
                    opts={},
                    globbed=False,
                    default='relpath'):
     newpats = []
     # The patterns were previously mangled to add the standin
     # directory; we need to remove that now
     for pat in pats:
         if match_.patkind(pat) is None and lfutil.shortname in pat:
             newpats.append(pat.replace(lfutil.shortname, ''))
         else:
             newpats.append(pat)
     match = oldmatch(ctx, newpats, opts, globbed, default)
     m = copy.copy(match)
     lfile = lambda f: lfutil.standin(f) in manifest
     m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
     m._fmap = set(m._files)
     orig_matchfn = m.matchfn
     m.matchfn = lambda f: (lfutil.isstandin(f) and lfile(
         lfutil.splitstandin(f)) and orig_matchfn(
             lfutil.splitstandin(f)) or None)
     return m
Example No. 33
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
                     acceptremote, followcopies)

    if overwrite:
        return actions

    removes = set(a[0] for a in actions['r'])

    newglist = []
    for action in actions['g']:
        f, args, msg = action
        splitstandin = f and lfutil.splitstandin(f)
        if (splitstandin is not None and
            splitstandin in p1 and splitstandin not in removes):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('remote turned local normal file %s into a largefile\n'
                    'use (l)argefile or keep (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                actions['r'].append((lfile, None, msg))
                newglist.append((standin, (p2.flags(standin),), msg))
            else:
                actions['r'].append((standin, None, msg))
        elif lfutil.standin(f) in p1 and lfutil.standin(f) not in removes:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('remote turned local largefile %s into a normal file\n'
                    'keep (l)argefile or use (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                actions['r'].append((lfile, None, msg))
            else:
                actions['r'].append((standin, None, msg))
                newglist.append((lfile, (p2.flags(lfile),), msg))
        else:
            newglist.append(action)

    newglist.sort()
    actions['g'] = newglist

    return actions
Example No. 34
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and ohash != dhash
            and (dhash == ahash or repo.ui.promptchoice(
                _('largefile %s has a merge conflict\nancestor was %s\n'
                  'keep (l)ocal %s or\ntake (o)ther %s?'
                  '$$ &Local $$ &Other') %
                (lfutil.splitstandin(orig), ahash, dhash, ohash), 0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0
Example No. 35
def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
                          partial, acceptremote=False):
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
                     acceptremote)

    if overwrite:
        return actions

    removes = set(a[0] for a in actions if a[1] == 'r')
    processed = []

    for action in actions:
        f, m, args, msg = action

        splitstandin = lfutil.splitstandin(f)
        if (m == "g" and splitstandin is not None and
            splitstandin in p1 and splitstandin not in removes):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('remote turned local normal file %s into a largefile\n'
                    'use (l)argefile or keep (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                processed.append((lfile, "r", None, msg))
                processed.append((standin, "g", (p2.flags(standin),), msg))
            else:
                processed.append((standin, "r", None, msg))
        elif (m == "g" and
            lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('remote turned local largefile %s into a normal file\n'
                    'keep (l)argefile or use (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                processed.append((lfile, "r", None, msg))
            else:
                processed.append((standin, "r", None, msg))
                processed.append((lfile, "g", (p2.flags(lfile),), msg))
        else:
            processed.append(action)

    return processed
Example No. 36
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
                     acceptremote, followcopies)

    if overwrite:
        return actions

    removes = set(a[0] for a in actions if a[1] == 'r')
    processed = []

    for action in actions:
        f, m, args, msg = action

        splitstandin = f and lfutil.splitstandin(f)
        if (m == "g" and splitstandin is not None and splitstandin in p1
                and splitstandin not in removes):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('remote turned local normal file %s into a largefile\n'
                    'use (l)argefile or keep (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                processed.append((lfile, "r", None, msg))
                processed.append((standin, "g", (p2.flags(standin), ), msg))
            else:
                processed.append((standin, "r", None, msg))
        elif (m == "g" and lfutil.standin(f) in p1
              and lfutil.standin(f) not in removes):
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('remote turned local largefile %s into a normal file\n'
                    'keep (l)argefile or use (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                processed.append((lfile, "r", None, msg))
            else:
                processed.append((standin, "r", None, msg))
                processed.append((lfile, "g", (p2.flags(lfile), ), msg))
        else:
            processed.append(action)

    return processed
Example No. 37
def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded')  % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
Example No. 38
def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
                          partial, acceptremote=False):
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
                     acceptremote)
    processed = []

    for action in actions:
        if overwrite:
            processed.append(action)
            continue
        f, m, args, msg = action

        choices = (_('&Largefile'), _('&Normal file'))

        splitstandin = lfutil.splitstandin(f)
        if (m == "g" and splitstandin is not None and
            splitstandin in p1 and f in p2):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('%s has been turned into a largefile\n'
                    'use (l)argefile or keep as (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r", None, msg))
                processed.append((standin, "g", (p2.flags(standin),), msg))
            else:
                processed.append((standin, "r", None, msg))
        elif m == "g" and lfutil.standin(f) in p1 and f in p2:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('%s has been turned into a normal file\n'
                    'keep as (l)argefile or use (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                processed.append((lfile, "r", None, msg))
            else:
                processed.append((standin, "r", None, msg))
                processed.append((lfile, "g", (p2.flags(lfile),), msg))
        else:
            processed.append(action)

    return processed
Example No. 39
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
               (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0
Example No. 40
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)

    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
Example No. 41
    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)
Example No. 42
    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)
Example No. 43
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)

    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
Example No. 44
    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                        name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
Example No. 45
    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)

            # If one file is missing, likely all files from this rev are
            if path is None:
                cachelfiles(ui, rsrc, ctx.node())
                path = lfutil.findfile(rsrc, hash)

                if path is None:
                    raise util.Abort(
                        _("missing largefile \'%s\' from revision %s") %
                        (f, node.hex(ctx.node())))

            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(repo, f, data, 'l' in fctx.flags(), 'x'
                                      in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)
Example No. 46
    def verify(self, revs, contents=False):
        '''Verify the existence (and, optionally, contents) of every big
        file revision referenced by every changeset in revs.
        Return 0 if all is well, non-zero on any errors.'''

        self.ui.status(
            _('searching %d changesets for largefiles\n') % len(revs))
        verified = set()  # set of (filename, filenode) tuples
        filestocheck = []  # list of (cset, filename, expectedhash)
        for rev in revs:
            cctx = self.repo[rev]
            cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

            for standin in cctx:
                filename = lfutil.splitstandin(standin)
                if filename:
                    fctx = cctx[standin]
                    key = (filename, fctx.filenode())
                    if key not in verified:
                        verified.add(key)
                        expectedhash = fctx.data()[0:40]
                        filestocheck.append((cset, filename, expectedhash))

        failed = self._verifyfiles(contents, filestocheck)

        numrevs = len(verified)
        numlfiles = len(set([fname for (fname, fnode) in verified]))
        if contents:
            self.ui.status(
                _('verified contents of %d revisions of %d largefiles\n') %
                (numrevs, numlfiles))
        else:
            self.ui.status(
                _('verified existence of %d revisions of %d largefiles\n') %
                (numrevs, numlfiles))
        return int(failed)
Example No. 47
    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]

            data = fctx.data()
            if f == '.hgtags':
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n'
                                   % (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
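
The .hgtags handling above maps every old changeset id through revmap so that converted tags point at the converted changesets. A minimal stand-alone sketch of that rewrite, keeping the ids as 40-character hex strings rather than the binary nodes the real code keys revmap on:

def rewrite_hgtags(data, revmap):
    # data: the text of .hgtags; revmap: old 40-hex changeset id -> new id
    newlines = []
    for line in data.splitlines():
        oldid, name = line.split(' ', 1)
        newlines.append('%s %s\n' % (revmap[oldid], name))
    return ''.join(newlines)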
Example No. 48
        def status(self, node1='.', node2=None, match=None, ignored=False,
                clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                return super(lfilesrepo, self).status(node1, node2, match,
                    listignored, listclean, listunknown, listsubrepos)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = self[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = self[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                wlock = None
                try:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                    except error.LockError:
                        pass

                    # First check if there were files specified on the
                    # command line.  If there were, and none of them were
                    # largefiles, we should just bail here and let super
                    # handle it -- thus gaining a big performance boost.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    if match.files() and not match.anypats():
                        for f in lfdirstate:
                            if match(f):
                                break
                        else:
                            return super(lfilesrepo, self).status(node1, node2,
                                    match, listignored, listclean,
                                    listunknown, listsubrepos)

                    # Create a copy of match that matches standins instead
                    # of largefiles.
                    def tostandins(files):
                        if not working:
                            return files
                        newfiles = []
                        dirstate = self.dirstate
                        for f in files:
                            sf = lfutil.standin(f)
                            if sf in dirstate:
                                newfiles.append(sf)
                            elif sf in dirstate.dirs():
                                # Directory entries could be regular or
                                # standin, check both
                                newfiles.extend((f, sf))
                            else:
                                newfiles.append(f)
                        return newfiles

                    m = copy.copy(match)
                    m._files = tostandins(m._files)

                    result = super(lfilesrepo, self).status(node1, node2, m,
                        ignored, clean, unknown, listsubrepos)
                    if working:

                        def sfindirstate(f):
                            sf = lfutil.standin(f)
                            dirstate = self.dirstate
                            return sf in dirstate or sf in dirstate.dirs()

                        match._files = [f for f in match._files
                                        if sfindirstate(f)]
                        # Don't waste time getting the ignored and unknown
                        # files from lfdirstate
                        s = lfdirstate.status(match, [], False,
                                listclean, False)
                        (unsure, modified, added, removed, missing, _unknown,
                                _ignored, clean) = s
                        if parentworking:
                            for lfile in unsure:
                                standin = lfutil.standin(lfile)
                                if standin not in ctx1:
                                    # from second parent
                                    modified.append(lfile)
                                elif ctx1[standin].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)

                        # Standins no longer found in lfdirstate have been
                        # removed
                        for standin in ctx1.manifest():
                            if not lfutil.isstandin(standin):
                                continue
                            lfile = lfutil.splitstandin(standin)
                            if not match(lfile):
                                continue
                            if lfile not in lfdirstate:
                                removed.append(lfile)

                        # Filter result lists
                        result = list(result)

                        # Largefiles are not really removed when they're
                        # still in the normal dirstate. Likewise, normal
                        # files are not really removed if they are still in
                        # lfdirstate. This happens in merges where files
                        # change type.
                        removed = [f for f in removed
                                   if f not in self.dirstate]
                        result[2] = [f for f in result[2]
                                     if f not in lfdirstate]

                        lfiles = set(lfdirstate._map)
                        # Unknown files
                        result[4] = set(result[4]).difference(lfiles)
                        # Ignored files
                        result[5] = set(result[5]).difference(lfiles)
                        # combine normal files and largefiles
                        normals = [[fn for fn in filelist
                                    if not lfutil.isstandin(fn)]
                                   for filelist in result]
                        lfiles = (modified, added, removed, missing, [], [],
                                  clean)
                        result = [sorted(list1 + list2)
                                  for (list1, list2) in zip(normals, lfiles)]
                    else:
                        def toname(f):
                            if lfutil.isstandin(f):
                                return lfutil.splitstandin(f)
                            return f
                        result = [[toname(f) for f in items]
                                  for items in result]

                    if wlock:
                        lfdirstate.write()

                finally:
                    if wlock:
                        wlock.release()

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result
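
The last step of the working-copy branch above strips standins out of each status list and splices in the largefile results. A hedged sketch of just that merge, with the seven lists in Mercurial's usual order (modified, added, removed, deleted, unknown, ignored, clean) and isstandin passed in as a plain predicate:

def combine_status(result, lfstatus, isstandin):
    # result: seven status lists that may still contain standin paths
    # lfstatus: seven lists of largefile names derived from lfdirstate
    normals = [[fn for fn in filelist if not isstandin(fn)]
               for filelist in result]
    return [sorted(a + b) for (a, b) in zip(normals, lfstatus)]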
Example No. 49
    def files(self):
        filenames = super(lfilesctx, self).files()
        return [lfutil.splitstandin(f) or f for f in filenames]
Example No. 50
        def commit(self, text="", user=None, date=None, match=None,
                force=False, editor=False, extra={}):
            orig = super(lfilesrepo, self).commit

            wlock = self.wlock()
            try:
                # Case 0: Rebase or Transplant
                # We have to take the time to pull down the new largefiles now.
                # Otherwise, any largefiles that were modified in the
                # destination changesets get overwritten, either by the rebase
                # or in the first commit after the rebase or transplant.
                # updatelfiles will update the dirstate to mark any pulled
                # largefiles as modified
                if getattr(self, "_isrebasing", False) or \
                        getattr(self, "_istransplanting", False):
                    lfcommands.updatelfiles(self.ui, self, filelist=None,
                                            printmessage=False)
                    result = orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)
                    return result
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit all files that
                # are "dirty".
                if ((match is None) or
                    (not match.anypats() and not match.files())):
                    # Spend a bit of time here to get a list of files we know
                    # are modified so we can compare only against those.
                    # It can cost a lot of time (several seconds)
                    # otherwise to update all standins if the largefiles are
                    # large.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    dirtymatch = match_.always(self.root, self.getcwd())
                    s = lfdirstate.status(dirtymatch, [], False, False, False)
                    (unsure, modified, added, removed, _missing, _unknown,
                            _ignored, _clean) = s
                    modifiedfiles = unsure + modified + added + removed
                    lfiles = lfutil.listlfiles(self)
                    # this only loops through largefiles that exist (not
                    # removed/renamed)
                    for lfile in lfiles:
                        if lfile in modifiedfiles:
                            if os.path.exists(
                                    self.wjoin(lfutil.standin(lfile))):
                                # this handles the case where a rebase is being
                                # performed and the working copy is not updated
                                # yet.
                                if os.path.exists(self.wjoin(lfile)):
                                    lfutil.updatestandin(self,
                                        lfutil.standin(lfile))
                                    lfdirstate.normal(lfile)

                    result = orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)

                    if result is not None:
                        for lfile in lfdirstate:
                            if lfile in modifiedfiles:
                                if (not os.path.exists(self.wjoin(
                                   lfutil.standin(lfile)))) or \
                                   (not os.path.exists(self.wjoin(lfile))):
                                    lfdirstate.drop(lfile)

                    # This needs to be after commit; otherwise precommit hooks
                    # get the wrong status
                    lfdirstate.write()
                    return result

                lfiles = lfutil.listlfiles(self)
                match._files = self._subdirlfs(match.files(), lfiles)

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = lfutil.composestandinmatcher(self, match)
                standins = self.dirstate.walk(smatcher, [], False, False)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)

                # Refresh all matching big files.  It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed.  No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins.  Might as well leave them refreshed.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                for standin in standins:
                    lfile = lfutil.splitstandin(standin)
                    if lfdirstate[lfile] != 'r':
                        lfutil.updatestandin(self, standin)
                        lfdirstate.normal(lfile)
                    else:
                        lfdirstate.drop(lfile)

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user.  Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                match = copy.copy(match)
                origmatchfn = match.matchfn

                # Check both the list of largefiles and the list of
                # standins because if a largefile was removed, it
                # won't be in the list of largefiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = lfutil.standin(f)

                    # ignore known largefiles and standins
                    if f in lfiles or fstandin in standins:
                        continue

                    # append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if origmatchfn(f):
                        return f not in lfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                result = orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)
                # This needs to be after commit; otherwise precommit hooks
                # get the wrong status
                lfdirstate.write()
                return result
            finally:
                wlock.release()
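
The matcher override near the end of the commit() case above lets a path through only if it is a regular requested file or a standin for one of the requested largefiles. A small hedged sketch of that closure, with origmatchfn, lfiles and standins as illustrative inputs:

def make_commit_matchfn(origmatchfn, lfiles, standins):
    # origmatchfn: the user's original matcher
    # lfiles: known largefile names; standins: standins refreshed above
    def matchfn(f):
        if origmatchfn(f):
            # regular files match normally; largefiles only via standins
            return f not in lfiles
        return f in standins
    return matchfn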
Example No. 51
def toname(f):
    if lfutil.isstandin(f):
        return lfutil.splitstandin(f)
    return f
Example No. 52
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix=None, mtime=None, subrepos=None):
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(_('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):

        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {
                    'template': '{latesttag}\n{latesttagdistance}',
                    'style': '',
                    'patch': None,
                    'git': None
                }
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache') %
                    lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(repo.ui, archiver, prefix, submatch)

    archiver.done()
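
Two small pieces of the archive path above are easy to miss: the largefile data is read lazily from its store path only when the archiver asks for it, and the file mode comes from the standin's flags. A minimal sketch of both (the real write() additionally filters through matchfn and repo.wwritedata):

def make_getdata(path):
    # defer reading the largefile until the archiver requests its data
    def getdatafn():
        fd = open(path, 'rb')
        try:
            return fd.read()
        finally:
            fd.close()
    return getdatafn

def filemode(flags):
    # 'x' in the flags marks an executable file, as in write() above
    return 'x' in flags and 0755 or 0644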
Example No. 53
def addlargefiles(ui, repo, *pats, **opts):
    large = opts.pop('large', None)
    lfsize = lfutil.getminsize(ui, lfutil.islfilesrepo(repo),
                               opts.pop('lfsize', None))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = scmutil.match(repo[None], pats, opts)
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            wfile = repo.wjoin(f)

            # In case the file was removed previously, but not committed
            # (issue3507)
            if not os.path.exists(wfile):
                continue

            abovemin = (lfsize
                        and os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f) for f in repo[None].add(standins)
                    if f in m.files()]
    finally:
        wlock.release()
    return bad
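
Whether a walked file becomes a largefile depends on the --large flag, the configured minimum size in MiB, and the optional largefiles.patterns matcher. A self-contained sketch of just that rule (parameter names are illustrative):

import os

def wants_largefile(f, wfile, large, lfsize, lfmatcher=None):
    # f: repo-relative name; wfile: path on disk
    # large: True when --large was passed; lfsize: minimum size in MiB or None
    abovemin = (lfsize
                and os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
    return bool(large or abovemin or (lfmatcher and lfmatcher(f)))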
Example No. 54
        def status(self, node1='.', node2=None, match=None, ignored=False,
                clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                return super(lfilesrepo, self).status(node1, node2, match,
                    listignored, listclean, listunknown, listsubrepos)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = repo[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = repo[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                # First check if there were files specified on the
                # command line.  If there were, and none of them were
                # largefiles, we should just bail here and let super
                # handle it -- thus gaining a big performance boost.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                if match.files() and not match.anypats():
                    for f in lfdirstate:
                        if match(f):
                            break
                    else:
                        return super(lfilesrepo, self).status(node1, node2,
                                match, listignored, listclean,
                                listunknown, listsubrepos)

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandins(files):
                    if not working:
                        return files
                    newfiles = []
                    dirstate = repo.dirstate
                    for f in files:
                        sf = lfutil.standin(f)
                        if sf in dirstate:
                            newfiles.append(sf)
                        elif sf in dirstate.dirs():
                            # Directory entries could be regular or
                            # standin, check both
                            newfiles.extend((f, sf))
                        else:
                            newfiles.append(f)
                    return newfiles

                # Create a function that we can use to override what is
                # normally the ignore matcher.  We've already checked
                # for ignored files on the first dirstate walk, and
                # unnecessarily re-checking here causes a huge performance
                # hit because lfdirstate only knows about largefiles
                def _ignoreoverride(self):
                    return False

                m = copy.copy(match)
                m._files = tostandins(m._files)

                # Get ignored files here even if we weren't asked for them; we
                # must use the result here for filtering later
                result = super(lfilesrepo, self).status(node1, node2, m,
                    True, clean, unknown, listsubrepos)
                if working:
                    try:
                        # Any non-largefiles that were explicitly listed must be
                        # taken out or lfdirstate.status will report an error.
                        # The status of these files was already computed using
                        # super's status.
                        # Override lfdirstate's ignore matcher to not do
                        # anything
                        origignore = lfdirstate._ignore
                        lfdirstate._ignore = _ignoreoverride

                        def sfindirstate(f):
                            sf = lfutil.standin(f)
                            dirstate = repo.dirstate
                            return sf in dirstate or sf in dirstate.dirs()
                        match._files = [f for f in match._files
                                        if sfindirstate(f)]
                        # Don't waste time getting the ignored and unknown
                        # files again; we already have them
                        s = lfdirstate.status(match, [], False,
                                listclean, False)
                        (unsure, modified, added, removed, missing, unknown,
                                ignored, clean) = s
                        # Replace the list of ignored and unknown files with
                        # the previously calculated lists, and strip out the
                        # largefiles
                        lfiles = set(lfdirstate._map)
                        ignored = set(result[5]).difference(lfiles)
                        unknown = set(result[4]).difference(lfiles)
                        if parentworking:
                            for lfile in unsure:
                                standin = lfutil.standin(lfile)
                                if standin not in ctx1:
                                    # from second parent
                                    modified.append(lfile)
                                elif ctx1[standin].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)
                    finally:
                        # Replace the original ignore function
                        lfdirstate._ignore = origignore

                    for standin in ctx1.manifest():
                        if not lfutil.isstandin(standin):
                            continue
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            removed.append(lfile)

                    # Filter result lists
                    result = list(result)

                    # Largefiles are not really removed when they're
                    # still in the normal dirstate. Likewise, normal
                    # files are not really removed if they are still in
                    # lfdirstate. This happens in merges where files
                    # change type.
                    removed = [f for f in removed if f not in repo.dirstate]
                    result[2] = [f for f in result[2] if f not in lfdirstate]

                    # Unknown files
                    unknown = set(unknown).difference(ignored)
                    result[4] = [f for f in unknown
                                 if (repo.dirstate[f] == '?' and
                                     not lfutil.isstandin(f))]
                    # Ignored files were calculated earlier by the dirstate,
                    # and we already stripped out the largefiles from the list
                    result[5] = ignored
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    lfiles = (modified, added, removed, missing, [], [], clean)
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfiles)]
                else:
                    def toname(f):
                        if lfutil.isstandin(f):
                            return lfutil.splitstandin(f)
                        return f
                    result = [[toname(f) for f in items] for items in result]

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result
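
The tostandins() helper in this and the previous status() example rewrites the requested paths so the underlying walk sees standins. A hedged sketch of the same idea over plain containers instead of a dirstate object:

def tostandins_sketch(files, tracked, trackeddirs, standin):
    # tracked: tracked paths (like the dirstate); trackeddirs: tracked
    # directories (like dirstate.dirs()); standin: largefile name -> standin
    newfiles = []
    for f in files:
        sf = standin(f)
        if sf in tracked:
            newfiles.append(sf)
        elif sf in trackeddirs:
            # a directory may hold regular files and standins; keep both names
            newfiles.extend((f, sf))
        else:
            newfiles.append(f)
    return newfiles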
Example No. 55
def lfmatchfn(f):
    lf = lfutil.splitstandin(f)
    if lf is not None and origmatchfn(lf):
        return True
    r = origmatchfn(f)
    return r
Example No. 56
def decodepath(orig, path):
    return lfutil.splitstandin(path) or path
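
A quick illustration of the splitstandin-or-path idiom above, using a simplified splitstandin (the real one lives in lfutil):

def splitstandin(path, prefix='.hglf/'):
    return path[len(prefix):] if path.startswith(prefix) else None

def decodepath_sketch(path):
    return splitstandin(path) or path

assert decodepath_sketch('.hglf/data/big.bin') == 'data/big.bin'
assert decodepath_sketch('README') == 'README'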
Example No. 57
def lfmatchfn(f):
    lf = lfutil.splitstandin(f)
    if lf is None:
        return origmatchfn(f)
    notbad.add(lf)
    return origmatchfn(lf)
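
This closure is typically built around an existing matcher, with notbad collecting matched largefile names so they are not later reported as bad paths. A hedged sketch of that wiring, with all inputs passed in explicitly:

def make_lfmatchfn(origmatchfn, splitstandin, notbad):
    def lfmatchfn(f):
        lf = splitstandin(f)
        if lf is None:
            return origmatchfn(f)
        # remember the largefile name so a bad-path callback can skip it
        notbad.add(lf)
        return origmatchfn(lf)
    return lfmatchfn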