Exemple #1
0
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    """Wrap filemerge() so bfile standins are resolved by comparing their
    hashes (and prompting the user) instead of running a normal file merge.

    Returns whatever the wrapped filemerge returns for non-standins; for
    standins returns None when nothing needs merging and 0 on resolution.
    """
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca

    if not bfutil.is_standin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)

    if not fcother.cmp(fcdest):
        # The standins are identical; there is nothing to merge.
        return None

    if fcancestor == fcother:
        # backwards merge: use the working dir parent as the ancestor
        fcancestor = fcdest.parents()[0]

    if orig != fcother.path():
        repo.ui.status(_('merging %s and %s to %s\n')
                % (bfutil.split_standin(orig),
                   bfutil.split_standin(fcother.path()),
                   bfutil.split_standin(fcdest.path())))
    else:
        repo.ui.status(_('merging %s\n') % bfutil.split_standin(fcdest.path()))

    if fcancestor.path() != fcother.path() and fcother.data() == fcancestor.data():
        # Other side unchanged since the ancestor: keep the local standin.
        return 0
    if fcancestor.path() != fcdest.path() and fcdest.data() == fcancestor.data():
        # Local side unchanged since the ancestor: take the other standin.
        repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
        return 0

    # Genuine conflict: ask the user which standin to keep.
    if repo.ui.promptchoice(_('bfile %s has a merge conflict\n'
                         'keep (l)ocal or take (o)ther?') % bfutil.split_standin(orig),
                         (_('&Local'), _('&Other')), 0) != 0:
        repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
    return 0
Exemple #2
0
    def _verifyfile(self, cctx, cset, contents, standin, verified):
        """Verify one bfile standin against the local system cache.

        Returns True when verification FAILED (missing from the cache, or
        contents differ when *contents* is set), False otherwise — including
        when the standin is skipped or this (file, node) was already checked.
        *verified* is a set of (filename, filenode) keys, updated in place.
        """
        filename = bfutil.split_standin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        # The standin's content is the 40-char sha1 of the real file.
        expect_hash = fctx.data()[0:40]
        verified.add(key)
        if not bfutil.in_system_cache(self.ui, expect_hash):
            # Bug fix: the original message referenced an undefined name
            # 'err' (err.strerror), raising NameError instead of warning.
            self.ui.warn(
                _('changeset %s: %s missing\n'
                  '  (hash: %s)\n') %
                (cset, filename, expect_hash))
            return True  # failed

        if contents:
            store_path = bfutil.system_cache_path(self.ui, expect_hash)
            actual_hash = bfutil.hashfile(store_path)
            if actual_hash != expect_hash:
                self.ui.warn(
                    _('changeset %s: %s: contents differ\n'
                      '  (%s:\n'
                      '  expected hash %s,\n'
                      '  but got %s)\n') %
                    (cset, filename, store_path, expect_hash, actual_hash))
                return True  # failed
        return False
Exemple #3
0
    def getfilectx(repo, memctx, f):
        """memctx callback: build the file context for *f* in the converted
        commit.

        Standins are re-emitted with their bfile hash as content; .hgtags is
        rewritten through revmap. Closes over ctx, bfiletohash and revmap
        from the enclosing scope.
        """
        if bfutil.is_standin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = bfutil.split_standin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a bfile because bfileness
                # doesn't change after rename or copy
                renamed = bfutil.standin(renamed[0])

            # Standin content is the bfile's hash, taken from the closure map.
            return context.memfilectx(f, bfiletohash[srcfname], 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]

            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets through revmap so tags point at the
                # converted changesets.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
Exemple #4
0
        def commitctx(self, *args, **kwargs):
            """Run the normal commitctx, then cache every bfile standin that
            ended up in the committed manifest."""
            result = super(bfiles_repo, self).commitctx(*args, **kwargs)
            ctx = self[result]
            for fname in ctx.files():
                if bfutil.is_standin(fname) and fname in ctx.manifest():
                    bfutil.copy_to_cache(self, ctx.node(),
                                         bfutil.split_standin(fname))
            return result
Exemple #5
0
def override_outgoing(orig, ui, repo, dest=None, **opts):
    """Wrap 'hg outgoing': run the original command, then (with --bf) list
    the bfiles that would be uploaded."""
    orig(ui, repo, dest, **opts)

    if not opts.pop('bf', None):
        return
    toupload = get_outgoing_bfiles(ui, repo, dest, **opts)
    if toupload is None:
        ui.status(_('kbfiles: No remote repo\n'))
        return
    ui.status(_('kbfiles to upload:\n'))
    for fname in toupload:
        ui.status(bfutil.split_standin(fname) + '\n')
    ui.status('\n')
Exemple #6
0
 def override_match(repo, pats=None, opts=None, globbed=False, default='relpath'):
     """Build a matcher that matches only bfile standins.

     Bug fix: the original used mutable default arguments (pats=[], opts={});
     they are now None sentinels, which is backward compatible. Relies on
     oldmatch and manifest from the enclosing scope.
     """
     if pats is None:
         pats = []
     if opts is None:
         opts = {}
     newpats = []
     # The patterns were previously mangled to add .hgbfiles; remove that now.
     for pat in pats:
         if match_.patkind(pat) is None and bfutil.short_name in pat:
             newpats.append(pat.replace(bfutil.short_name, ''))
         else:
             newpats.append(pat)
     match = oldmatch(repo, newpats, opts, globbed, default)
     m = copy.copy(match)
     bfile = lambda f: bfutil.standin(f) in manifest
     m._files = [bfutil.standin(f) for f in m._files if bfile(f)]
     m._fmap = set(m._files)
     orig_matchfn = m.matchfn
     # Only standins whose bfile name the original matcher accepts match.
     m.matchfn = lambda f: (bfutil.is_standin(f)
                            and bfile(bfutil.split_standin(f))
                            and orig_matchfn(bfutil.split_standin(f))
                            or None)
     return m
Exemple #7
0
    def _verifyfile(self, cctx, cset, contents, standin, verified):
        baseurl, authinfo = url_.getauthinfo(self.url)
        filename = bfutil.split_standin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        expect_hash = fctx.data()[0:40]
        store_path = bfutil.urljoin(baseurl, expect_hash)
        verified.add(key)

        request = urllib2.Request(store_path)
        request.add_header('SHA1-Request', expect_hash)
        try:
            url = self.opener.open(request)
            if 'Content-SHA1' in url.info():
                rhash = url.info()['Content-SHA1']
                if rhash == expect_hash:
                    return False
                else:
                    self.ui.warn(
                        _('changeset %s: %s: contents differ\n (%s)\n') %
                        (cset, filename, store_path))
                    return True  # failed
            else:
                self.ui.warn(
                    _('remote did not send a hash, '
                      'it probably does not understand this protocol\n'))
                return False
        except urllib2.HTTPError, e:
            if e.code == 404:
                self.ui.warn(
                    _('changeset %s: %s missing\n (%s)\n') %
                    (cset, filename, store_path))
                return True  # failed
            else:
                raise util.Abort(
                    _('check failed, unexpected response'
                      'status: %d: %s') % (e.code, e.msg))
Exemple #8
0
    def _verifyfile(self, cctx, cset, contents, standin, verified):
        baseurl, authinfo = url_.getauthinfo(self.url)
        filename = bfutil.split_standin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        expect_hash = fctx.data()[0:40]
        store_path = bfutil.urljoin(baseurl, expect_hash)
        verified.add(key)

        request = urllib2.Request(store_path)
        request.add_header('SHA1-Request',expect_hash)
        try:
            url = self.opener.open(request)
            if 'Content-SHA1' in url.info():
                rhash = url.info()['Content-SHA1']
                if rhash == expect_hash:
                    return False
                else:
                    self.ui.warn(
                        _('changeset %s: %s: contents differ\n (%s)\n')
                        % (cset, filename, store_path))
                    return True             # failed
            else:
                self.ui.warn(_('remote did not send a hash, '
                    'it probably does not understand this protocol\n'))
                return False
        except urllib2.HTTPError, e:
            if e.code == 404:
                self.ui.warn(
                    _('changeset %s: %s missing\n (%s)\n')
                    % (cset, filename, store_path))
                return True                 # failed
            else:
                raise util.Abort(_('check failed, unexpected response'
                                   'status: %d: %s') % (e.code, e.msg))
Exemple #9
0
    def getfilectx(repo, memctx, f):
        """memctx callback: build the file context for *f*, replacing bfile
        standins with the real file data from the source repo's cache and
        rewriting .hgtags targets through revmap."""
        if bfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(bfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = bfutil.split_standin(renamed[0])

            # The standin holds the real file's hash; fetch the cached data.
            hash = fctx.data().strip()
            path = bfutil.find_file(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            with open(path, 'rb') as fd:
                data = fd.read()
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets through revmap so tags point at the
                # converted changesets.
                rewritten = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    rewritten.append('%s %s\n'
                                     % (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(rewritten)
        return context.memfilectx(f, data, 'l' in fctx.flags(),
                                  'x' in fctx.flags(), renamed)
Exemple #10
0
    def getfilectx(repo, memctx, f):
        """memctx callback: build the file context for *f* in the converted
        commit.

        Standins are replaced by the real file contents from the source
        repo's cache; .hgtags targets are rewritten through revmap. Closes
        over ctx, files, rsrc and revmap from the enclosing scope.
        """
        if bfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(bfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = bfutil.split_standin(renamed[0])

            # The standin holds the real file's hash; read the cached data.
            hash = fctx.data().strip()
            path = bfutil.find_file(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            with open(path, 'rb') as fd:
                data = fd.read()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets through revmap so tags point at the
                # converted changesets.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
Exemple #11
0
    def getfilectx(repo, memctx, f):
        """memctx callback: build the file context for *f* in the converted
        commit.

        Standins are re-emitted with their bfile hash as content; .hgtags is
        rewritten through revmap. Closes over ctx, bfiletohash and revmap
        from the enclosing scope.
        """
        if bfutil.is_standin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = bfutil.split_standin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a bfile because bfileness
                # doesn't change after rename or copy
                renamed = bfutil.standin(renamed[0])

            # Standin content is the bfile's hash, taken from the closure map.
            return context.memfilectx(f, bfiletohash[srcfname], 'l'
                                      in fctx.flags(), 'x' in fctx.flags(),
                                      renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]

            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets through revmap so tags point at the
                # converted changesets.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' %
                                   (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(), 'x'
                                      in fctx.flags(), renamed)
Exemple #12
0
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    """Convert one changeset *ctx* from the source repo *rsrc* into *rdst*,
    turning bfile standins back into real files.

    *revmap* maps already-converted source nodes to destination nodes and
    is updated with this changeset's mapping.
    """
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        # Merge changeset: also add every file whose manifest entry differs
        # from either parent, and files present in a parent but not here.
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        for f in mp1:
            if f not in mc:
                files.add(f)
        for f in mp2:
            if f not in mc:
                files.add(f)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    def getfilectx(repo, memctx, f):
        # memctx callback: replace standins with the real file data from the
        # source repo's cache; rewrite .hgtags targets through revmap.
        if bfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(bfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = bfutil.split_standin(renamed[0])

            # The standin holds the real file's hash; read the cached data.
            hash = fctx.data().strip()
            path = bfutil.find_file(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            with open(path, 'rb') as fd:
                data = fd.read()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets so tags point at converted changesets.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)

    # Destination file list: standins appear under their real bfile names.
    dstfiles = []
    for file in files:
        if bfutil.is_standin(file):
            dstfiles.append(bfutil.split_standin(file))
        else:
            dstfiles.append(file)
    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
Exemple #13
0
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    """Convert one changeset *ctx* from the source repo *rsrc* into *rdst*,
    turning bfile standins back into real files.

    *revmap* maps already-converted source nodes to destination nodes and
    is updated with this changeset's mapping.
    """
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        # Merge changeset: also add every file whose manifest entry differs
        # from either parent, and files present in a parent but not here.
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        for f in mp1:
            if f not in mc:
                files.add(f)
        for f in mp2:
            if f not in mc:
                files.add(f)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    def getfilectx(repo, memctx, f):
        # memctx callback: replace standins with the real file data from the
        # source repo's cache; rewrite .hgtags targets through revmap.
        if bfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(bfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = bfutil.split_standin(renamed[0])

            # The standin holds the real file's hash; read the cached data.
            hash = fctx.data().strip()
            path = bfutil.find_file(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            with open(path, 'rb') as fd:
                data = fd.read()
            return context.memfilectx(f, data, 'l' in fctx.flags(), 'x'
                                      in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # Rewrite tag targets so tags point at converted changesets.
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' %
                                   (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(), 'x'
                                      in fctx.flags(), renamed)

    # Destination file list: standins appear under their real bfile names.
    dstfiles = []
    for file in files:
        if bfutil.is_standin(file):
            dstfiles.append(bfutil.split_standin(file))
        else:
            dstfiles.append(file)
    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
Exemple #14
0
 def toname(f):
     """Map a standin path back to its bfile name; other paths pass through."""
     return bfutil.split_standin(f) if bfutil.is_standin(f) else f
Exemple #15
0
        def commit(self, text="", user=None, date=None, match=None, force=False,
                   editor=False, extra={}):
            """Wrap commit() so bfile standins are refreshed first.

            Bug fix: the removed-state check used ``is not 'r'`` — identity
            comparison against a string literal, which only works by the
            accident of CPython string interning — it now uses ``!=``
            (matching the ``== 'r'`` comparison used elsewhere in this file).

            NOTE(review): this method mixes ``self`` and the closed-over
            ``repo``/``ui`` — presumably the same repository; confirm.
            """
            orig = super(bfiles_repo, self).commit

            wlock = repo.wlock()
            try:
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit everything.
                if (match is None) or (not match.anypats() and not match.files()):
                    bfiles = bfutil.list_bfiles(self)
                    bfdirstate = bfutil.open_bfdirstate(ui, self)
                    # this only loops through bfiles that exist (not removed/renamed)
                    for bfile in bfiles:
                        if os.path.exists(self.wjoin(bfutil.standin(bfile))):
                            bfutil.update_standin(self, bfutil.standin(bfile))
                            bfdirstate.normal(bfutil.unixpath(bfile))
                    # Forget bfiles whose standin has disappeared.
                    for bfile in bfdirstate:
                        if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                            bfdirstate.forget(bfutil.unixpath(bfile))
                    bfdirstate.write()

                    return orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)

                for file in match.files():
                    if bfutil.is_standin(file):
                        raise util.Abort("Don't commit bfile standin. Commit bfile.")

                # Case 2: user calls commit with specified patterns: refresh any
                # matching big files.
                smatcher = bfutil.compose_standin_matcher(self, match)
                standins = bfutil.dirstate_walk(self.dirstate, smatcher)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                    force=force, editor=editor, extra=extra)

                # Refresh all matching big files.  It's possible that the commit
                # will end up failing, in which case the big files will stay
                # refreshed.  No harm done: the user modified them and asked to
                # commit them, so sooner or later we're going to refresh the
                # standins.  Might as well leave them refreshed.
                bfdirstate = bfutil.open_bfdirstate(ui, self)
                for standin in standins:
                    bfile = bfutil.split_standin(standin)
                    if bfdirstate[bfile] != 'r':
                        bfutil.update_standin(self, standin)
                        bfdirstate.normal(bfutil.unixpath(bfile))
                    else:
                        bfdirstate.forget(bfutil.unixpath(bfile))
                bfdirstate.write()

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the user.
                # Have to modify _files to prevent commit() from complaining
                # "not tracked" for big files.
                bfiles = bfutil.list_bfiles(repo)
                match = copy.copy(match)
                orig_matchfn = match.matchfn

                # Check both the list of bfiles and the list of standins,
                # because if a bfile was removed it won't be in the list of
                # bfiles at this point.
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = bfutil.standin(f)

                    # Ignore known bfiles and standins
                    if f in bfiles or fstandin in standins:
                        continue

                    # Append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    # Prevalidate matching standin directories
                    if any(st for st in match._files if st.startswith(fstandin)):
                        continue
                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    # Regular files match as before; bfiles match only via
                    # their standins.
                    if orig_matchfn(f):
                        return f not in bfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)
            finally:
                wlock.release()
Exemple #16
0
def override_add(orig, ui, repo, *pats, **opts):
    """Wrap 'hg add' so files matching --bf / size / pattern rules are added
    as bfile standins instead of normal files.

    Bug fixes relative to the original:
      * the final status used ``result is 1`` (identity comparison with an
        int literal, reliable only via CPython small-int caching); now ``==``.
      * ``cmdutil.match`` is restored in a finally block, so an exception in
        the wrapped add can no longer leave the monkey-patch installed.
      * the inner matcher no longer uses mutable default arguments.
    """
    bf = opts.pop('bf', None)

    bfsize = opts.pop('bfsize', None)
    if bfsize:
        try:
            bfsize = int(bfsize)
        except ValueError:
            raise util.Abort(_('size must be an integer, was %s\n') % bfsize)
    else:
        # Fall back to the configured size threshold (default 10 MB).
        if os.path.exists(repo.wjoin(bfutil.short_name)):
            bfsize = ui.config(bfutil.long_name, 'size', default='10')
            if bfsize:
                try:
                    bfsize = int(bfsize)
                except ValueError:
                    raise util.Abort(_('bfiles.size must be integer, was %s\n') % bfsize)

    bfmatcher = None
    if os.path.exists(repo.wjoin(bfutil.short_name)):
        bfpats = ui.config(bfutil.long_name, 'patterns', default=())
        if bfpats:
            bfpats = bfpats.split(' ')
            bfmatcher = match_.match(repo.root, '', list(bfpats))

    bfnames = []
    m = cmdutil.match(repo, pats, opts)
    m.bad = lambda x, y: None  # silence "file not found" complaints
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        bfile = bfutil.standin(f) in wctx
        nfile = f in wctx

        if exact and bfile:
            ui.warn(_('%s already a bfile\n') % f)
            continue
        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and nfile:
            continue
        if exact or (not bfile and not nfile):
            if bf or (bfsize and os.path.getsize(repo.wjoin(f)) >= bfsize*1024*1024) \
                                            or (bfmatcher and bfmatcher(f)):
                bfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as bfile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock, otherwise there could be a race condition between when
    # standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            bfdirstate = bfutil.open_bfdirstate(ui, repo)
            for f in bfnames:
                standinname = bfutil.standin(f)
                bfutil.write_standin(repo, standinname, hash='',
                                     executable=bfutil.get_executable(repo.wjoin(f)))
                standins.append(standinname)
                if bfdirstate[bfutil.unixpath(f)] == 'r':
                    bfdirstate.normallookup(bfutil.unixpath(f))
                else:
                    bfdirstate.add(bfutil.unixpath(f))
            bfdirstate.write()
            bad += [bfutil.split_standin(f)
                    for f in bfutil.repo_add(repo, standins) if f in m.files()]
    finally:
        wlock.release()

    oldmatch = cmdutil.match
    manifest = repo[None].manifest()
    def override_match(repo, pats=None, opts=None, globbed=False, default='relpath'):
        # Temporary matcher that excludes bfiles and their standins so the
        # wrapped add only sees regular files.
        if pats is None:
            pats = []
        if opts is None:
            opts = {}
        match = oldmatch(repo, pats, opts, globbed, default)
        m = copy.copy(match)
        notbfile = lambda f: not bfutil.is_standin(f) and bfutil.standin(f) not in manifest
        m._files = [f for f in m._files if notbfile(f)]
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        m.matchfn = lambda f: notbfile(f) and orig_matchfn(f) or None
        return m
    cmdutil.match = override_match
    try:
        result = orig(ui, repo, *pats, **opts)
    finally:
        cmdutil.match = oldmatch

    return (result == 1 or bad) and 1 or 0
Exemple #17
0
        def status(self, node1='.', node2=None, match=None, ignored=False, clean=False, unknown=False, subrepos=None):
            """Wrap status() so bfiles are reported under their real names.

            Returns the usual 7-tuple (modified, added, removed, missing,
            unknown, ignored, clean) with bfile entries merged in.

            Bug fix: the inner inctx() helper used a bare ``except:``, which
            also swallowed KeyboardInterrupt/SystemExit; it now catches
            Exception only.
            """
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.bfstatus:
                # Plain pass-through; the TypeError fallback supports older
                # Mercurial signatures without the subrepos argument.
                try:
                    return super(bfiles_repo, self).status(node1, node2, match, listignored, listclean, listunknown, subrepos)
                except TypeError:
                    return super(bfiles_repo, self).status(node1, node2, match, listignored, listclean, listunknown)
            else:
                # some calls in this function rely on the old version of status
                self.bfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = repo[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = repo[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    # True when 'file' exists in 'ctx' (manifest membership
                    # for the working context, filectx lookup otherwise).
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except Exception:
                        return False

                # create a copy of match that matches standins instead of bfiles
                # if matcher not set then it is the always matcher so overwrite that
                if match is None:
                    match = match_.always(self.root, self.getcwd())

                def tostandin(file):
                    if inctx(bfutil.standin(file), ctx2):
                        return bfutil.standin(file)
                    return file

                m = copy.copy(match)
                m._files = [tostandin(f) for f in m._files]

                # get ignored clean and unknown but remove them later if they were not asked for
                try:
                    result = super(bfiles_repo, self).status(node1, node2, m, True, True, True, subrepos)
                except TypeError:
                    result = super(bfiles_repo, self).status(node1, node2, m, True, True, True)
                if working:
                    # Hold the wlock while we read bfiles and update the bfdirstate
                    wlock = repo.wlock()
                    try:
                        # Any non bfiles that were explicitly listed must be taken out or
                        # bfdirstate.status will report an error. The status of these files
                        # was already computed using super's status.
                        bfdirstate = bfutil.open_bfdirstate(ui, self)
                        match._files = [f for f in match._files if f in bfdirstate]
                        s = bfdirstate.status(match, [], listignored, listclean, listunknown)
                        (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
                        if parentworking:
                            # 'unsure' entries are resolved by comparing the
                            # standin's hash against the working copy's.
                            for bfile in unsure:
                                if ctx1[bfutil.standin(bfile)].data().strip() != bfutil.hashfile(self.wjoin(bfile)):
                                    modified.append(bfile)
                                else:
                                    clean.append(bfile)
                                    bfdirstate.normal(bfutil.unixpath(bfile))
                            bfdirstate.write()
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for bfile in tocheck:
                                standin = bfutil.standin(bfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != bfutil.hashfile(self.wjoin(bfile)):
                                        modified.append(bfile)
                                    else:
                                        clean.append(bfile)
                                else:
                                    added.append(bfile)
                    finally:
                        wlock.release()

                    # bfiles present in ctx1 but gone from the bfdirstate are
                    # removed.
                    for standin in ctx1.manifest():
                        if not bfutil.is_standin(standin):
                            continue
                        bfile = bfutil.split_standin(standin)
                        if not match(bfile):
                            continue
                        if bfile not in bfdirstate:
                            removed.append(bfile)
                    # Handle unknown and ignored differently
                    bfiles = (modified, added, removed, missing, [], [], clean)
                    result = list(result)
                    # Unknown files
                    result[4] = [f for f in unknown if repo.dirstate[f] == '?' and not bfutil.is_standin(f)]
                    # Ignored files must be ignored by both the dirstate and bfdirstate
                    result[5] = set(ignored).intersection(set(result[5]))
                    # combine normal files and bfiles
                    normals = [[fn for fn in filelist if not bfutil.is_standin(fn)] for filelist in result]
                    result = [sorted(list1 + list2) for (list1, list2) in zip(normals, bfiles)]
                else:
                    # Non-working comparison: just map standin names back to
                    # their bfile names.
                    def toname(f):
                        if bfutil.is_standin(f):
                            return bfutil.split_standin(f)
                        return f
                    result = [[toname(f) for f in items] for items in result]

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.bfstatus = True
                return result
Exemple #18
0
 def matchfn(f):
     """Match standins via their bfile name; other files via the original
     matcher."""
     if not bfutil.is_standin(f):
         return orig_matchfn(f)
     return orig_matchfn(bfutil.split_standin(f)) and (f in repo[None] or f in ctx)
Exemple #19
0
def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    """Wrap archival.archive so bfile standins are replaced by the real
    big-file contents (read from the system cache) in the produced archive.

    The interface mirrors archival.archive; 'orig' (the wrapped function)
    is unused because archiving must be redone from scratch to substitute
    the bfiles.  No locking is needed: only history and the bfile cache
    are read, and neither is modified here.
    """
    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    # In Mercurial <= 1.5 the archiver constructor accepts the prefix; in
    # >= 1.6 it raises TypeError, and the prefix must instead be applied
    # to each file name by hand in write().
    try:
        archiver = archival.archivers[kind](dest, prefix, mtime or ctx.date()[0])

        def write(name, mode, islink, getdata):
            # Archiver handles the prefix itself in this (old) code path.
            if matchfn and not matchfn(name):
                return
            data = getdata()
            if decode:
                data = repo.wwritedata(name, data)
            archiver.addfile(name, mode, islink, data)
    except TypeError:
        if kind == 'files':
            if prefix:
                raise util.Abort(_('cannot give prefix when archiving to files'))
            # Guard against prefix=None (the default): write() below
            # concatenates 'prefix + name', which would raise TypeError.
            prefix = ''
        else:
            prefix = archival.tidyprefix(dest, kind, prefix)

        def write(name, mode, islink, getdata):
            if matchfn and not matchfn(name):
                return
            data = getdata()
            if decode:
                data = repo.wwritedata(name, data)
            archiver.addfile(prefix + name, mode, islink, data)

        archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            # Standard '.hg_archival.txt' payload: repo/node/branch plus
            # either the global tags of this ctx or latest-tag information.
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        # 0o644 (rather than the legacy 0644 literal) is valid on
        # Python 2.6+ and Python 3; the file already requires 2.6+ ('with').
        write('.hg_archival.txt', 0o644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if bfutil.is_standin(f):
            # The standin's content is the hash of the real file; resolve
            # it to the cached copy and archive it under the real name.
            path = bfutil.find_file(repo, getdata().strip())
            ### TODO: What if the file is not cached?
            f = bfutil.split_standin(f)

            def getdatafn(path=path):
                # Bind 'path' as a default argument so the closure stays
                # correct even if evaluation were deferred past this
                # iteration (late-binding closure hazard).
                with open(path, 'rb') as fd:
                    return fd.read()

            getdata = getdatafn
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            # Older Mercurial's sub.archive() did not take a ui argument.
            try:
                sub.archive(repo.ui, archiver, prefix)
            except TypeError:
                sub.archive(archiver, prefix)

    archiver.done()