Example #1
 def ancestor(self, c2, warn=False):
     """
     return the "best" ancestor context of self and c2
     """
     # deal with workingctxs
     n2 = c2._node
     if n2 is None:
         n2 = c2._parents[0]._node
     cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
     if not cahs:
         anc = nullid
     elif len(cahs) == 1:
         anc = cahs[0]
     else:
         for r in self._repo.ui.configlist('merge', 'preferancestor'):
             ctx = changectx(self._repo, r)
             anc = ctx.node()
             if anc in cahs:
                 break
         else:
             anc = self._repo.changelog.ancestor(self._node, n2)
         if warn:
             self._repo.ui.status(
                 (_("note: using %s as ancestor of %s and %s\n") %
                  (short(anc), short(self._node), short(n2))) +
                 ''.join(_("      alternatively, use --config "
                           "merge.preferancestor=%s\n") %
                         short(n) for n in sorted(cahs) if n != anc))
     return changectx(self._repo, anc)
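Every example on this page uses short() to turn a 20-byte binary node into a readable identifier. As a point of reference, here is a minimal sketch of hex() and short() (mirroring the behavior of mercurial.node, which returns the first 12 hex digits); the sample node value is made up:

import binascii

def hex(node):
    # mercurial.node.hex is a hexlify of the 20-byte binary node
    return binascii.hexlify(node)

def short(node):
    # abbreviate to the first 12 hex digits for human-readable output
    return hex(node)[:12]

nodeid = "\x12\x34" * 10          # fake 20-byte binary node
print short(nodeid)               # 123412341234
print hex(nodeid)                 # full 40-digit form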
Example #2
    def checkentry(obj, i, node, seen, linkrevs, f):
        lr = obj.linkrev(obj.rev(node))
        if lr < 0 or (havecl and lr not in linkrevs):
            if lr < 0 or lr >= len(cl):
                msg = _("rev %d points to nonexistent changeset %d")
            else:
                msg = _("rev %d points to unexpected changeset %d")
            err(None, msg % (i, lr), f)
            if linkrevs:
                if f and len(linkrevs) > 1:
                    try:
                        # attempt to filter down to real linkrevs
                        linkrevs = [l for l in linkrevs
                                    if lrugetctx(l)[f].filenode() == node]
                    except Exception:
                        pass
                warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
            lr = None # can't be trusted

        try:
            p1, p2 = obj.parents(node)
            if p1 not in seen and p1 != nullid:
                err(lr, _("unknown parent 1 %s of %s") %
                    (short(p1), short(node)), f)
            if p2 not in seen and p2 != nullid:
                err(lr, _("unknown parent 2 %s of %s") %
                    (short(p2), short(node)), f)
        except Exception, inst:
            exc(lr, _("checking parents of %s") % short(node), inst, f)
Example #3
    def filterunknown(self, repo):
        """remove unknown nodes from the phase boundary

        Nothing is lost as unknown nodes only hold data for their descendants.
        """
        filtered = False
        nodemap = repo.changelog.nodemap # to filter unknown nodes
        for phase, nodes in enumerate(self.phaseroots):
            missing = sorted(node for node in nodes if node not in nodemap)
            if missing:
                for mnode in missing:
                    repo.ui.debug(
                        'removing unknown node %s from %i-phase boundary\n'
                        % (short(mnode), phase))
                nodes.symmetric_difference_update(missing)
                filtered = True
        if filtered:
            self.dirty = True
        # filterunknown is called by repo.destroyed; we may have no changes in
        # root, but phaserevs contents is certainly invalid (or at least we
        # have no proper way to check that). Related to issue 3858.
        #
        # The other caller is __init__, which has no _phaserevs initialized
        # anyway. If this changes, we should consider adding a dedicated
        # "destroyed" function to phasecache or a proper cache key mechanism
        # (see the branchmap one).
        self.invalidate()
Example #4
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    r = _premerge(repo, toolconf, files, labels=labels)
    if r:
        tool, toolpath, binary, symlink = toolconf
        a, b, c, back = files
        out = ""
        env = {
            "HG_FILE": fcd.path(),
            "HG_MY_NODE": short(mynode),
            "HG_OTHER_NODE": str(fco.changectx()),
            "HG_BASE_NODE": str(fca.changectx()),
            "HG_MY_ISLINK": "l" in fcd.flags(),
            "HG_OTHER_ISLINK": "l" in fco.flags(),
            "HG_BASE_ISLINK": "l" in fca.flags(),
        }

        ui = repo.ui

        args = _toolstr(ui, tool, "args", "$local $base $other")
        if "$output" in args:
            out, a = a, back  # read input from backup, write to original
        replace = {"local": a, "base": b, "other": c, "output": out}
        args = util.interpolate(r"\$", replace, args, lambda s: util.shellquote(util.localpath(s)))
        cmd = toolpath + " " + args
        repo.ui.debug("launching merge tool: %s\n" % cmd)
        r = ui.system(cmd, cwd=repo.root, environ=env)
        repo.ui.debug("merge tool returned: %s\n" % r)
        return True, r
    return False, 0
Example #5
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    r = _premerge(repo, toolconf, files, labels=labels)
    if r:
        tool, toolpath, binary, symlink = toolconf
        a, b, c, back = files
        out = ""
        env = {'HG_FILE': fcd.path(),
               'HG_MY_NODE': short(mynode),
               'HG_OTHER_NODE': str(fco.changectx()),
               'HG_BASE_NODE': str(fca.changectx()),
               'HG_MY_ISLINK': 'l' in fcd.flags(),
               'HG_OTHER_ISLINK': 'l' in fco.flags(),
               'HG_BASE_ISLINK': 'l' in fca.flags(),
               }

        ui = repo.ui

        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back # read input from backup, write to original
        replace = {'local': a, 'base': b, 'other': c, 'output': out}
        args = util.interpolate(r'\$', replace, args,
                                lambda s: util.shellquote(util.localpath(s)))
        r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
                        out=ui.fout)
        return True, r
    return False, 0
Example #6
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files):
    r = _premerge(repo, toolconf, files)
    if r:
        tool, toolpath, binary, symlink = toolconf
        a, b, c, back = files
        out = ""
        env = dict(HG_FILE=fcd.path(),
                   HG_MY_NODE=short(mynode),
                   HG_OTHER_NODE=str(fco.changectx()),
                   HG_BASE_NODE=str(fca.changectx()),
                   HG_MY_ISLINK='l' in fcd.flags(),
                   HG_OTHER_ISLINK='l' in fco.flags(),
                   HG_BASE_ISLINK='l' in fca.flags())

        ui = repo.ui

        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back # read input from backup, write to original
        replace = dict(local=a, base=b, other=c, output=out)
        args = util.interpolate(r'\$', replace, args,
                                lambda s: '"%s"' % util.localpath(s))
        r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
                        out=ui.fout)
        return True, r
    return False, 0
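The $local/$base/$other placeholders in the tool arguments above are expanded by util.interpolate before the external command is launched. As a rough stand-in (using re directly instead of util.interpolate, with made-up paths), the substitution works like this:

import re

replace = {'local': 'file.txt', 'base': '/tmp/base', 'other': '/tmp/other',
           'output': ''}
args = '$local $base $other'
args = re.sub(r'\$(local|base|other|output)',
              lambda m: '"%s"' % replace[m.group(1)], args)
print args   # "file.txt" "/tmp/base" "/tmp/other"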
Example #7
def _writetagcache(ui, repo, heads, tagfnode, cachetags):

    try:
        cachefile = repo.opener('cache/tags', 'w', atomictemp=True)
    except (OSError, IOError):
        return

    ui.log('tagscache', 'writing tags cache file with %d heads and %d tags\n',
            len(heads), len(cachetags))

    realheads = repo.heads()            # for sanity checks below
    for head in heads:
        # temporary sanity checks; these can probably be removed
        # once this code has been in crew for a few weeks
        assert head in repo.changelog.nodemap, \
               'trying to write non-existent node %s to tag cache' % short(head)
        assert head in realheads, \
               'trying to write non-head %s to tag cache' % short(head)
        assert head != nullid, \
               'trying to write nullid to tag cache'

        # This can't fail because of the first assert above.  When/if we
        # remove that assert, we might want to catch LookupError here
        # and downgrade it to a warning.
        rev = repo.changelog.rev(head)

        fnode = tagfnode.get(head)
        if fnode:
            cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
        else:
            cachefile.write('%d %s\n' % (rev, hex(head)))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module.  If we converted
    # them local encoding on input, we would lose info writing them to
    # the cache.
    cachefile.write('\n')
    for (name, (node, hist)) in cachetags.iteritems():
        for n in hist:
            cachefile.write("%s %s\n" % (hex(n), name))
        cachefile.write("%s %s\n" % (hex(node), name))

    try:
        cachefile.close()
    except (OSError, IOError):
        pass
Example #8
def _bundle(repo, bases, heads, node, suffix, extranodes=None):
    """create a bundle with the specified revisions as a backup"""
    cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
    backupdir = repo.join("strip-backup")
    if not os.path.isdir(backupdir):
        os.mkdir(backupdir)
    name = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
    repo.ui.warn(_("saving bundle to %s\n") % name)
    return changegroup.writebundle(cg, name, "HG10BZ")
Example #9
 def __init__(self, name, index, message):
     self.name = name
     self.index = index
     # this can't be called 'message' because at least some installs of
     # Python 2.6+ complain about the 'message' property being deprecated
     self.lookupmessage = message
     if isinstance(name, str) and len(name) == 20:
         from node import short
         name = short(name)
     RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
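Assuming this __init__ belongs to Mercurial's error.LookupError (a RevlogError subclass), a hypothetical caller sees the abbreviated node in the resulting message; the node value and index name below are made up:

try:
    raise LookupError("\x12\x34" * 10, "00manifest.i", "no node")
except LookupError, inst:
    print inst    # 00manifest.i@123412341234: no node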
Example #10
def _bundle(repo, bases, heads, node, suffix, extranodes=None, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
    backupdir = repo.join("strip-backup")
    if not os.path.isdir(backupdir):
        os.mkdir(backupdir)
    name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
    if compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return changegroup.writebundle(cg, name, bundletype)
Example #11
    def checkentry(obj, i, node, seen, linkrevs, f):
        lr = obj.linkrev(obj.rev(node))
        if lr < 0 or (havecl and lr not in linkrevs):
            t = "unexpected"
            if lr < 0 or lr >= len(cl):
                t = "nonexistent"
            err(None, _("rev %d point to %s changeset %d") % (i, t, lr), f)
            if linkrevs:
                warn(_(" (expected %s)") % " ".join(map(str,linkrevs)))
            lr = None # can't be trusted

        try:
            p1, p2 = obj.parents(node)
            if p1 not in seen and p1 != nullid:
                err(lr, _("unknown parent 1 %s of %s") %
                    (short(p1), short(node)), f)
            if p2 not in seen and p2 != nullid:
                err(lr, _("unknown parent 2 %s of %s") %
                    (short(p2), short(node)), f)
        except Exception, inst:
            exc(lr, _("checking parents of %s") % short(node), inst, f)
Example #12
def _writetagcache(ui, repo, heads, tagfnode, cachetags):

    try:
        cachefile = repo.opener('tags.cache', 'w', atomictemp=True)
    except (OSError, IOError):
        return
    _debug(ui, 'writing cache file %s\n' % cachefile.name)

    realheads = repo.heads()  # for sanity checks below
    for head in heads:
        # temporary sanity checks; these can probably be removed
        # once this code has been in crew for a few weeks
        assert head in repo.changelog.nodemap, \
               'trying to write non-existent node %s to tag cache' % short(head)
        assert head in realheads, \
               'trying to write non-head %s to tag cache' % short(head)
        assert head != nullid, \
               'trying to write nullid to tag cache'

        # This can't fail because of the first assert above.  When/if we
        # remove that assert, we might want to catch LookupError here
        # and downgrade it to a warning.
        rev = repo.changelog.rev(head)

        fnode = tagfnode.get(head)
        if fnode:
            cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
        else:
            cachefile.write('%d %s\n' % (rev, hex(head)))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module.  If we converted
    # them local encoding on input, we would lose info writing them to
    # the cache.
    cachefile.write('\n')
    for (name, (node, hist)) in cachetags.iteritems():
        cachefile.write("%s %s\n" % (hex(node), name))

    cachefile.rename()
    cachefile.close()
Example #13
    def checkentry(obj, i, node, seen, linkrevs, f):
        lr = obj.linkrev(obj.rev(node))
        if lr < 0 or (havecl and lr not in linkrevs):
            if lr < 0 or lr >= len(cl):
                msg = _("rev %d points to nonexistent changeset %d")
            else:
                msg = _("rev %d points to unexpected changeset %d")
            err(None, msg % (i, lr), f)
            if linkrevs:
                warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
            lr = None # can't be trusted

        try:
            p1, p2 = obj.parents(node)
            if p1 not in seen and p1 != nullid:
                err(lr, _("unknown parent 1 %s of %s") %
                    (short(p1), short(node)), f)
            if p2 not in seen and p2 != nullid:
                err(lr, _("unknown parent 2 %s of %s") %
                    (short(p2), short(node)), f)
        except Exception, inst:
            exc(lr, _("checking parents of %s") % short(node), inst, f)
Example #14
def makefilename(repo,
                 pat,
                 node,
                 desc=None,
                 total=None,
                 seqno=None,
                 revwidth=None,
                 pathname=None):
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        'm': lambda: re.sub('[^\w]', '_', str(desc))
    }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        if node:
            expander.update(node_expander)
        if node:
            expander['r'] = (
                lambda: str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError, inst:
        raise util.Abort(
            _("invalid format spec '%%%s' in output filename") % inst.args[0])
Example #15
    def checkentry(obj, i, node, seen, linkrevs, f):
        lr = obj.linkrev(obj.rev(node))
        if lr < 0 or (havecl and lr not in linkrevs):
            if lr < 0 or lr >= len(cl):
                msg = _("rev %d points to nonexistent changeset %d")
            else:
                msg = _("rev %d points to unexpected changeset %d")
            err(None, msg % (i, lr), f)
            if linkrevs:
                if f and len(linkrevs) > 1:
                    try:
                        # attempt to filter down to real linkrevs
                        linkrevs = [
                            l for l in linkrevs
                            if lrugetctx(l)[f].filenode() == node
                        ]
                    except Exception:
                        pass
                warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
            lr = None  # can't be trusted

        try:
            p1, p2 = obj.parents(node)
            if p1 not in seen and p1 != nullid:
                err(lr,
                    _("unknown parent 1 %s of %s") % (short(p1), short(node)),
                    f)
            if p2 not in seen and p2 != nullid:
                err(lr,
                    _("unknown parent 2 %s of %s") % (short(p2), short(node)),
                    f)
        except Exception as inst:
            exc(lr, _("checking parents of %s") % short(node), inst, f)

        if node in seen:
            err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
        seen[node] = i
        return lr
Example #16
def filterunknown(repo, phaseroots=None):
    """remove unknown nodes from the phase boundary

    no data is lost as unknown nodes only hold data for their descendants
    """
    if phaseroots is None:
        phaseroots = repo._phaseroots
    nodemap = repo.changelog.nodemap # to filter unknown nodes
    for phase, nodes in enumerate(phaseroots):
        missing = [node for node in nodes if node not in nodemap]
        if missing:
            for mnode in missing:
                msg = 'removing unknown node %(n)s from %(p)i-phase boundary\n'
                repo.ui.debug(msg % {'n': short(mnode), 'p': phase})
            nodes.symmetric_difference_update(missing)
            repo._dirtyphases = True
Example #17
def findglobaltags(ui, repo, alltags, tagtypes):
    '''Find global tags in a repo.

    "alltags" maps tag name to (node, hist) 2-tuples.

    "tagtypes" maps tag name to tag type. Global tags always have the
    "global" tag type.

    The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
    should be passed in.

    The tags cache is read and updated as a side-effect of calling.
    '''
    # This is so we can be lazy and assume alltags contains only global
    # tags when we pass it to _writetagcache().
    assert len(alltags) == len(tagtypes) == 0, \
           "findglobaltags() should be called first"

    (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct?  are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return

    seen = set()  # set of fnode
    fctx = None
    for head in reversed(heads):  # oldest to newest
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        if fnode and fnode not in seen:
            seen.add(fnode)
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)

            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
            _updatetags(filetags, 'global', alltags, tagtypes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, valid, alltags)
Example #18
def findglobaltags(ui, repo, alltags, tagtypes):
    '''Find global tags in a repo.

    "alltags" maps tag name to (node, hist) 2-tuples.

    "tagtypes" maps tag name to tag type. Global tags always have the
    "global" tag type.

    The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
    should be passed in.

    The tags cache is read and updated as a side-effect of calling.
    '''
    # This is so we can be lazy and assume alltags contains only global
    # tags when we pass it to _writetagcache().
    assert len(alltags) == len(tagtypes) == 0, \
           "findglobaltags() should be called first"

    (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct?  are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return

    seen = set()  # set of fnode
    fctx = None
    for head in reversed(heads):  # oldest to newest
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        if fnode and fnode not in seen:
            seen.add(fnode)
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)

            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
            _updatetags(filetags, 'global', alltags, tagtypes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, valid, alltags)
Example #19
def filterunknown(repo, phaseroots=None):
    """remove unknown nodes from the phase boundary

    no data is lost as unknown nodes only hold data for their descendants
    """
    if phaseroots is None:
        phaseroots = repo._phaseroots
    nodemap = repo.changelog.nodemap  # to filter unknown nodes
    for phase, nodes in enumerate(phaseroots):
        missing = [node for node in nodes if node not in nodemap]
        if missing:
            for mnode in missing:
                repo.ui.debug(
                    'removing unknown node %s from %i-phase boundary\n' %
                    (short(mnode), phase))
            nodes.symmetric_difference_update(missing)
            repo._dirtyphases = True
Example #20
def makefilename(repo, pat, node, desc=None,
                  total=None, seqno=None, revwidth=None, pathname=None):
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        'm': lambda: re.sub('[^\w]', '_', str(desc))
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update(node_expander)
        if node:
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError, inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
Example #21
def findglobaltags(ui, repo, alltags, tagtypes):
    '''Find global tags in repo by reading .hgtags from every head that
    has a distinct version of it, using a cache to avoid excess work.
    Updates the dicts alltags, tagtypes in place: alltags maps tag name
    to (node, hist) pair (see _readtags() below), and tagtypes maps tag
    name to tag type ("global" in this case).'''
    # This is so we can be lazy and assume alltags contains only global
    # tags when we pass it to _writetagcache().
    assert len(alltags) == len(tagtypes) == 0, \
           "findglobaltags() should be called first"

    (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct?  are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return

    seen = set()  # set of fnode
    fctx = None
    for head in reversed(heads):  # oldest to newest
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        if fnode and fnode not in seen:
            seen.add(fnode)
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)

            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
            _updatetags(filetags, 'global', alltags, tagtypes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, heads, tagfnode, alltags)
Example #22
def findglobaltags(ui, repo, alltags, tagtypes):
    '''Find global tags in repo by reading .hgtags from every head that
    has a distinct version of it, using a cache to avoid excess work.
    Updates the dicts alltags, tagtypes in place: alltags maps tag name
    to (node, hist) pair (see _readtags() below), and tagtypes maps tag
    name to tag type ("global" in this case).'''
    # This is so we can be lazy and assume alltags contains only global
    # tags when we pass it to _writetagcache().
    assert len(alltags) == len(tagtypes) == 0, \
           "findglobaltags() should be called first"

    (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct?  are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return

    seen = set()                    # set of fnode
    fctx = None
    for head in reversed(heads):        # oldest to newest
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        if fnode and fnode not in seen:
            seen.add(fnode)
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)

            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
            _updatetags(filetags, 'global', alltags, tagtypes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, heads, tagfnode, alltags)
Example #23
    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)))
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset:   %d:%s\n") % (rev, hexfunc(changenode)))

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            branch = encoding.tolocal(branch)
            self.ui.write(_("branch:      %s\n") % branch)
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag:         %s\n") % tag)
        for parent in parents:
            self.ui.write(_("parent:      %d:%s\n") % parent)

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(_("manifest:    %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)))
        self.ui.write(_("user:        %s\n") % ctx.user())
        self.ui.write(_("date:        %s\n") % date)

        if self.ui.debugflag:
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)))
Example #24
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)", util.pathto(repo.root, repo.getcwd(), ".hgsubstate"))

    if revs:
        repo.ui.status(_("checking subrepo links\n"))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    ret = ctx.sub(subpath).verify() or ret
            except Exception:
                repo.ui.warn(_(".hgsubstate is corrupt in revision %s\n") % node.short(ctx.node()))

    return ret
Example #25
def findglobaltags2(ui, repo, alltags, tagtypes):
    '''Same as findglobaltags1(), but with caching.'''
    # This is so we can be lazy and assume alltags contains only global
    # tags when we pass it to _writetagcache().
    assert len(alltags) == len(tagtypes) == 0, \
           "findglobaltags() should be called first"

    (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct?  are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return

    _debug(
        ui, "reading tags from %d head(s): %s\n" %
        (len(heads), map(short, reversed(heads))))
    seen = set()  # set of fnode
    fctx = None
    for head in reversed(heads):  # oldest to newest
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        if fnode and fnode not in seen:
            seen.add(fnode)
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)

            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
            _updatetags(filetags, 'global', alltags, tagtypes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, heads, tagfnode, alltags)
Example #26
def findglobaltags2(ui, repo, alltags, tagtypes):
    '''Same as findglobaltags1(), but with caching.'''
    # This is so we can be lazy and assume alltags contains only global
    # tags when we pass it to _writetagcache().
    assert len(alltags) == len(tagtypes) == 0, \
           "findglobaltags() should be called first"

    (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct?  are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return

    _debug(ui, "reading tags from %d head(s): %s\n"
           % (len(heads), map(short, reversed(heads))))
    seen = set()                    # set of fnode
    fctx = None
    for head in reversed(heads):        # oldest to newest
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        if fnode and fnode not in seen:
            seen.add(fnode)
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)

            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
            _updatetags(filetags, 'global', alltags, tagtypes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, heads, tagfnode, alltags)
Example #27
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    ret = ctx.sub(subpath).verify() or ret
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
Example #28
def _verify(repo):
    repo = repo.unfiltered()
    mflinkrevs = {}
    filelinkrevs = {}
    filenodes = {}
    revisions = 0
    badrevs = set()
    errors = [0]
    warnings = [0]
    ui = repo.ui
    cl = repo.changelog
    mf = repo.manifest
    lrugetctx = util.lrucachefunc(repo.changectx)

    if not repo.url().startswith('file:'):
        raise util.Abort(_("cannot verify bundle or remote repos"))

    def err(linkrev, msg, filename=None):
        if linkrev is not None:
            badrevs.add(linkrev)
        else:
            linkrev = '?'
        msg = "%s: %s" % (linkrev, msg)
        if filename:
            msg = "%s@%s" % (filename, msg)
        ui.warn(" " + msg + "\n")
        errors[0] += 1

    def exc(linkrev, msg, inst, filename=None):
        if isinstance(inst, KeyboardInterrupt):
            ui.warn(_("interrupted"))
            raise
        if not str(inst):
            inst = repr(inst)
        err(linkrev, "%s: %s" % (msg, inst), filename)

    def warn(msg):
        ui.warn(msg + "\n")
        warnings[0] += 1

    def checklog(obj, name, linkrev):
        if not len(obj) and (havecl or havemf):
            err(linkrev, _("empty or missing %s") % name)
            return

        d = obj.checksize()
        if d[0]:
            err(None, _("data length off by %d bytes") % d[0], name)
        if d[1]:
            err(None, _("index contains %d extra bytes") % d[1], name)

        if obj.version != revlog.REVLOGV0:
            if not revlogv1:
                warn(_("warning: `%s' uses revlog format 1") % name)
        elif revlogv1:
            warn(_("warning: `%s' uses revlog format 0") % name)

    def checkentry(obj, i, node, seen, linkrevs, f):
        lr = obj.linkrev(obj.rev(node))
        if lr < 0 or (havecl and lr not in linkrevs):
            if lr < 0 or lr >= len(cl):
                msg = _("rev %d points to nonexistent changeset %d")
            else:
                msg = _("rev %d points to unexpected changeset %d")
            err(None, msg % (i, lr), f)
            if linkrevs:
                if f and len(linkrevs) > 1:
                    try:
                        # attempt to filter down to real linkrevs
                        linkrevs = [
                            l for l in linkrevs
                            if lrugetctx(l)[f].filenode() == node
                        ]
                    except Exception:
                        pass
                warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
            lr = None  # can't be trusted

        try:
            p1, p2 = obj.parents(node)
            if p1 not in seen and p1 != nullid:
                err(lr,
                    _("unknown parent 1 %s of %s") % (short(p1), short(node)),
                    f)
            if p2 not in seen and p2 != nullid:
                err(lr,
                    _("unknown parent 2 %s of %s") % (short(p2), short(node)),
                    f)
        except Exception as inst:
            exc(lr, _("checking parents of %s") % short(node), inst, f)

        if node in seen:
            err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
        seen[node] = i
        return lr

    if os.path.exists(repo.sjoin("journal")):
        ui.warn(_("abandoned transaction found - run hg recover\n"))

    revlogv1 = cl.version != revlog.REVLOGV0
    if ui.verbose or not revlogv1:
        ui.status(
            _("repository uses revlog format %d\n") % (revlogv1 and 1 or 0))

    havecl = len(cl) > 0
    havemf = len(mf) > 0

    ui.status(_("checking changesets\n"))
    refersmf = False
    seen = {}
    checklog(cl, "changelog", 0)
    total = len(repo)
    for i in repo:
        ui.progress(_('checking'), i, total=total, unit=_('changesets'))
        n = cl.node(i)
        checkentry(cl, i, n, seen, [i], "changelog")

        try:
            changes = cl.read(n)
            if changes[0] != nullid:
                mflinkrevs.setdefault(changes[0], []).append(i)
                refersmf = True
            for f in changes[3]:
                filelinkrevs.setdefault(_normpath(f), []).append(i)
        except Exception as inst:
            refersmf = True
            exc(i, _("unpacking changeset %s") % short(n), inst)
    ui.progress(_('checking'), None)

    ui.status(_("checking manifests\n"))
    seen = {}
    if refersmf:
        # Do not check manifest if there are only changelog entries with
        # null manifests.
        checklog(mf, "manifest", 0)
    total = len(mf)
    for i in mf:
        ui.progress(_('checking'), i, total=total, unit=_('manifests'))
        n = mf.node(i)
        lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
        if n in mflinkrevs:
            del mflinkrevs[n]
        else:
            err(lr, _("%s not in changesets") % short(n), "manifest")

        try:
            for f, fn in mf.readdelta(n).iteritems():
                if not f:
                    err(lr, _("file without name in manifest"))
                elif f != "/dev/null":  # ignore this in very old repos
                    filenodes.setdefault(_normpath(f), {}).setdefault(fn, lr)
        except Exception as inst:
            exc(lr, _("reading manifest delta %s") % short(n), inst)
    ui.progress(_('checking'), None)

    ui.status(_("crosschecking files in changesets and manifests\n"))

    total = len(mflinkrevs) + len(filelinkrevs) + len(filenodes)
    count = 0
    if havemf:
        for c, m in sorted([(c, m) for m in mflinkrevs
                            for c in mflinkrevs[m]]):
            count += 1
            if m == nullid:
                continue
            ui.progress(_('crosschecking'), count, total=total)
            err(c, _("changeset refers to unknown manifest %s") % short(m))
        mflinkrevs = None  # del is bad here due to scope issues

        for f in sorted(filelinkrevs):
            count += 1
            ui.progress(_('crosschecking'), count, total=total)
            if f not in filenodes:
                lr = filelinkrevs[f][0]
                err(lr, _("in changeset but not in manifest"), f)

    if havecl:
        for f in sorted(filenodes):
            count += 1
            ui.progress(_('crosschecking'), count, total=total)
            if f not in filelinkrevs:
                try:
                    fl = repo.file(f)
                    lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
                except Exception:
                    lr = None
                err(lr, _("in manifest but not in changeset"), f)

    ui.progress(_('crosschecking'), None)

    ui.status(_("checking files\n"))

    storefiles = set()
    for f, f2, size in repo.store.datafiles():
        if not f:
            err(None, _("cannot decode filename '%s'") % f2)
        elif size > 0 or not revlogv1:
            storefiles.add(_normpath(f))

    fncachewarned = False
    files = sorted(set(filenodes) | set(filelinkrevs))
    total = len(files)
    for i, f in enumerate(files):
        ui.progress(_('checking'), i, item=f, total=total)
        try:
            linkrevs = filelinkrevs[f]
        except KeyError:
            # in manifest but not in changelog
            linkrevs = []

        if linkrevs:
            lr = linkrevs[0]
        else:
            lr = None

        try:
            fl = repo.file(f)
        except error.RevlogError as e:
            err(lr, _("broken revlog! (%s)") % e, f)
            continue

        for ff in fl.files():
            try:
                storefiles.remove(ff)
            except KeyError:
                warn(_(" warning: revlog '%s' not in fncache!") % ff)
                fncachewarned = True

        checklog(fl, f, lr)
        seen = {}
        rp = None
        for i in fl:
            revisions += 1
            n = fl.node(i)
            lr = checkentry(fl, i, n, seen, linkrevs, f)
            if f in filenodes:
                if havemf and n not in filenodes[f]:
                    err(lr, _("%s not in manifests") % (short(n)), f)
                else:
                    del filenodes[f][n]

            # verify contents
            try:
                l = len(fl.read(n))
                rp = fl.renamed(n)
                if l != fl.size(i):
                    if len(fl.revision(n)) != fl.size(i):
                        err(
                            lr,
                            _("unpacked size is %s, %s expected") %
                            (l, fl.size(i)), f)
            except error.CensoredNodeError:
                if ui.config("censor", "policy", "abort") == "abort":
                    err(lr, _("censored file data"), f)
            except Exception as inst:
                exc(lr, _("unpacking %s") % short(n), inst, f)

            # check renames
            try:
                if rp:
                    if lr is not None and ui.verbose:
                        ctx = lrugetctx(lr)
                        found = False
                        for pctx in ctx.parents():
                            if rp[0] in pctx:
                                found = True
                                break
                        if not found:
                            warn(
                                _("warning: copy source of '%s' not"
                                  " in parents of %s") % (f, ctx))
                    fl2 = repo.file(rp[0])
                    if not len(fl2):
                        err(
                            lr,
                            _("empty or missing copy source revlog %s:%s") %
                            (rp[0], short(rp[1])), f)
                    elif rp[1] == nullid:
                        ui.note(
                            _("warning: %s@%s: copy source"
                              " revision is nullid %s:%s\n") %
                            (f, lr, rp[0], short(rp[1])))
                    else:
                        fl2.rev(rp[1])
            except Exception as inst:
                exc(lr, _("checking rename of %s") % short(n), inst, f)

        # cross-check
        if f in filenodes:
            fns = [(lr, n) for n, lr in filenodes[f].iteritems()]
            for lr, node in sorted(fns):
                err(lr, _("%s in manifests not found") % short(node), f)
    ui.progress(_('checking'), None)

    for f in storefiles:
        warn(_("warning: orphan revlog '%s'") % f)

    ui.status(
        _("%d files, %d changesets, %d total revisions\n") %
        (len(files), len(cl), revisions))
    if warnings[0]:
        ui.warn(_("%d warnings encountered!\n") % warnings[0])
    if fncachewarned:
        ui.warn(
            _('hint: run "hg debugrebuildfncache" to recover from '
              'corrupt fncache\n'))
    if errors[0]:
        ui.warn(_("%d integrity errors encountered!\n") % errors[0])
        if badrevs:
            ui.warn(
                _("(first damaged changeset appears to be %d)\n") %
                min(badrevs))
        return 1
Example #29
def prepush(repo, remote, force, revs, newbranch):
    '''Analyze the local and remote repositories and determine which
    changesets need to be pushed to the remote. Return value depends
    on circumstances:

    If we are not going to push anything, return a tuple (None,
    outgoing) where outgoing is 0 if there are no outgoing
    changesets and 1 if there are, but we refuse to push them
    (e.g. would create new remote heads).

    Otherwise, return a tuple (changegroup, remoteheads), where
    changegroup is a readable file-like object whose read() returns
    successive changegroup chunks ready to be sent over the wire and
    remoteheads is the list of remote heads.'''
    remoteheads = remote.heads()
    common, inc, rheads = findcommonincoming(repo, remote, heads=remoteheads,
                                             force=force)

    cl = repo.changelog
    update = findoutgoing(repo, remote, common, remoteheads)
    outg, bases, heads = cl.nodesbetween(update, revs)

    if not bases:
        repo.ui.status(_("no changes found\n"))
        return None, 1

    if not force and remoteheads != [nullid]:
        if remote.capable('branchmap'):
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            # 1. Create set of branches involved in the push.
            branches = set(repo[n].branch() for n in outg)

            # 2. Check for new branches on the remote.
            remotemap = remote.branchmap()
            newbranches = branches - set(remotemap)
            if newbranches and not newbranch: # new branch requires --new-branch
                branchnames = ', '.join(sorted(newbranches))
                raise util.Abort(_("push creates new remote branches: %s!")
                                   % branchnames,
                                 hint=_("use 'hg push --new-branch' to create"
                                        " new remote branches"))
            branches.difference_update(newbranches)

            # 3. Construct the initial oldmap and newmap dicts.
            # They contain information about the remote heads before and
            # after the push, respectively.
            # Heads not found locally are not included in either dict,
            # since they won't be affected by the push.
            # unsynced contains all branches with incoming changesets.
            oldmap = {}
            newmap = {}
            unsynced = set()
            for branch in branches:
                remotebrheads = remotemap[branch]
                prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
                oldmap[branch] = prunedbrheads
                newmap[branch] = list(prunedbrheads)
                if len(remotebrheads) > len(prunedbrheads):
                    unsynced.add(branch)

            # 4. Update newmap with outgoing changes.
            # This will possibly add new heads and remove existing ones.
            ctxgen = (repo[n] for n in outg)
            repo._updatebranchcache(newmap, ctxgen)

        else:
            # 1-4b. old servers: Check for new topological heads.
            # Construct {old,new}map with branch = None (topological branch).
            # (code based on _updatebranchcache)
            oldheads = set(h for h in remoteheads if h in cl.nodemap)
            newheads = oldheads.union(outg)
            if len(newheads) > 1:
                for latest in reversed(outg):
                    if latest not in newheads:
                        continue
                    minhrev = min(cl.rev(h) for h in newheads)
                    reachable = cl.reachable(latest, cl.node(minhrev))
                    reachable.remove(latest)
                    newheads.difference_update(reachable)
            branches = set([None])
            newmap = {None: newheads}
            oldmap = {None: oldheads}
            unsynced = inc and branches or set()

        # 5. Check for new heads.
        # If there are more heads after the push than before, a suitable
        # error message, depending on unsynced status, is displayed.
        error = None
        for branch in branches:
            newhs = set(newmap[branch])
            oldhs = set(oldmap[branch])
            if len(newhs) > len(oldhs):
                if error is None:
                    if branch:
                        error = _("push creates new remote heads "
                                  "on branch '%s'!") % branch
                    else:
                        error = _("push creates new remote heads!")
                    if branch in unsynced:
                        hint = _("you should pull and merge or "
                                 "use push -f to force")
                    else:
                        hint = _("did you forget to merge? "
                                 "use push -f to force")
                if branch:
                    repo.ui.debug("new remote heads on branch '%s'\n" % branch)
                for h in (newhs - oldhs):
                    repo.ui.debug("new remote head %s\n" % short(h))
        if error:
            raise util.Abort(error, hint=hint)

        # 6. Check for unsynced changes on involved branches.
        if unsynced:
            repo.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        # use the fast path, no race possible on push
        nodes = repo.changelog.findmissing(common)
        cg = repo._changegroup(nodes, 'push')
    else:
        cg = repo.changegroupsubset(update, revs, 'push')
    return cg, remoteheads
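Based on the docstring, a caller checks the first element of the returned tuple to decide whether anything gets sent; a hedged sketch of that flow (remote.unbundle is assumed to be the transport call used by the old push path):

ret = prepush(repo, remote, force=False, revs=None, newbranch=False)
if ret[0] is None:
    pass    # nothing to send: ret[1] is 0 (no outgoing) or 1 (push refused)
else:
    cg, remoteheads = ret
    remote.unbundle(cg, remoteheads, 'push')    # assumed wire call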
Example #30
def bisect(changelog, state):
    """find the next node (if any) for testing during a bisect search.
    returns a (nodes, number, good) tuple.

    'nodes' is the final result of the bisect if 'number' is 0.
    Otherwise 'number' indicates the remaining possible candidates for
    the search and 'nodes' contains the next bisect target.
    'good' is True if bisect is searching for a first good changeset, False
    if searching for a first bad one.
    """

    clparents = changelog.parentrevs
    skip = set([changelog.rev(n) for n in state['skip']])

    def buildancestors(bad, good):
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        goodrev = min(goodrevs)
        # build visit array
        ancestors = [None] * (len(changelog) + 1) # an extra for [-1]

        # set nodes descended from goodrevs
        for rev in goodrevs:
            ancestors[rev] = []
        for rev in xrange(goodrev + 1, len(changelog)):
            for prev in clparents(rev):
                if ancestors[prev] == []:
                    ancestors[rev] = []

        # clear good revs from array
        for rev in goodrevs:
            ancestors[rev] = None
        for rev in xrange(len(changelog), goodrev, -1):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None

        if ancestors[badrev] is None:
            return badrev, None
        return badrev, ancestors

    good = False
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors: # looking for bad to good transition?
        good = True
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors: # now we're confused
        if len(state['bad']) == 1 and len(state['good']) == 1:
            raise util.Abort(_("starting revisions are not directly related"))
        raise util.Abort(_("inconsistent state, %s:%s is good and bad")
                         % (badrev, short(bad)))

    # build children dict
    children = {}
    visit = [badrev]
    candidates = []
    while visit:
        rev = visit.pop(0)
        if ancestors[rev] == []:
            candidates.append(rev)
            for prev in clparents(rev):
                if prev != -1:
                    if prev in children:
                        children[prev].append(rev)
                    else:
                        children[prev] = [rev]
                        visit.append(prev)

    candidates.sort()
    # have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' been skipped?
    tot = len(candidates)
    unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
    if tot == 1 or not unskipped:
        return ([changelog.node(rev) for rev in candidates], 0, good)
    perfect = tot // 2

    # find the best node to test
    best_rev = None
    best_len = -1
    poison = set()
    for rev in candidates:
        if rev in poison:
            # poison children
            poison.update(children.get(rev, []))
            continue

        a = ancestors[rev] or [rev]
        ancestors[rev] = None

        x = len(a) # number of ancestors
        y = tot - x # number of non-ancestors
        value = min(x, y) # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect: # found a perfect candidate? quit early
                break

        if y < perfect and rev not in skip: # all downhill from here?
            # poison children
            poison.update(children.get(rev, []))
            continue

        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = list(set(ancestors[c] + a))
            else:
                ancestors[c] = a + [c]

    assert best_rev is not None
    best_node = changelog.node(best_rev)

    return ([best_node], tot, good)
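A short sketch of how a caller might interpret the (nodes, number, good) tuple described in the docstring; repo and state are assumed to already exist as in the bisect command's own code:

nodes, remaining, good = bisect(repo.changelog, state)
if remaining == 0:
    # bisection is done; nodes holds the final candidate(s)
    which = good and "good" or "bad"
    print "the first %s revision is %s" % (which, short(nodes[0]))
else:
    # test this changeset next, then record it as good/bad/skip in state
    print "testing changeset %s (%d candidates remain)" % (short(nodes[0]),
                                                           remaining)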
Example #31
    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)))
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset:   %d:%s\n") % (rev, hexfunc(changenode)))

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            branch = encoding.tolocal(branch)
            self.ui.write(_("branch:      %s\n") % branch)
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag:         %s\n") % tag)
        for parent in parents:
            self.ui.write(_("parent:      %d:%s\n") % parent)

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(
                _("manifest:    %d:%s\n") %
                (self.repo.manifest.rev(mnode), hex(mnode)))
        self.ui.write(_("user:        %s\n") % ctx.user())
        self.ui.write(_("date:        %s\n") % date)

        if self.ui.debugflag:
            files = self.repo.status(log.parents(changenode)[0],
                                     changenode)[:3]
            for key, value in zip(
                [_("files:"), _("files+:"),
                 _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)))
        elif ctx.files() and self.ui.verbose:
            self.ui.write(_("files:       %s\n") % " ".join(ctx.files()))
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies:      %s\n") % ' '.join(copies))

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(
                    _("extra:       %s=%s\n") %
                    (key, value.encode('string_escape')))

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"))
                self.ui.write(description)
                self.ui.write("\n\n")
            else:
                self.ui.write(
                    _("summary:     %s\n") % description.splitlines()[0])
        self.ui.write("\n")

        self.showpatch(changenode)
def filemerge(repo, mynode, orig, fcd, fco, fca):
    """perform a 3-way merge in the working directory

    mynode = parent node before merge
    orig = original local filename before merge
    fco = other file context
    fca = ancestor file context
    fcd = local file context for current/destination file
    """
    def temp(prefix, ctx):
        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre)
        data = repo.wwritedata(ctx.path(), ctx.data())
        f = os.fdopen(fd, "wb")
        f.write(data)
        f.close()
        return name

    if not fco.cmp(fcd):  # files identical?
        return None

    ui = repo.ui
    fd = fcd.path()
    binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
    symlink = 'l' in fcd.flags() + fco.flags()
    tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
    ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
             (tool, fd, binary, symlink))

    if not tool or tool == 'internal:prompt':
        tool = "internal:local"
        if ui.promptchoice(
                _(" no tool found to merge %s\n"
                  "keep (l)ocal or take (o)ther?") % fd,
            (_("&Local"), _("&Other")), 0):
            tool = "internal:other"
    if tool == "internal:local":
        return 0
    if tool == "internal:other":
        repo.wwrite(fd, fco.data(), fco.flags())
        return 0
    if tool == "internal:fail":
        return 1

    # do the actual merge
    a = repo.wjoin(fd)
    b = temp("base", fca)
    c = temp("other", fco)
    out = ""
    back = a + ".orig"
    util.copyfile(a, back)

    if orig != fco.path():
        ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
    else:
        ui.status(_("merging %s\n") % fd)

    ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))

    # do we attempt to simplemerge first?
    try:
        premerge = _toolbool(ui, tool, "premerge", not (binary or symlink))
    except error.ConfigError:
        premerge = _toolstr(ui, tool, "premerge").lower()
        valid = 'keep'.split()
        if premerge not in valid:
            _valid = ', '.join(["'" + v + "'" for v in valid])
            raise error.ConfigError(
                _("%s.premerge not valid "
                  "('%s' is neither boolean nor %s)") %
                (tool, premerge, _valid))

    if premerge:
        r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
        if not r:
            ui.debug(" premerge successful\n")
            os.unlink(back)
            os.unlink(b)
            os.unlink(c)
            return 0
        if premerge != 'keep':
            util.copyfile(back, a)  # restore from backup and try again

    env = dict(HG_FILE=fd,
               HG_MY_NODE=short(mynode),
               HG_OTHER_NODE=str(fco.changectx()),
               HG_BASE_NODE=str(fca.changectx()),
               HG_MY_ISLINK='l' in fcd.flags(),
               HG_OTHER_ISLINK='l' in fco.flags(),
               HG_BASE_ISLINK='l' in fca.flags())

    if tool == "internal:merge":
        r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
    elif tool == 'internal:dump':
        a = repo.wjoin(fd)
        util.copyfile(a, a + ".local")
        repo.wwrite(fd + ".other", fco.data(), fco.flags())
        repo.wwrite(fd + ".base", fca.data(), fca.flags())
        os.unlink(b)
        os.unlink(c)
        return 1  # unresolved
    else:
        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back  # read input from backup, write to original
        replace = dict(local=a, base=b, other=c, output=out)
        args = util.interpolate(r'\$', replace, args,
                                lambda s: '"%s"' % util.localpath(s))
        r = util.system(toolpath + ' ' + args,
                        cwd=repo.root,
                        environ=env,
                        out=ui.fout)

    if not r and (_toolbool(ui, tool, "checkconflicts")
                  or 'conflicts' in _toollist(ui, tool, "check")):
        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
                     re.MULTILINE):
            r = 1

    checked = False
    if 'prompt' in _toollist(ui, tool, "check"):
        checked = True
        if ui.promptchoice(
                _("was merge of '%s' successful (yn)?") % fd,
            (_("&Yes"), _("&No")), 1):
            r = 1

    if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
                                  'changed' in _toollist(ui, tool, "check")):
        if filecmp.cmp(repo.wjoin(fd), back):
            if ui.promptchoice(
                    _(" output file %s appears unchanged\n"
                      "was merge successful (yn)?") % fd,
                (_("&Yes"), _("&No")), 1):
                r = 1

    if _toolbool(ui, tool, "fixeol"):
        _matcheol(repo.wjoin(fd), back)

    if r:
        if tool == "internal:merge":
            ui.warn(
                _("merging %s incomplete! "
                  "(edit conflicts, then use 'hg resolve --mark')\n") % fd)
        else:
            ui.warn(_("merging %s failed!\n") % fd)
    else:
        os.unlink(back)

    os.unlink(b)
    os.unlink(c)
    return r
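
A toy illustration of the '$local $base $other $output' substitution that filemerge() above performs before invoking an external merge tool. The real code goes through util.interpolate and shell-quotes paths with util.localpath; the file paths below are made up.

import re

def expand_args(template, local, base, other, output=''):
    # Substitute $local/$base/$other/$output with quoted paths, as a sketch of
    # the command-line construction above.
    replace = dict(local=local, base=base, other=other, output=output)
    return re.sub(r'\$(local|base|other|output)',
                  lambda m: '"%s"' % replace[m.group(1)], template)

print(expand_args('$local $base $other',
                  '/work/file.txt',
                  '/tmp/file.txt~base.x1',
                  '/tmp/file.txt~other.y2'))
# -> "/work/file.txt" "/tmp/file.txt~base.x1" "/tmp/file.txt~other.y2"
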
 def __str__(self):
     return short(self.node())
Example #34
def _readtagcache(ui, repo):
    '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
    shouldwrite).  If the cache is completely up-to-date, cachetags is a
    dict of the form returned by _readtags(); otherwise, it is None and
    heads and fnodes are set.  In that case, heads is the list of all
    heads currently in the repository (ordered from tip to oldest) and
    fnodes is a mapping from head to .hgtags filenode.  If those two are
    set, caller is responsible for reading tag info from each head.'''

    try:
        cachefile = repo.opener('tags.cache', 'r')
        _debug(ui, 'reading tag cache from %s\n' % cachefile.name)
    except IOError:
        cachefile = None

    # The cache file consists of lines like
    #   <headrev> <headnode> [<tagnode>]
    # where <headrev> and <headnode> redundantly identify a repository
    # head from the time the cache was written, and <tagnode> is the
    # filenode of .hgtags on that head.  Heads with no .hgtags file will
    # have no <tagnode>.  The cache is ordered from tip to oldest (which
    # is part of why <headrev> is there: a quick visual check is all
    # that's required to ensure correct order).
    #
    # This information is enough to let us avoid the most expensive part
    # of finding global tags, which is looking up <tagnode> in the
    # manifest for each head.
    cacherevs = []  # list of headrev
    cacheheads = []  # list of headnode
    cachefnode = {}  # map headnode to filenode
    if cachefile:
        for line in cachefile:
            if line == "\n":
                break
            line = line.rstrip().split()
            cacherevs.append(int(line[0]))
            headnode = bin(line[1])
            cacheheads.append(headnode)
            if len(line) == 3:
                fnode = bin(line[2])
                cachefnode[headnode] = fnode

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
        _debug(ui, "tag cache: tip unchanged\n")
        tags = _readtags(ui, repo, cachefile, cachefile.name)
        cachefile.close()
        return (None, None, tags, False)
    if cachefile:
        cachefile.close()  # ignore rest of file

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, {}, False)

    # Case 3 (uncommon): cache file missing or empty.
    if not cacheheads:
        _debug(ui, 'tag cache: cache file missing or empty\n')

    # Case 4 (uncommon): tip rev decreased.  This should only happen
    # when we're called from localrepository.destroyed().  Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.
    elif cacheheads and tiprev < cacherevs[0]:
        _debug(
            ui, 'tag cache: tip rev decremented (from %d to %d), '
            'so we must be destroying nodes\n' % (cacherevs[0], tiprev))

    # Case 5 (common): tip has changed, so we've added/replaced heads.
    else:
        _debug(
            ui, 'tag cache: tip has changed (%d:%s); must find new heads\n' %
            (tiprev, short(tipnode)))

    # Luckily, the code to handle cases 3, 4, 5 is the same.  So the
    # above if/elif/else can disappear once we're confident this thing
    # actually works and we don't need the debug output.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    newheads = [head for head in repoheads if head not in set(cacheheads)]
    _debug(
        ui, 'tag cache: found %d head(s) not in cache: %s\n' %
        (len(newheads), map(short, newheads)))

    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads.  Worst case: no cache
    # file, so newheads == repoheads.
    for head in newheads:
        cctx = repo[head]
        try:
            fnode = cctx.filenode('.hgtags')
            cachefnode[head] = fnode
        except error.LookupError:
            # no .hgtags file on this head
            pass

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, None, True)
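
A self-contained sketch of parsing the tags.cache line format documented above ("<headrev> <headnode> [<tagnode>]"). It builds the same three structures as _readtagcache; node.bin is stood in for by binascii.unhexlify, and the sample lines are fabricated.

from binascii import unhexlify

def parse_tagcache(lines):
    # Toy parser for lines of the form: <headrev> <headnode> [<tagnode>]
    cacherevs, cacheheads, cachefnode = [], [], {}
    for line in lines:
        if line == "\n":      # blank line separates the head list from cached tags
            break
        parts = line.rstrip().split()
        cacherevs.append(int(parts[0]))
        headnode = unhexlify(parts[1])
        cacheheads.append(headnode)
        if len(parts) == 3:
            cachefnode[headnode] = unhexlify(parts[2])
    return cacherevs, cacheheads, cachefnode

sample = ["4 " + "aa" * 20 + " " + "bb" * 20 + "\n",
          "2 " + "cc" * 20 + "\n",
          "\n"]
print(parse_tagcache(sample)[0])   # -> [4, 2]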
Example #35
def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    cl = repo.changelog
    if remote.capable('branchmap'):
        # Check for each named branch if we're creating new remote heads.
        # To be a remote head after push, node must be either:
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        # 1. Create set of branches involved in the push.
        branches = set(repo[n].branch() for n in outgoing.missing)

        # 2. Check for new branches on the remote.
        remotemap = remote.branchmap()
        newbranches = branches - set(remotemap)
        if newbranches and not newbranch: # new branch requires --new-branch
            branchnames = ', '.join(sorted(newbranches))
            raise util.Abort(_("push creates new remote branches: %s!")
                               % branchnames,
                             hint=_("use 'hg push --new-branch' to create"
                                    " new remote branches"))
        branches.difference_update(newbranches)

        # 3. Construct the initial oldmap and newmap dicts.
        # They contain information about the remote heads before and
        # after the push, respectively.
        # Heads not found locally are not included in either dict,
        # since they won't be affected by the push.
        # unsynced contains all branches with incoming changesets.
        oldmap = {}
        newmap = {}
        unsynced = set()
        for branch in branches:
            remotebrheads = remotemap[branch]
            prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
            oldmap[branch] = prunedbrheads
            newmap[branch] = list(prunedbrheads)
            if len(remotebrheads) > len(prunedbrheads):
                unsynced.add(branch)

        # 4. Update newmap with outgoing changes.
        # This will possibly add new heads and remove existing ones.
        ctxgen = (repo[n] for n in outgoing.missing)
        repo._updatebranchcache(newmap, ctxgen)

    else:
        # 1-4b. old servers: Check for new topological heads.
        # Construct {old,new}map with branch = None (topological branch).
        # (code based on _updatebranchcache)
        oldheads = set(h for h in remoteheads if h in cl.nodemap)
        newheads = oldheads.union(outgoing.missing)
        if len(newheads) > 1:
            for latest in reversed(outgoing.missing):
                if latest not in newheads:
                    continue
                minhrev = min(cl.rev(h) for h in newheads)
                reachable = cl.reachable(latest, cl.node(minhrev))
                reachable.remove(latest)
                newheads.difference_update(reachable)
        branches = set([None])
        newmap = {None: newheads}
        oldmap = {None: oldheads}
        unsynced = inc and branches or set()

    # 5. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    error = None
    for branch in branches:
        newhs = set(newmap[branch])
        oldhs = set(oldmap[branch])
        if len(newhs) > len(oldhs):
            dhs = list(newhs - oldhs)
            if error is None:
                if branch not in ('default', None):
                    error = _("push creates new remote head %s "
                              "on branch '%s'!") % (short(dhs[0]), branch)
                else:
                    error = _("push creates new remote head %s!"
                              ) % short(dhs[0])
                if branch in unsynced:
                    hint = _("you should pull and merge or "
                             "use push -f to force")
                else:
                    hint = _("did you forget to merge? "
                             "use push -f to force")
            if branch is not None:
                repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
            for h in dhs:
                repo.ui.note(_("new remote head %s\n") % short(h))
    if error:
        raise util.Abort(error, hint=hint)

    # 6. Check for unsynced changes on involved branches.
    if unsynced:
        repo.ui.warn(_("note: unsynced remote changes!\n"))
Example #36
 def __init__(self, name, index, message):
     self.name = name
     if isinstance(name, str) and len(name) == 20:
         from node import short
         name = short(name)
     RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, fetch, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "fetch" is a list of roots of the nodes that would be incoming, to be
      supplied to changegroupsubset.
    "heads" is either the supplied heads, or else the remote's heads.
    """

    m = repo.changelog.nodemap
    search = []
    fetch = set()
    seen = set()
    seenbranch = set()
    base = set()

    if not heads:
        heads = remote.heads()

    if repo.changelog.tip() == nullid:
        base.add(nullid)
        if heads != [nullid]:
            return [nullid], [nullid], list(heads)
        return [nullid], [], heads

    # assume we're closer to the tip than the root
    # and start by examining the heads
    repo.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base.add(h)

    if not unknown:
        return list(base), [], list(heads)

    req = set(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = util.deque(remote.branches(unknown))
    while unknown:
        r = []
        while unknown:
            n = unknown.popleft()
            if n[0] in seen:
                continue

            repo.ui.debug("examining %s:%s\n"
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                repo.ui.debug("branch already found\n")
                continue
            elif n[1] and n[1] in m: # do we know the base?
                repo.ui.debug("found incomplete branch %s:%s\n"
                              % (short(n[0]), short(n[1])))
                search.append(n[0:2]) # schedule branch range for scanning
                seenbranch.add(n)
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        repo.ui.debug("found new changeset %s\n" %
                                      short(n[1]))
                        fetch.add(n[1]) # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base.add(p) # latest known

                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req.add(p)
            seen.add(n[0])

        if r:
            reqcnt += 1
            repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            repo.ui.debug("request %d: %s\n" %
                        (reqcnt, " ".join(map(short, r))))
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p + 10]):
                    repo.ui.debug("received %s:%s\n" %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        newsearch = []
        reqcnt += 1
        repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
        for n, l in zip(search, remote.between(search)):
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                repo.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        repo.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                        fetch.add(p)
                        base.add(i)
                    else:
                        repo.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
        search = newsearch

    # sanity check our fetch list
    for f in fetch:
        if f in m:
            raise error.RepoError(_("already have changeset ")
                                  + short(f[:4]))

    base = list(base)
    if base == [nullid]:
        if force:
            repo.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    repo.ui.debug("found new changesets starting at " +
                 " ".join([short(f) for f in fetch]) + "\n")

    repo.ui.progress(_('searching'), None)
    repo.ui.debug("%d total queries\n" % reqcnt)

    return base, list(fetch), heads
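
A rough sketch of the narrowing step in the binary search above: walk the sampled revisions of one branch segment (ordered from the unknown head towards a known base) until a locally known one is hit; if the boundary is within two steps the earliest unknown changeset is found, otherwise the remaining range is scheduled for another round. 'known' stands in for the local nodemap and the node names are made up.

def narrow(head, samples, known):
    # Toy version of the inner loop over remote.between() results above.
    p, f = head, 1
    for i in samples:
        if i in known:
            if f <= 2:
                return 'new', p          # p is the earliest unknown changeset
            return 'narrow', (p, i)      # keep bisecting between p and i
        p, f = i, f * 2
    return 'new', p

print(narrow('n0', ['n1', 'n2', 'n4', 'n8'], {'n4', 'n8'}))  # -> ('narrow', ('n2', 'n4'))
print(narrow('n0', ['n1'], {'n1'}))                          # -> ('new', 'n0')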
Example #38
 def __init__(self, filename, node):
     from node import short
     RevlogError.__init__(self, '%s:%s' % (filename, short(node)))
Example #39
def walkchangerevs(ui, repo, pats, change, opts):
    '''Iterate over files and the revs they changed in.

    Callers most commonly need to iterate backwards over the history
    they are interested in.  Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order.  Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an (iterator, matchfn) tuple. The iterator
    yields 3-tuples. They will be of one of the following forms:

    "window", incrementing, lastrev: stepping through a window,
    positive if walking forwards through revs, last rev in the
    sequence iterated over - use to reset state for the current window

    "add", rev, fns: out-of-order traversal of the given file names
    fns, which changed during revision rev - use to gather data for
    possible display

    "iter", rev, None: in-order traversal of the revs earlier iterated
    over with "add" - use to display data'''
    def increasing_windows(start, end, windowsize=8, sizelimit=512):
        if start < end:
            while start < end:
                yield start, min(windowsize, end - start)
                start += windowsize
                if windowsize < sizelimit:
                    windowsize *= 2
        else:
            while start > end:
                yield start, min(windowsize, start - end - 1)
                start -= windowsize
                if windowsize < sizelimit:
                    windowsize *= 2

    files, matchfn, anypats = matchpats(repo, pats, opts)
    follow = opts.get('follow') or opts.get('follow_first')

    if repo.changelog.count() == 0:
        return [], matchfn

    if follow:
        defrange = '%s:0' % repo.changectx().rev()
    else:
        defrange = '-1:0'
    revs = revrange(repo, opts['rev'] or [defrange])
    wanted = {}
    slowpath = anypats or opts.get('removed')
    fncache = {}

    if not slowpath and not files:
        # No files, no patterns.  Display all revs.
        wanted = dict.fromkeys(revs)
    copies = []
    if not slowpath:
        # Only files, no patterns.  Check the history of each file.
        def filerevgen(filelog, node):
            cl_count = repo.changelog.count()
            if node is None:
                last = filelog.count() - 1
            else:
                last = filelog.rev(node)
            for i, window in increasing_windows(last, nullrev):
                revs = []
                for j in xrange(i - window, i + 1):
                    n = filelog.node(j)
                    revs.append((filelog.linkrev(n), follow
                                 and filelog.renamed(n)))
                revs.reverse()
                for rev in revs:
                    # only yield rev for which we have the changelog, it can
                    # happen while doing "hg log" during a pull or commit
                    if rev[0] < cl_count:
                        yield rev

        def iterfiles():
            for filename in files:
                yield filename, None
            for filename_node in copies:
                yield filename_node

        minrev, maxrev = min(revs), max(revs)
        for file_, node in iterfiles():
            filelog = repo.file(file_)
            if filelog.count() == 0:
                if node is None:
                    # A zero count may be a directory or deleted file, so
                    # try to find matching entries on the slow path.
                    slowpath = True
                    break
                else:
                    ui.warn(
                        _('%s:%s copy source revision cannot be found!\n') %
                        (file_, short(node)))
                    continue
            for rev, copied in filerevgen(filelog, node):
                if rev <= maxrev:
                    if rev < minrev:
                        break
                    fncache.setdefault(rev, [])
                    fncache[rev].append(file_)
                    wanted[rev] = 1
                    if follow and copied:
                        copies.append(copied)
    if slowpath:
        if follow:
            raise util.Abort(
                _('can only follow copies/renames for explicit '
                  'file names'))

        # The slow path checks files modified in every changeset.
        def changerevgen():
            for i, window in increasing_windows(repo.changelog.count() - 1,
                                                nullrev):
                for j in xrange(i - window, i + 1):
                    yield j, change(j)[3]

        for rev, changefiles in changerevgen():
            matches = filter(matchfn, changefiles)
            if matches:
                fncache[rev] = matches
                wanted[rev] = 1

    class followfilter:
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = []
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.append(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.append(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.extend(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.extend(realparents(rev))
                    return True

            return False

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo.changelog.rev(repo.lookup(rev))
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x) and x in wanted:
                del wanted[x]

    def iterate():
        if follow and not files:
            ff = followfilter(onlyfirst=opts.get('follow_first'))

            def want(rev):
                if ff.match(rev) and rev in wanted:
                    return True
                return False
        else:

            def want(rev):
                return rev in wanted

        for i, window in increasing_windows(0, len(revs)):
            yield 'window', revs[0] < revs[-1], revs[-1]
            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
            srevs = list(nrevs)
            srevs.sort()
            for rev in srevs:
                fns = fncache.get(rev)
                if not fns:

                    def fns_generator():
                        for f in change(rev)[3]:
                            if matchfn(f):
                                yield f

                    fns = fns_generator()
                yield 'add', rev, fns
            for rev in nrevs:
                yield 'iter', rev, None

    return iterate(), matchfn
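
A standalone copy of the window generator used above, to show how the window size grows geometrically (8, 16, 32, ...) up to sizelimit while walking a revision range in either direction; the printed outputs are what the generator yields for two sample ranges.

def increasing_windows(start, end, windowsize=8, sizelimit=512):
    # Same logic as the nested helper above, lifted out so it can be run alone.
    if start < end:
        while start < end:
            yield start, min(windowsize, end - start)
            start += windowsize
            if windowsize < sizelimit:
                windowsize *= 2
    else:
        while start > end:
            yield start, min(windowsize, start - end - 1)
            start -= windowsize
            if windowsize < sizelimit:
                windowsize *= 2

print(list(increasing_windows(0, 30)))    # -> [(0, 8), (8, 16), (24, 6)]
print(list(increasing_windows(20, -1)))   # -> [(20, 8), (12, 12), (4, 4)]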
Example #40
def filemerge(repo, mynode, orig, fcd, fco, fca):
    """perform a 3-way merge in the working directory

    mynode = parent node before merge
    orig = original local filename before merge
    fco = other file context
    fca = ancestor file context
    fcd = local file context for current/destination file
    """
    def temp(prefix, ctx):
        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre)
        data = repo.wwritedata(ctx.path(), ctx.data())
        f = os.fdopen(fd, "wb")
        f.write(data)
        f.close()
        return name

    def isbin(ctx):
        try:
            return util.binary(ctx.data())
        except IOError:
            return False

    if not fco.cmp(fcd.data()):  # files identical?
        return None

    if fca == fco:  # backwards, use working dir parent as ancestor
        fca = fcd.parents()[0]

    ui = repo.ui
    fd = fcd.path()
    binary = isbin(fcd) or isbin(fco) or isbin(fca)
    symlink = 'l' in fcd.flags() + fco.flags()
    tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
    ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
             (tool, fd, binary, symlink))

    if not tool or tool == 'internal:prompt':
        tool = "internal:local"
        if ui.promptchoice(
                _(" no tool found to merge %s\n"
                  "keep (l)ocal or take (o)ther?") % fd,
            (_("&Local"), _("&Other")), 0):
            tool = "internal:other"
    if tool == "internal:local":
        return 0
    if tool == "internal:other":
        repo.wwrite(fd, fco.data(), fco.flags())
        return 0
    if tool == "internal:fail":
        return 1

    # do the actual merge
    a = repo.wjoin(fd)
    b = temp("base", fca)
    c = temp("other", fco)
    out = ""
    back = a + ".orig"
    util.copyfile(a, back)

    if orig != fco.path():
        ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
    else:
        ui.status(_("merging %s\n") % fd)

    ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))

    # do we attempt to simplemerge first?
    if _toolbool(ui, tool, "premerge", not (binary or symlink)):
        r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
        if not r:
            ui.debug(" premerge successful\n")
            os.unlink(back)
            os.unlink(b)
            os.unlink(c)
            return 0
        util.copyfile(back, a)  # restore from backup and try again

    env = dict(HG_FILE=fd,
               HG_MY_NODE=short(mynode),
               HG_OTHER_NODE=str(fco.changectx()),
               HG_BASE_NODE=str(fca.changectx()),
               HG_MY_ISLINK='l' in fcd.flags(),
               HG_OTHER_ISLINK='l' in fco.flags(),
               HG_BASE_ISLINK='l' in fca.flags())

    if tool == "internal:merge":
        r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
    elif tool == 'internal:dump':
        a = repo.wjoin(fd)
        util.copyfile(a, a + ".local")
        repo.wwrite(fd + ".other", fco.data(), fco.flags())
        repo.wwrite(fd + ".base", fca.data(), fca.flags())
        return 1  # unresolved
    else:
        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back  # read input from backup, write to original
        replace = dict(local=a, base=b, other=c, output=out)
        args = re.sub(
            "\$(local|base|other|output)",
            lambda x: '"%s"' % util.localpath(replace[x.group()[1:]]), args)
        r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env)

    if not r and _toolbool(ui, tool, "checkconflicts"):
        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
                     re.MULTILINE):
            r = 1

    if not r and _toolbool(ui, tool, "checkchanged"):
        if filecmp.cmp(repo.wjoin(fd), back):
            if ui.promptchoice(
                    _(" output file %s appears unchanged\n"
                      "was merge successful (yn)?") % fd,
                (_("&Yes"), _("&No")), 1):
                r = 1

    if _toolbool(ui, tool, "fixeol"):
        _matcheol(repo.wjoin(fd), back)

    if r:
        ui.warn(_("merging %s failed!\n") % fd)
    else:
        os.unlink(back)

    os.unlink(b)
    os.unlink(c)
    return r
Example #41
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        if isinstance(changeid, int):
            try:
                self._node = repo.changelog.node(changeid)
            except IndexError:
                raise error.RepoLookupError(
                    _("unknown revision '%s'") % changeid)
            self._rev = changeid
            return
        if isinstance(changeid, long):
            changeid = str(changeid)
        if changeid == '.':
            self._node = repo.dirstate.p1()
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid == 'null':
            self._node = nullid
            self._rev = nullrev
            return
        if changeid == 'tip':
            self._node = repo.changelog.tip()
            self._rev = repo.changelog.rev(self._node)
            return
        if len(changeid) == 20:
            try:
                self._node = changeid
                self._rev = repo.changelog.rev(changeid)
                return
            except LookupError:
                pass

        try:
            r = int(changeid)
            if str(r) != changeid:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l:
                raise ValueError
            self._rev = r
            self._node = repo.changelog.node(r)
            return
        except (ValueError, OverflowError, IndexError):
            pass

        if len(changeid) == 40:
            try:
                self._node = bin(changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except (TypeError, LookupError):
                pass

        if changeid in repo._bookmarks:
            self._node = repo._bookmarks[changeid]
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid in repo._tagscache.tags:
            self._node = repo._tagscache.tags[changeid]
            self._rev = repo.changelog.rev(self._node)
            return
        try:
            self._node = repo.branchtip(changeid)
            self._rev = repo.changelog.rev(self._node)
            return
        except error.RepoLookupError:
            pass

        self._node = repo.changelog._partialmatch(changeid)
        if self._node is not None:
            self._rev = repo.changelog.rev(self._node)
            return

        # lookup failed
        # check if it might have come from damaged dirstate
        #
        # XXX we could avoid the unfiltered if we had a recognizable exception
        # for filtered changeset access
        if changeid in repo.unfiltered().dirstate.parents():
            raise error.Abort(
                _("working directory has unknown parent '%s'!") %
                short(changeid))
        try:
            if len(changeid) == 20:
                changeid = hex(changeid)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % changeid)
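
A rough sketch of the lookup precedence the constructor above walks through for a string changeid: special names first, then an integer revision, then a full 40-character hex node, then bookmarks, tags, branches, and finally a partial hex match. This only illustrates the ordering; it does not touch a real repository, and the classifier name is made up.

from binascii import unhexlify

def classify_changeid(changeid):
    # Mirror the cascade above, returning which lookup path would be tried.
    if changeid in ('.', 'null', 'tip'):
        return 'special name'
    try:
        int(changeid)
        return 'integer revision'
    except ValueError:
        pass
    if len(changeid) == 40:
        try:
            unhexlify(changeid)
            return 'full hex node'
        except (TypeError, ValueError):
            pass
    return 'bookmark, tag, branch or partial hex (checked in that order)'

print(classify_changeid('tip'))        # -> special name
print(classify_changeid('-2'))         # -> integer revision
print(classify_changeid('a' * 40))     # -> full hex node
print(classify_changeid('feature-x'))  # -> bookmark, tag, branch or partial hex ...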
Example #42
def bisect(changelog, state):
    clparents = changelog.parentrevs
    skip = dict.fromkeys([changelog.rev(n) for n in state['skip']])

    def buildancestors(bad, good):
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        # build ancestors array
        ancestors = [[]] * (changelog.count() + 1) # an extra for [-1]

        # clear good revs from array
        for node in goodrevs:
            ancestors[node] = None
        for rev in xrange(changelog.count(), -1, -1):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None

        if ancestors[badrev] is None:
            return badrev, None
        return badrev, ancestors

    good = 0
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors: # looking for bad to good transition?
        good = 1
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors: # now we're confused
        raise util.Abort(_("Inconsistent state, %s:%s is good and bad")
                         % (badrev, short(bad)))

    # build children dict
    children = {}
    visit = [badrev]
    candidates = []
    while visit:
        rev = visit.pop(0)
        if ancestors[rev] == []:
            candidates.append(rev)
            for prev in clparents(rev):
                if prev != -1:
                    if prev in children:
                        children[prev].append(rev)
                    else:
                        children[prev] = [rev]
                        visit.append(prev)

    candidates.sort()
    # have we narrowed it down to one entry?
    tot = len(candidates)
    if tot == 1:
        return (bad, 0, good)
    perfect = tot / 2

    # find the best node to test
    best_rev = None
    best_len = -1
    poison = {}
    for rev in candidates:
        if rev in poison:
            for c in children.get(rev, []):
                poison[c] = True # poison children
            continue

        a = ancestors[rev] or [rev]
        ancestors[rev] = None

        x = len(a) # number of ancestors
        y = tot - x # number of non-ancestors
        value = min(x, y) # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect: # found a perfect candidate? quit early
                break

        if y < perfect: # all downhill from here?
            for c in children.get(rev, []):
                poison[c] = True # poison children
            continue

        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = dict.fromkeys(ancestors[c] + a).keys()
            else:
                ancestors[c] = a + [c]

    assert best_rev is not None
    best_node = changelog.node(best_rev)

    return (best_node, tot, good)
Example #43
 def __init__(self, name, index, message):
     self.name = name
     if isinstance(name, str) and len(name) == 20:
         name = short(name)
     RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
Example #44
def bisect(changelog, state):
    """find the next node (if any) for testing during a bisect search.
    returns a (nodes, number, good) tuple.

    'nodes' is the final result of the bisect if 'number' is 0.
    Otherwise 'number' indicates the remaining possible candidates for
    the search and 'nodes' contains the next bisect target.
    'good' is True if bisect is searching for a first good changeset, False
    if searching for a first bad one.
    """

    clparents = changelog.parentrevs
    skip = dict.fromkeys([changelog.rev(n) for n in state['skip']])

    def buildancestors(bad, good):
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        # build ancestors array
        ancestors = [[]] * (changelog.count() + 1)  # an extra for [-1]

        # clear good revs from array
        for node in goodrevs:
            ancestors[node] = None
        for rev in xrange(changelog.count(), -1, -1):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None

        if ancestors[badrev] is None:
            return badrev, None
        return badrev, ancestors

    good = 0
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors:  # looking for bad to good transition?
        good = 1
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors:  # now we're confused
        raise util.Abort(
            _("Inconsistent state, %s:%s is good and bad") %
            (badrev, short(bad)))

    # build children dict
    children = {}
    visit = [badrev]
    candidates = []
    while visit:
        rev = visit.pop(0)
        if ancestors[rev] == []:
            candidates.append(rev)
            for prev in clparents(rev):
                if prev != -1:
                    if prev in children:
                        children[prev].append(rev)
                    else:
                        children[prev] = [rev]
                        visit.append(prev)

    candidates.sort()
    # have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' been skipped?
    tot = len(candidates)
    unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
    if tot == 1 or not unskipped:
        return ([changelog.node(rev) for rev in candidates], 0, good)
    perfect = tot / 2

    # find the best node to test
    best_rev = None
    best_len = -1
    poison = {}
    for rev in candidates:
        if rev in poison:
            for c in children.get(rev, []):
                poison[c] = True  # poison children
            continue

        a = ancestors[rev] or [rev]
        ancestors[rev] = None

        x = len(a)  # number of ancestors
        y = tot - x  # number of non-ancestors
        value = min(x, y)  # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect:  # found a perfect candidate? quit early
                break

        if y < perfect:  # all downhill from here?
            for c in children.get(rev, []):
                poison[c] = True  # poison children
            continue

        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = dict.fromkeys(ancestors[c] + a).keys()
            else:
                ancestors[c] = a + [c]

    assert best_rev is not None
    best_node = changelog.node(best_rev)

    return ([best_node], tot, good)
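
A small sketch of how a caller might interpret the (nodes, number, good) tuple documented in the docstring above: number == 0 means nodes is the final answer, otherwise nodes[0] is the next changeset to test. The helper and the node strings below are made up; the real consumer is Mercurial's bisect command.

def report(nodes, number, good, short=lambda n: n[:12]):
    # Turn a bisect() result into a human-readable status line.
    if number == 0:
        kind = "good" if good else "bad"
        return "the first %s revision is one of: %s" % (
            kind, ", ".join(short(n) for n in nodes))
    return "testing changeset %s (%d changesets remaining)" % (
        short(nodes[0]), number)

print(report(["a1b2c3d4e5" * 4], 0, good=False))
# -> the first bad revision is one of: a1b2c3d4e5a1
print(report(["0123456789" * 4], 12, good=False))
# -> testing changeset 012345678901 (12 changesets remaining)
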
def checkheads(repo,
               remote,
               outgoing,
               remoteheads,
               newbranch=False,
               inc=False,
               newbookmarks=[]):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """
    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    newbranches = [
        branch for branch, heads in headssum.iteritems() if heads[0] is None
    ]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise util.Abort(_("push creates new remote branches: %s!") %
                         branchnames,
                         hint=_("use 'hg push --new-branch' to create"
                                " new remote branches"))

    # 2. Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()
    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        else:
            if bm in newbookmarks:
                bookmarkedheads.add(repo[bm].node())

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    error = None
    allmissing = set(outgoing.missing)
    allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
    allfuturecommon.update(allmissing)
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads = heads
        candidate_newhs = set(newheads)
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        candidate_newhs.update(unsyncedheads)
        dhs = None  # delta heads, the new heads on branch
        discardedheads = set()
        if repo.obsstore:
            # remove future heads which are actually obsoleted by another
            # pushed element:
            #
            # XXX as above, there are several cases this code does not handle
            # XXX properly
            #
            # (1) if <nh> is public, it won't be affected by an obsolete marker
            #     and a new head is created
            #
            # (2) if the new heads have ancestors which are not obsolete and
            #     not ancestors of any other heads we will have a new head too.
            #
            # These two cases will be easy to handle for known changeset but
            # much more tricky for unsynced changes.
            newhs = set()
            for nh in candidate_newhs:
                if nh in repo and repo[nh].phase() <= phases.public:
                    newhs.add(nh)
                else:
                    for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
                        if suc != nh and suc in allfuturecommon:
                            discardedheads.add(nh)
                            break
                    else:
                        newhs.add(nh)
        else:
            newhs = candidate_newhs
        unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
        if unsynced:
            if None in unsynced:
                # old remote, no heads data
                heads = None
            elif len(unsynced) <= 4 or repo.ui.verbose:
                heads = ' '.join(short(h) for h in unsynced)
            else:
                heads = (' '.join(short(h) for h in unsynced[:4]) + ' ' +
                         _("and %s others") % (len(unsynced) - 4))
            if heads is None:
                repo.ui.status(
                    _("remote has heads that are "
                      "not known locally\n"))
            elif branch is None:
                repo.ui.status(
                    _("remote has heads that are "
                      "not known locally: %s\n") % heads)
            else:
                repo.ui.status(
                    _("remote has heads on branch '%s' that are "
                      "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            if len(newhs) > 1:
                dhs = list(newhs)
                if error is None:
                    error = (_("push creates new branch '%s' "
                               "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - bookmarkedheads - oldhs)
        if dhs:
            if error is None:
                if branch not in ('default', None):
                    error = _("push creates new remote head %s "
                              "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    error = _("push creates new remote head %s "
                              "with bookmark '%s'!") % (short(
                                  dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    error = _("push creates new remote head %s!") % short(
                        dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if error:
        raise util.Abort(error, hint=hint)
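
A toy version of the message truncation used above when reporting unsynced remote heads: show at most four abbreviated hashes unless the ui is verbose, then summarize the rest. The fake node values are generated inline.

def format_heads(unsynced, verbose=False, short=lambda h: h[:12]):
    # Abbreviate and truncate a list of head hashes for a status message.
    if len(unsynced) <= 4 or verbose:
        return ' '.join(short(h) for h in unsynced)
    return (' '.join(short(h) for h in unsynced[:4]) +
            ' and %s others' % (len(unsynced) - 4))

heads = ['%040x' % n for n in range(6)]   # six fake 40-char hex nodes
print(format_heads(heads))                # -> first four, then "and 2 others"
print(format_heads(heads[:3]))            # -> all three, no truncation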
Example #46
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        if isinstance(changeid, int):
            try:
                self._node = repo.changelog.node(changeid)
            except IndexError:
                raise error.RepoLookupError(
                    _("unknown revision '%s'") % changeid)
            self._rev = changeid
            return
        if isinstance(changeid, long):
            changeid = str(changeid)
        if changeid == '.':
            self._node = repo.dirstate.p1()
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid == 'null':
            self._node = nullid
            self._rev = nullrev
            return
        if changeid == 'tip':
            self._node = repo.changelog.tip()
            self._rev = repo.changelog.rev(self._node)
            return
        if len(changeid) == 20:
            try:
                self._node = changeid
                self._rev = repo.changelog.rev(changeid)
                return
            except LookupError:
                pass

        try:
            r = int(changeid)
            if str(r) != changeid:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l:
                raise ValueError
            self._rev = r
            self._node = repo.changelog.node(r)
            return
        except (ValueError, OverflowError, IndexError):
            pass

        if len(changeid) == 40:
            try:
                self._node = bin(changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except (TypeError, LookupError):
                pass

        if changeid in repo._bookmarks:
            self._node = repo._bookmarks[changeid]
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid in repo._tagscache.tags:
            self._node = repo._tagscache.tags[changeid]
            self._rev = repo.changelog.rev(self._node)
            return
        try:
            self._node = repo.branchtip(changeid)
            self._rev = repo.changelog.rev(self._node)
            return
        except error.RepoLookupError:
            pass

        self._node = repo.changelog._partialmatch(changeid)
        if self._node is not None:
            self._rev = repo.changelog.rev(self._node)
            return

        # lookup failed
        # check if it might have come from damaged dirstate
        #
        # XXX we could avoid the unfiltered if we had a recognizable exception
        # for filtered changeset access
        if changeid in repo.unfiltered().dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(changeid))
        try:
            if len(changeid) == 20:
                changeid = hex(changeid)
        except TypeError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
Example #47
 def __str__(self):
     return short(self.node())
Example #48
    total = len(repo)
    for i in repo:
        ui.progress(_('checking'), i, total=total, unit=_('changesets'))
        n = cl.node(i)
        checkentry(cl, i, n, seen, [i], "changelog")

        try:
            changes = cl.read(n)
            if changes[0] != nullid:
                mflinkrevs.setdefault(changes[0], []).append(i)
                refersmf = True
            for f in changes[3]:
                filelinkrevs.setdefault(_normpath(f), []).append(i)
        except Exception, inst:
            refersmf = True
            exc(i, _("unpacking changeset %s") % short(n), inst)
    ui.progress(_('checking'), None)

    ui.status(_("checking manifests\n"))
    seen = {}
    if refersmf:
        # Do not check manifest if there are only changelog entries with
        # null manifests.
        checklog(mf, "manifest", 0)
    total = len(mf)
    for i in mf:
        ui.progress(_('checking'), i, total=total, unit=_('manifests'))
        n = mf.node(i)
        lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
        if n in mflinkrevs:
            del mflinkrevs[n]
Example #49
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""
        if changeid == '':
            changeid = '.'
        self._repo = repo

        if isinstance(changeid, int):
            self._rev = changeid
            self._node = repo.changelog.node(changeid)
            return
        if isinstance(changeid, long):
            changeid = str(changeid)
        if changeid == '.':
            self._node = repo.dirstate.p1()
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid == 'null':
            self._node = nullid
            self._rev = nullrev
            return
        if changeid == 'tip':
            self._rev = len(repo.changelog) - 1
            self._node = repo.changelog.node(self._rev)
            return
        if len(changeid) == 20:
            try:
                self._node = changeid
                self._rev = repo.changelog.rev(changeid)
                return
            except LookupError:
                pass

        try:
            r = int(changeid)
            if str(r) != changeid:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l:
                raise ValueError
            self._rev = r
            self._node = repo.changelog.node(r)
            return
        except (ValueError, OverflowError):
            pass

        if len(changeid) == 40:
            try:
                self._node = bin(changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except (TypeError, LookupError):
                pass

        if changeid in repo._bookmarks:
            self._node = repo._bookmarks[changeid]
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid in repo._tagscache.tags:
            self._node = repo._tagscache.tags[changeid]
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid in repo.branchtags():
            self._node = repo.branchtags()[changeid]
            self._rev = repo.changelog.rev(self._node)
            return

        self._node = repo.changelog._partialmatch(changeid)
        if self._node is not None:
            self._rev = repo.changelog.rev(self._node)
            return

        # lookup failed
        # check if it might have come from damaged dirstate
        if changeid in repo.dirstate.parents():
            raise error.Abort(
                _("working directory has unknown parent '%s'!") %
                short(changeid))
        try:
            if len(changeid) == 20:
                changeid = hex(changeid)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % changeid)
Example #50
 def __init__(self, name, index, message):
     self.name = name
     if isinstance(name, str) and len(name) == 20:
         from node import short
         name = short(name)
     RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
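
The exception above abbreviates a 20-byte binary node with node.short before embedding it in the message. Mercurial's short form is, to my understanding, just the first 12 characters of the 40-character hex representation; a tiny standalone sketch of that behaviour with a made-up node value:

from binascii import hexlify

# Sketch of the node.short behaviour assumed here: a 20-byte binary node
# is rendered as the first 12 hex digits of its 40-character hex form.
# The node value below is a made-up placeholder.
def short(node):
    return hexlify(node).decode('ascii')[:12]

node = b'\x12\x34\x56\x78\x9a' * 4          # 20 bytes
assert short(node) == '123456789a12'
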
Example #51
0
 def __str__(self):
     return "%s@%s" % (self.path(), short(self.node()))
Example #53
0
def filemerge(repo, mynode, orig, fcd, fco, fca):
    """perform a 3-way merge in the working directory

    mynode = parent node before merge
    orig = original local filename before merge
    fco = other file context
    fca = ancestor file context
    fcd = local file context for current/destination file
    """

    def temp(prefix, ctx):
        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre)
        data = repo.wwritedata(ctx.path(), ctx.data())
        f = os.fdopen(fd, "wb")
        f.write(data)
        f.close()
        return name

    def isbin(ctx):
        try:
            return util.binary(ctx.data())
        except IOError:
            return False

    if not fco.cmp(fcd.data()): # files identical?
        return None

    ui = repo.ui
    fd = fcd.path()
    binary = isbin(fcd) or isbin(fco) or isbin(fca)
    symlink = 'l' in fcd.flags() + fco.flags()
    tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
    ui.debug(_("picked tool '%s' for %s (binary %s symlink %s)\n") %
               (tool, fd, binary, symlink))

    if not tool or tool == 'internal:prompt':
        tool = "internal:local"
        if ui.prompt(_(" no tool found to merge %s\n"
                       "keep (l)ocal or take (o)ther?") % fd,
                     (_("&Local"), _("&Other")), _("l")) != _("l"):
            tool = "internal:other"
    if tool == "internal:local":
        return 0
    if tool == "internal:other":
        repo.wwrite(fd, fco.data(), fco.flags())
        return 0
    if tool == "internal:fail":
        return 1

    # do the actual merge
    a = repo.wjoin(fd)
    b = temp("base", fca)
    c = temp("other", fco)
    out = ""
    back = a + ".orig"
    util.copyfile(a, back)

    if orig != fco.path():
        ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
    else:
        ui.status(_("merging %s\n") % fd)

    ui.debug(_("my %s other %s ancestor %s\n") % (fcd, fco, fca))

    # do we attempt to simplemerge first?
    if _toolbool(ui, tool, "premerge", not (binary or symlink)):
        r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
        if not r:
            ui.debug(_(" premerge successful\n"))
            os.unlink(back)
            os.unlink(b)
            os.unlink(c)
            return 0
        util.copyfile(back, a) # restore from backup and try again

    env = dict(HG_FILE=fd,
               HG_MY_NODE=short(mynode),
               HG_OTHER_NODE=str(fco.changectx()),
               HG_MY_ISLINK='l' in fcd.flags(),
               HG_OTHER_ISLINK='l' in fco.flags(),
               HG_BASE_ISLINK='l' in fca.flags())

    if tool == "internal:merge":
        r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
    elif tool == 'internal:dump':
        a = repo.wjoin(fd)
        util.copyfile(a, a + ".local")
        repo.wwrite(fd + ".other", fco.data(), fco.flags())
        repo.wwrite(fd + ".base", fca.data(), fca.flags())
        return 1 # unresolved
    else:
        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back # read input from backup, write to original
        replace = dict(local=a, base=b, other=c, output=out)
        args = re.sub("\$(local|base|other|output)",
                      lambda x: '"%s"' % replace[x.group()[1:]], args)
        r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env)

    if not r and _toolbool(ui, tool, "checkconflicts"):
        if re.match("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data()):
            r = 1

    if not r and _toolbool(ui, tool, "checkchanged"):
        if filecmp.cmp(repo.wjoin(fd), back):
            if ui.prompt(_(" output file %s appears unchanged\n"
                "was merge successful (yn)?") % fd,
                (_("&Yes"), _("&No")), _("n")) != _("y"):
                r = 1

    if _toolbool(ui, tool, "fixeol"):
        _matcheol(repo.wjoin(fd), back)

    if r:
        ui.warn(_("merging %s failed!\n") % fd)
    else:
        os.unlink(back)

    os.unlink(b)
    os.unlink(c)
    return r
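
Before invoking an external merge tool, the code above replaces $local, $base, $other and $output in the configured argument string with quoted file paths. A minimal standalone sketch of just that substitution step, using made-up paths in place of the temporary files Mercurial would actually create:

import re

# Sketch of the $local/$base/$other/$output expansion performed above.
# The paths below are hypothetical placeholders for the working-copy file
# and the temporary base/other copies.
def expandargs(args, local, base, other, output=''):
    replace = dict(local=local, base=base, other=other, output=output)
    return re.sub(r"\$(local|base|other|output)",
                  lambda m: '"%s"' % replace[m.group(1)], args)

print(expandargs('$local $base $other',
                 '/repo/file.txt',
                 '/tmp/file.txt~base.XXXXXX',
                 '/tmp/file.txt~other.XXXXXX'))
# "/repo/file.txt" "/tmp/file.txt~base.XXXXXX" "/tmp/file.txt~other.XXXXXX"
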
Example #54
0
def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """
    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise util.Abort(_("push creates new remote branches: %s!")
                           % branchnames,
                         hint=_("use 'hg push --new-branch' to create"
                                " new remote branches"))

    # 2. Compute newly pushed bookmarks. We don't warn
    # about bookmarked heads.
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()
    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    error = None
    unsynced = False
    allmissing = set(outgoing.missing)
    allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
    allfuturecommon.update(allmissing)
    for branch, heads in sorted(headssum.iteritems()):
        if heads[0] is None:
            # Maybe we should abort if we push more than one head
            # for new branches?
            continue
        candidate_newhs = set(heads[1])
        # add unsynced data
        oldhs = set(heads[0])
        oldhs.update(heads[2])
        candidate_newhs.update(heads[2])
        dhs = None
        discardedheads = set()
        if repo.obsstore:
            # remove future heads which are actually obsoleted by another
            # pushed element:
            #
            # XXX as above, there are several cases this code does not handle
            # XXX properly
            #
            # (1) if <nh> is public, it won't be affected by obsolete markers
            #     and a new head is created
            #
            # (2) if the new heads have ancestors which are not obsolete and
            #     not ancestors of any other heads, we will have a new head too.
            #
            # These two cases will be easy to handle for known changesets but
            # much trickier for unsynced changes.
            newhs = set()
            for nh in candidate_newhs:
                if nh in repo and repo[nh].phase() <= phases.public:
                    newhs.add(nh)
                else:
                    for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
                        if suc != nh and suc in allfuturecommon:
                            discardedheads.add(nh)
                            break
                    else:
                        newhs.add(nh)
        else:
            newhs = candidate_newhs
        if [h for h in heads[2] if h not in discardedheads]:
            unsynced = True
        if len(newhs) > len(oldhs):
            # strip updates to existing remote heads from the new heads list
            dhs = sorted(newhs - bookmarkedheads - oldhs)
        if dhs:
            if error is None:
                if branch not in ('default', None):
                    error = _("push creates new remote head %s "
                              "on branch '%s'!") % (short(dhs[0]), branch)
                else:
                    error = _("push creates new remote head %s!"
                              ) % short(dhs[0])
                if heads[2]: # unsynced
                    hint = _("you should pull and merge or "
                             "use push -f to force")
                else:
                    hint = _("did you forget to merge? "
                             "use push -f to force")
            if branch is not None:
                repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
            for h in dhs:
                repo.ui.note(_("new remote head %s\n") % short(h))
    if error:
        raise util.Abort(error, hint=hint)

    # 4. Check for unsynced changes on involved branches.
    if unsynced:
        repo.ui.warn(_("note: unsynced remote changes!\n"))
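
The heart of the per-branch test above is simple set arithmetic: a push is refused only when a branch would end up with more heads than it had before, and heads that merely update a remotely known bookmark are excluded from the complaint. A minimal standalone sketch of that decision, with made-up node names standing in for real binary nodes:

# Sketch of the new-head decision above: reject when the head set grows,
# ignoring heads that correspond to remotely known bookmarks.  Node names
# are made-up placeholders.
def newheads(oldheads, candidateheads, bookmarkedheads):
    newhs = set(candidateheads)
    oldhs = set(oldheads)
    if len(newhs) > len(oldhs):
        return sorted(newhs - set(bookmarkedheads) - oldhs)
    return []

assert newheads({'a'}, {'a', 'b'}, set()) == ['b']   # new head: would abort
assert newheads({'a'}, {'a', 'b'}, {'b'}) == []      # bookmarked head: allowed
assert newheads({'a'}, {'b'}, set()) == []           # head replaced: allowed
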
Example #55
0
def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, fetch, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "fetch" is a list of roots of the nodes that would be incoming, to be
      supplied to changegroupsubset.
    "heads" is either the supplied heads, or else the remote's heads.
    """

    knownnode = repo.changelog.hasnode
    search = []
    fetch = set()
    seen = set()
    seenbranch = set()
    base = set()

    if not heads:
        heads = remote.heads()

    if repo.changelog.tip() == nullid:
        base.add(nullid)
        if heads != [nullid]:
            return [nullid], [nullid], list(heads)
        return [nullid], [], heads

    # assume we're closer to the tip than the root
    # and start by examining the heads
    repo.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if not knownnode(h):
            unknown.append(h)
        else:
            base.add(h)

    if not unknown:
        return list(base), [], list(heads)

    req = set(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = util.deque(remote.branches(unknown))
    while unknown:
        r = []
        while unknown:
            n = unknown.popleft()
            if n[0] in seen:
                continue

            repo.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
            if n[0] == nullid:  # found the end of the branch
                pass
            elif n in seenbranch:
                repo.ui.debug("branch already found\n")
                continue
            elif n[1] and knownnode(n[1]):  # do we know the base?
                repo.ui.debug("found incomplete branch %s:%s\n" %
                              (short(n[0]), short(n[1])))
                search.append(n[0:2])  # schedule branch range for scanning
                seenbranch.add(n)
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if knownnode(n[2]) and knownnode(n[3]):
                        repo.ui.debug("found new changeset %s\n" % short(n[1]))
                        fetch.add(n[1])  # earliest unknown
                    for p in n[2:4]:
                        if knownnode(p):
                            base.add(p)  # latest known

                for p in n[2:4]:
                    if p not in req and not knownnode(p):
                        r.append(p)
                        req.add(p)
            seen.add(n[0])

        if r:
            reqcnt += 1
            repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            repo.ui.debug("request %d: %s\n" %
                          (reqcnt, " ".join(map(short, r))))
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p + 10]):
                    repo.ui.debug("received %s:%s\n" %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        newsearch = []
        reqcnt += 1
        repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
        for n, l in zip(search, remote.between(search)):
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                repo.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if knownnode(i):
                    if f <= 2:
                        repo.ui.debug("found new branch changeset %s\n" %
                                      short(p))
                        fetch.add(p)
                        base.add(i)
                    else:
                        repo.ui.debug("narrowed branch search to %s:%s\n" %
                                      (short(p), short(i)))
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
        search = newsearch

    # sanity check our fetch list
    for f in fetch:
        if knownnode(f):
            raise error.RepoError(_("already have changeset ") + short(f[:4]))

    base = list(base)
    if base == [nullid]:
        if force:
            repo.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    repo.ui.debug("found new changesets starting at " +
                  " ".join([short(f) for f in fetch]) + "\n")

    repo.ui.progress(_('searching'), None)
    repo.ui.debug("%d total queries\n" % reqcnt)

    return base, list(fetch), heads
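
The binary-search phase above narrows each unknown branch segment by probing nodes at exponentially increasing distances (f doubles on every step) until it reaches a node the local repository already knows. A rough standalone sketch of that narrowing idea over a plain list; the node names and the narrow() helper are illustrative only, not part of Mercurial's API:

# Rough sketch of the exponential narrowing done above.  'branch' is a
# made-up linear segment from newest (index 0) to oldest; 'known' is the
# set of nodes the local repo already has.
def narrow(branch, known):
    """Return ('fetch', node) once the earliest unknown node is pinned
    down, or ('search', (top, bottom)) bounds that still need narrowing."""
    samples = [branch[min(2 ** k, len(branch) - 1)]
               for k in range(len(branch).bit_length())]
    p, f = branch[0], 1
    for i in samples:
        if i in known:
            if f <= 2:
                return ('fetch', p)      # gap is small enough: fetch root found
            return ('search', (p, i))    # keep searching between p and i
        p, f = i, f * 2
    return ('search', (p, branch[-1]))

branch = ['n%d' % k for k in range(10)]     # n0 (newest) .. n9 (oldest)
print(narrow(branch, known={'n8', 'n9'}))   # ('search', ('n4', 'n8'))
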
Example #56
0
 def csmap(x):
     repo.ui.debug("add changeset %s\n" % short(x))
     return len(cl)
Example #58
0
    def _show(self, rev, changenode, copies, props):
        '''show a single changeset or file revision'''
        log = self.repo.changelog
        if changenode is None:
            changenode = log.node(rev)
        elif not rev:
            rev = log.rev(changenode)

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)))
            return

        changes = log.read(changenode)
        date = util.datestr(changes[2])
        extra = changes[5]
        branch = extra.get("branch")

        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset:   %d:%s\n") % (rev, hexfunc(changenode)))

        # don't show the default branch name
        if branch != 'default':
            branch = util.tolocal(branch)
            self.ui.write(_("branch:      %s\n") % branch)
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag:         %s\n") % tag)
        for parent in parents:
            self.ui.write(_("parent:      %d:%s\n") % parent)

        if self.ui.debugflag:
            self.ui.write(
                _("manifest:    %d:%s\n") %
                (self.repo.manifest.rev(changes[0]), hex(changes[0])))
        self.ui.write(_("user:        %s\n") % changes[1])
        self.ui.write(_("date:        %s\n") % date)

        if self.ui.debugflag:
            files = self.repo.status(log.parents(changenode)[0],
                                     changenode)[:3]
            for key, value in zip(
                [_("files:"), _("files+:"),
                 _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)))
        elif changes[3] and self.ui.verbose:
            self.ui.write(_("files:       %s\n") % " ".join(changes[3]))
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies:      %s\n") % ' '.join(copies))

        if extra and self.ui.debugflag:
            extraitems = extra.items()
            extraitems.sort()
            for key, value in extraitems:
                self.ui.write(
                    _("extra:       %s=%s\n") %
                    (key, value.encode('string_escape')))

        description = changes[4].strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"))
                self.ui.write(description)
                self.ui.write("\n\n")
            else:
                self.ui.write(
                    _("summary:     %s\n") % description.splitlines()[0])
        self.ui.write("\n")

        self.showpatch(changenode)
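
The hexfunc selection above ("self.ui.debugflag and hex or short") uses the old pre-ternary and/or idiom: because both hex and short are truthy objects, the expression yields hex when the debug flag is set and short otherwise. A tiny standalone illustration of why that works:

# The "flag and a or b" idiom relies on both alternatives being truthy.
# Plain strings stand in for the hex/short functions here.
def pick(debugflag, hexfn, shortfn):
    return debugflag and hexfn or shortfn

assert pick(True, 'hex', 'short') == 'hex'
assert pick(False, 'hex', 'short') == 'short'
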
Example #59
0
 def __init__(self, filename, node):
     from node import short
     RevlogError.__init__(self, '%s:%s' % (filename, short(node)))