def pathcopies(orig, x, y, match=None):
     func = lambda: orig(x, y, match=match)
     if x._node is not None and y._node is not None and not match:
         key = 'pathcopies:%s:%s' % (
                 node.hex(x._node), node.hex(y._node))
         return memoize(func, key, pathcopiesserializer, ui)
     return func()
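This wrapper follows Mercurial's wrapfunction convention, where the original function arrives as the first argument. A minimal hook-up sketch, assuming memoize, pathcopiesserializer and ui are defined elsewhere in the same extension (the setup code below is illustrative, not part of the original snippet):

from mercurial import copies, extensions

def extsetup(ui):
    # Install the caching wrapper over the stock copy-tracing routine;
    # the original copies.pathcopies is passed to the wrapper as `orig`.
    extensions.wrapfunction(copies, 'pathcopies', pathcopies)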
Example #2
    def changeset(self, tmpl, ctx):
        n = ctx.node()
        showtags = self.showtag(tmpl, 'changesettag', n)
        parents = ctx.parents()
        p1 = parents[0].node()

        files = []
        parity = paritygen(self.stripecount)
        for f in ctx.files():
            files.append(tmpl("filenodelink",
                              node=hex(n), file=f,
                              parity=parity.next()))

        def diff(**map):
            yield self.diff(tmpl, p1, n, None)

        return tmpl('changeset',
                    diff=diff,
                    rev=ctx.rev(),
                    node=hex(n),
                    parent=self.siblings(parents),
                    child=self.siblings(ctx.children()),
                    changesettag=showtags,
                    author=ctx.user(),
                    desc=ctx.description(),
                    date=ctx.date(),
                    files=files,
                    archives=self.archivelist(hex(n)),
                    tags=self.nodetagsdict(n),
                    branch=self.nodebranchnodefault(ctx),
                    inbranch=self.nodeinbranch(ctx),
                    branches=self.nodebranchdict(ctx))
def fix_hgtags(ui, repo, head_hgtags, tagsmap):
    for tf in iter(tagsmap):
        ui.debug('fix_hgtags: tagsmap %s -> %s\n' % (tf, tagsmap[tf]))
    for old in iter(head_hgtags):
        new = map_recursive(tagsmap, old)
        ui.debug('fix_hgtags: head %s -> %s\n' % (old, new))
        merge.update(repo, repo[new].node(), False, False, False)
        tfile = open('.hgtags', 'wb')
        lines = StringIO.StringIO(head_hgtags[old])
        for line in lines:
            if not line:
                continue
            (nodehex, name) = line.split(" ", 1)
            name = name.strip()
            nhm = map_recursive(tagsmap, nodehex)
            ui.debug('fix_hgtags: hgtags write: %s %s\n' % (nhm, name))
            tfile.write('%s %s\n' % (nhm, name))
        lines.close()    
        tfile.close()
        wctx = repo[None]
        if '.hgtags' not in wctx:
            wctx.add(['.hgtags'])
        nrev = repo.commit(text="collapse tag fix")
        if nrev:
            nctx = repo[nrev]
            ui.debug(_('fix_hgtags: nctx rev %d node %r files %r\n') % 
                     (nctx.rev(), hex(nctx.node()), nctx.files()))
            ui.debug(_('fix_hgtags: nctx parents %r\n') % 
                      ([hex(p.node()) for p in nctx.parents()]))
        else:
            ui.debug(_('fix_hgtags: nctx: None\n'))
Example #4
        def add(entry, f, isdest):
            seen.add(f)
            h = entry[3]
            p = (entry[1] == "100755")
            s = (entry[1] == "120000")
            renamesource = (not isdest and entry[4][0] == 'R')

            if f == '.gitmodules':
                if skipsubmodules:
                    return

                subexists[0] = True
                if entry[4] == 'D' or renamesource:
                    subdeleted[0] = True
                    changes.append(('.hgsub', hex(nullid)))
                else:
                    changes.append(('.hgsub', ''))
            elif entry[1] == '160000' or entry[0] == ':160000':
                if not skipsubmodules:
                    subexists[0] = True
            else:
                if renamesource:
                    h = hex(nullid)
                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
                changes.append((f, h))
Example #5
    def puttags(self, tags):
        try:
            parentctx = self.repo[self.tagsbranch]
            tagparent = parentctx.node()
        except error.RepoError:
            parentctx = None
            tagparent = nullid

        try:
            oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
        except:
            oldlines = []

        newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
        if newlines == oldlines:
            return None, None
        data = "".join(newlines)
        def getfilectx(repo, memctx, f):
            return context.memfilectx(f, data, False, False, None)

        self.ui.status(_("updating tags\n"))
        date = "%s 0" % int(time.mktime(time.gmtime()))
        extra = {'branch': self.tagsbranch}
        ctx = context.memctx(self.repo, (tagparent, None), "update tags",
                             [".hgtags"], getfilectx, "convert-repo", date,
                             extra)
        self.repo.commitctx(ctx)
        return hex(self.repo.changelog.tip()), hex(tagparent)
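puttags() serializes the tag map into the standard .hgtags format, one "<40-char hex node> <tag name>" pair per line, and commits that file on the tags branch. A small illustration of the data it builds (the node value is borrowed from the test fixtures further down this page):

tags = {'v1.0': '434ed487136c1b47c1e8f952edb4dc5a8e6328df'}
newlines = sorted("%s %s\n" % (tags[tag], tag) for tag in tags)
data = "".join(newlines)
# data == '434ed487136c1b47c1e8f952edb4dc5a8e6328df v1.0\n'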
 def test_fresh_fetch_two_revs(self):
     repo = self._load_fixture_and_fetch('two_revs.svndump')
     self.assertEqual(node.hex(repo[0].node()),
                      '434ed487136c1b47c1e8f952edb4dc5a8e6328df')
     self.assertEqual(node.hex(repo['tip'].node()),
                      'c95251e0dd04697deee99b79cc407d7db76e6a5f')
     self.assertEqual(repo['tip'], repo[1])
Example #7
    def computenonoverlap(orig, repo, c1, c2, *args, **kwargs):
        u1, u2 = orig(repo, c1, c2, *args, **kwargs)
        if shallowrepo.requirement in repo.requirements:
            m1 = c1.manifest()
            m2 = c2.manifest()
            files = []

            sparsematch1 = repo.maybesparsematch(c1.rev())
            if sparsematch1:
                sparseu1 = []
                for f in u1:
                    if sparsematch1(f):
                        files.append((f, hex(m1[f])))
                        sparseu1.append(f)
                u1 = sparseu1

            sparsematch2 = repo.maybesparsematch(c2.rev())
            if sparsematch2:
                sparseu2 = []
                for f in u2:
                    if sparsematch2(f):
                        files.append((f, hex(m2[f])))
                        sparseu2.append(f)
                u2 = sparseu2

            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return u1, u2
Example #8
 def export_hg_tags(self):
     for tag, sha in self.repo.tags().iteritems():
         # git doesn't like spaces in tag names
         tag = tag.replace(" ", "_")
         if self.repo.tagtype(tag) in ('global', 'git'):
             self.git.refs['refs/tags/' + tag] = self.map_git_get(hex(sha))
             self.tags[tag] = hex(sha)
 def test_push_to_default(self, commit=True):
     repo = self.repo
     old_tip = repo['tip'].node()
     expected_parent = repo['default'].node()
     def file_callback(repo, memctx, path):
         if path == 'adding_file':
             return context.memfilectx(path=path,
                                       data='foo',
                                       islink=False,
                                       isexec=False,
                                       copied=False)
         raise IOError(errno.EINVAL, 'Invalid operation: ' + path)
     ctx = context.memctx(repo,
                          (repo['default'].node(), node.nullid),
                          'automated test',
                          ['adding_file'],
                          file_callback,
                          'an_author',
                          '2008-10-07 20:59:48 -0500',
                          {'branch': 'default',})
     new_hash = repo.commitctx(ctx)
     if not commit:
         return # some tests use this test as an extended setup.
     hg.update(repo, repo['tip'].node())
     self.pushrevisions()
     tip = self.repo['tip']
     self.assertNotEqual(tip.node(), old_tip)
     self.assertEqual(node.hex(tip.parents()[0].node()),
                      node.hex(expected_parent))
     self.assertEqual(tip['adding_file'].data(), 'foo')
     self.assertEqual(tip.branch(), 'default')
Example #10
 def save(cls, repo, name, stripnodes):
     fp = repo.opener(cls._filename, 'wb')
     fp.write('%i\n' % cls._version)
     fp.write('%s\n' % name)
     fp.write('%s\n' % ' '.join([hex(p) for p in repo.dirstate.parents()]))
     fp.write('%s\n' % ' '.join([hex(n) for n in stripnodes]))
     fp.close()
def writefirefoxtrees(repo):
    """Write the firefoxtrees node mapping to the filesystem."""
    lines = []
    trees = {}
    for tree, node in sorted(repo.firefoxtrees.items()):
        assert len(node) == 20
        lines.append('%s %s' % (tree, hex(node)))
        trees[tree] = hex(node)

    _firefoxtreesrepo(repo).vfs.write('firefoxtrees', '\n'.join(lines))

    # Old versions of firefoxtrees stored labels in the localtags file. Since
    # this file is read by Mercurial and has no relevance to us any more, we
    # prune relevant entries from this file so the data isn't redundant with
    # what we now write.
    localtags = repo.opener.tryread('localtags')
    havedata = len(localtags) > 0
    taglines = []
    for line in localtags.splitlines():
        line = line.strip()
        node, tag = line.split()
        tree, uri = resolve_trees_to_uris([tag])[0]
        if not uri:
            taglines.append(line)

    if havedata:
        repo.vfs.write('localtags', '\n'.join(taglines))
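writefirefoxtrees() stores one "tree hex(node)" pair per line. A possible reader for that format, going through the same _firefoxtreesrepo(repo).vfs the writer uses (readfirefoxtrees itself is a hypothetical counterpart, not part of the original extension):

from mercurial.node import bin

def readfirefoxtrees(repo):
    """Parse the firefoxtrees file back into a {tree: binary node} map."""
    trees = {}
    vfs = _firefoxtreesrepo(repo).vfs
    for line in vfs.tryread('firefoxtrees').splitlines():
        line = line.strip()
        if not line:
            continue
        tree, hexnode = line.split(None, 1)
        trees[tree] = bin(hexnode)  # 40-char hex back to a 20-byte node
    return trees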
Example #12
    def committags(self, rev, endbranches):
        if not self.addedtags and not self.deletedtags:
            return
        date = self.fixdate(rev.date)
        # determine additions/deletions per branch
        branches = {}
        for tags in (self.addedtags, self.deletedtags):
            for tag, (branch, srcrev) in tags.iteritems():
                op = srcrev is None and 'rm' or 'add'
                branches.setdefault(branch, []).append((op, tag, srcrev))

        for b, tags in branches.iteritems():
            fromtag = self.get_path_tag(self.remotename(b))
            # modify parent's .hgtags source
            parent = self.repo[self.get_parent_revision(rev.revnum, b)]
            if '.hgtags' not in parent:
                src = ''
            else:
                src = parent['.hgtags'].data()
            for op, tag, r in sorted(tags, reverse=True):
                if op == 'add':
                    if fromtag:
                        if fromtag in self.tags:
                            tagged = node.hex(self.tags[fromtag])
                    else:
                        tagged = node.hex(self.revmap[
                            self.get_parent_svn_branch_and_rev(r, b)])
                else:
                    tagged = node.hex(node.nullid)
                src += '%s %s\n' % (tagged, tag)
                self.tags[tag] = node.bin(tagged), rev.revnum

            # add new changeset containing updated .hgtags
            def fctxfun(repo, memctx, path):
                return context.memfilectx(path='.hgtags', data=src,
                                          islink=False, isexec=False,
                                          copied=None)

            extra = self.genextra(rev.revnum, b)
            if fromtag:
                extra['branch'] = parent.extra().get('branch', 'default')
            self.mapbranch(extra, b in endbranches or fromtag)

            ctx = context.memctx(self.repo,
                                 (parent.node(), node.nullid),
                                 rev.message or ' ',
                                 ['.hgtags'],
                                 fctxfun,
                                 self.authors[rev.author],
                                 date,
                                 extra)
            new = self.repo.commitctx(ctx)

            if not fromtag and (rev.revnum, b) not in self.revmap:
                self.revmap[rev.revnum, b] = new
            if b in endbranches:
                endbranches.pop(b)
                bname = b or 'default'
                self.ui.status('Marked branch %s as closed.\n' % bname)
Example #13
 def __str__(self):
     """String representation for storage"""
     time = ' '.join(map(str, self.timestamp))
     oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
     newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
     return '\n'.join((
         time, self.user, self.command, self.namespace, self.name,
         oldhashes, newhashes))
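__str__ packs the entry as newline-separated fields, with the old and new hashes hex-encoded and comma-joined. A matching parser might look like the sketch below, written as a classmethod on the same class (fromstorage, the constructor signature and the two-element timestamp are assumptions, not the original code; node is the mercurial.node module already used above):

 @classmethod
 def fromstorage(cls, line):
     # Invert __str__: split the newline-joined record and decode the hashes.
     (time, user, command, namespace, name,
      oldhashes, newhashes) = line.split('\n')
     timestamp, tz = time.split(' ')
     oldhashes = tuple(node.bin(h) for h in oldhashes.split(','))
     newhashes = tuple(node.bin(h) for h in newhashes.split(','))
     return cls((float(timestamp), int(tz)), user, command, namespace, name,
                oldhashes, newhashes)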
Example #14
 def runhooks():
     args = hookargs.copy()
     args['node'] = hex(added[0])
     op.repo.hook("changegroup", **args)
     for n in added:
         args = hookargs.copy()
         args['node'] = hex(n)
         op.repo.hook("incoming", **args)
def makecollapsed(ui, repo, parent, revs, branch, tagsmap, parent_hgtags, 
                  movelog, opts):
    'Creates the collapsed revision on top of parent'

    last = max(revs)
    ui.debug(_('updating to revision %d\n') % parent)
    merge.update(repo, parent.node(), False, False, False)
    ui.debug(_('reverting to revision %d\n') % last)
    recreaterev(ui, repo, last)
    repo.dirstate.setbranch(branch)
    msg = ''
    nodelist = []
    if opts['message'] != "" :
        msg = opts['message']
    else:
        first = True
        for r in revs:
            nodelist.append(hex(repo[r].node()))
            if repo[r].files() != ['.hgtags']:
                if not first:
                    if opts['changelog']:
                        msg += '\n'
                    else:
                        msg += '----------------\n'
                first = False
                if opts['changelog']:
                    msg += "* " + ' '.join(repo[r].files()) + ":\n"

                msg += repo[r].description() + "\n"

        msg += "\nHG: Enter commit message.  Lines beginning with 'HG:' are removed.\n"
        msg += "HG: Remove all lines to abort the collapse operation.\n"

        if ui.config('ui', 'interactive') != 'off':
            msg = ui.edit(msg, ui.username())

        pattern = re.compile("^HG:.*\n", re.MULTILINE);
        msg  = re.sub(pattern, "", msg).strip();

    if not msg:
        raise util.Abort(_('empty commit message, collapse won\'t proceed'))

    write_hgtags(parent_hgtags)
    newrev = repo.commit(
        text=msg,
        user=repo[last].user(),
        date=repo[last].date())

    ctx = repo[newrev]

    newhex = hex(ctx.node())
    for n in nodelist:
        ui.debug(_('makecollapsed %s -> %s\n') % (n, newhex))
        tagsmap[n] = newhex
        if movelog:
            movelog.write('coll %s -> %s\n' % (n, newhex))
        
    return ctx
def movedescendants(ui, repo, collapsed, tomove, movemap, tagsmap, 
                    parent_hgtags, movelog, debug_delay):
    'Moves the descendants of the source revisions to the collapsed revision'

    sorted_tomove = list(tomove)
    sorted_tomove.sort()

    for r in sorted_tomove:
        ui.debug(_('moving revision %r\n') % r)

        if debug_delay:
            ui.debug(_('sleep debug_delay: %r\n') % debug_delay)
            time.sleep(debug_delay)

        parents = [p.rev() for p in repo[r].parents()]
        nodehex = hex(repo[r].node())
        if repo[r].files() == ['.hgtags'] and len(parents) == 1:
            movemap[r] = movemap[parents[0]]
            phex = hex(repo[parents[0]].node())
            assert phex in tagsmap
            tagsmap[nodehex] = tagsmap[phex]
        else:
            if len(parents) == 1:
                ui.debug(_('setting parent to %d\n') 
                         % movemap[parents[0]].rev())
                repo.dirstate.setparents(movemap[parents[0]].node())
            else:
                ui.debug(_('setting parents to %d and %d\n') %
                    (map_or_rev(repo, movemap, parents[0]).rev(), 
                     map_or_rev(repo, movemap, parents[1]).rev()))
                repo.dirstate.setparents(map_or_rev(repo, movemap, 
                                                    parents[0]).node(),
                                         map_or_rev(repo, movemap, 
                                                    parents[1]).node())

            repo.dirstate.write()
            
            ui.debug(_('reverting to revision %d\n') % r)
            recreaterev(ui, repo, r)

            write_hgtags(parent_hgtags)
            newrev = repo.commit(text=repo[r].description(), 
                                 user=repo[r].user(), date=repo[r].date(),
                                 force=True)

            if newrev is None:
                raise util.Abort(_('no commit done: text=%r, user=%r, date=%r')
                                 % (repo[r].description(), repo[r].user(), 
                                    repo[r].date()))
                
            ctx = repo[newrev]
            movemap[r] = ctx

            newhex = hex(ctx.node())
            tagsmap[nodehex] = newhex
            ui.debug(_('movedescendants %s -> %s\n') % (nodehex, newhex))
            if movelog:
                movelog.write('move %s -> %s\n' % (nodehex, newhex))
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        if requirement in self._repo.requirements:
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it, since
                # bundlerepo is heavily tied to revlogs. Require that the
                # user run unbundle instead.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, 'foo')
                if repo._cgfilespos:
                    raise error.Abort("cannot pull from full bundles",
                                      hint="use `hg unbundle` instead")
                return []
            filestosend = self.shouldaddfilegroups(source)
            if filestosend == NoFiles:
                changedfiles = list([f for f in changedfiles
                                     if not repo.shallowmatch(f)])
            else:
                files = []
                # Prefetch the revisions being bundled
                for i, fname in enumerate(sorted(changedfiles)):
                    filerevlog = repo.file(fname)
                    linkrevnodes = linknodes(filerevlog, fname)
                    # Normally we'd prune the linkrevnodes first,
                    # but that would perform the server fetches one by one.
                    for fnode, cnode in list(linkrevnodes.iteritems()):
                        # Adjust linknodes so remote file revisions aren't sent
                        if filestosend == LocalFiles:
                            localkey = fileserverclient.getlocalkey(fname,
                                                                    hex(fnode))
                            localpath = repo.sjoin(os.path.join("data",
                                                                localkey))
                            if (not os.path.exists(localpath)
                                and repo.shallowmatch(fname)):
                                del linkrevnodes[fnode]
                            else:
                                files.append((fname, hex(fnode)))
                        else:
                            files.append((fname, hex(fnode)))

                repo.fileservice.prefetch(files)

                # Prefetch the revisions that are going to be diffed against
                prevfiles = []
                for fname, fnode in files:
                    if repo.shallowmatch(fname):
                        fnode = bin(fnode)
                        filerevlog = repo.file(fname)
                        ancestormap = filerevlog.ancestormap(fnode)
                        p1, p2, linknode, copyfrom = ancestormap[fnode]
                        if p1 != nullid:
                            prevfiles.append((copyfrom or fname, hex(p1)))

                repo.fileservice.prefetch(prevfiles)

        return super(shallowcg1packer, self).generatefiles(changedfiles,
                     linknodes, commonrevs, source)
Example #18
 def save(cls, repo, name, originalwctx, pendingctx, stripnodes):
     fp = repo.vfs(cls._filename, 'wb')
     fp.write('%i\n' % cls._version)
     fp.write('%s\n' % name)
     fp.write('%s\n' % hex(originalwctx.node()))
     fp.write('%s\n' % hex(pendingctx.node()))
     fp.write('%s\n' % ' '.join([hex(p) for p in repo.dirstate.parents()]))
     fp.write('%s\n' % ' '.join([hex(n) for n in stripnodes]))
     fp.close()
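save() writes a small versioned text file: the version number, the shelve name, the original and pending context nodes, the dirstate parents, and the strip nodes, one record per line with all nodes in hex. A rough loader sketch under those assumptions (the method name, attribute names and error handling are illustrative, not the upstream implementation; bin is mercurial.node.bin):

 @classmethod
 def load(cls, repo):
     fp = repo.vfs(cls._filename)
     try:
         version = int(fp.readline().strip())
         if version != cls._version:
             raise ValueError('unsupported state file version: %d' % version)
         name = fp.readline().strip()
         originalwctx = bin(fp.readline().strip())
         pendingctx = bin(fp.readline().strip())
         parents = [bin(h) for h in fp.readline().split()]
         stripnodes = [bin(h) for h in fp.readline().split()]
     finally:
         fp.close()
     obj = cls()
     obj.name = name
     obj.wctx = repo[originalwctx]
     obj.pendingctx = repo[pendingctx]
     obj.parents = parents
     obj.stripnodes = stripnodes
     return obj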
Example #19
 def test_oldest_not_trunk_and_tag_vendor_branch(self):
     repo = self._load_fixture_and_fetch(
         'tagged_vendor_and_oldest_not_trunk.svndump')
     self.assertEqual(node.hex(repo['oldest'].node()),
                      '926671740dec045077ab20f110c1595f935334fa')
     self.assertEqual(repo['tip'].parents()[0].parents()[0],
                      repo['oldest'])
     self.assertEqual(node.hex(repo['tip'].node()),
                      '1a6c3f30911d57abb67c257ec0df3e7bc44786f7')
def comparison(web, req, tmpl):
    ctx = webutil.changectx(web.repo, req)
    if 'file' not in req.form:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = webutil.cleanpath(web.repo, req.form['file'][0])
    rename = path in ctx and webutil.renamelink(ctx[path]) or []

    parsecontext = lambda v: v == 'full' and -1 or int(v)
    if 'context' in req.form:
        context = parsecontext(req.form['context'][0])
    else:
        context = parsecontext(web.config('web', 'comparisoncontext', '5'))

    def filelines(f):
        if util.binary(f.data()):
            mt = mimetypes.guess_type(f.path())[0]
            if not mt:
                mt = 'application/octet-stream'
            return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
        return f.data().splitlines()

    parent = ctx.p1()
    leftrev = parent.rev()
    leftnode = parent.node()
    rightrev = ctx.rev()
    rightnode = ctx.node()
    if path in ctx:
        fctx = ctx[path]
        rightlines = filelines(fctx)
        if path not in parent:
            leftlines = ()
        else:
            pfctx = parent[path]
            leftlines = filelines(pfctx)
    else:
        rightlines = ()
        fctx = ctx.parents()[0][path]
        leftlines = filelines(fctx)

    comparison = webutil.compare(tmpl, context, leftlines, rightlines)
    return tmpl('filecomparison',
                file=path,
                node=hex(ctx.node()),
                rev=ctx.rev(),
                date=ctx.date(),
                desc=ctx.description(),
                extra=ctx.extra(),
                author=ctx.user(),
                rename=rename,
                branch=webutil.nodebranchnodefault(ctx),
                parent=webutil.parents(fctx),
                child=webutil.children(fctx),
                leftrev=leftrev,
                leftnode=hex(leftnode),
                rightrev=rightrev,
                rightnode=hex(rightnode),
                comparison=comparison)
Example #21
    def export_hg_commit(self, rev):
        self.ui.note(_("converting revision %s\n") % hex(rev))

        oldenc = self.swap_out_encoding()

        ctx = self.repo.changectx(rev)
        extra = ctx.extra()

        commit = Commit()

        (time, timezone) = ctx.date()
        commit.author = self.get_git_author(ctx)
        commit.author_time = int(time)
        commit.author_timezone = -timezone

        if 'committer' in extra:
            # fixup timezone
            (name, timestamp, timezone) = extra['committer'].rsplit(' ', 2)
            commit.committer = name
            commit.commit_time = timestamp

            # work around a timezone format change
            if int(timezone) % 60 != 0: #pragma: no cover
                timezone = parse_timezone(timezone)
                # Newer versions of Dulwich return a tuple here
                if isinstance(timezone, tuple):
                    timezone, neg_utc = timezone
                    commit._commit_timezone_neg_utc = neg_utc
            else:
                timezone = -int(timezone)
            commit.commit_timezone = timezone
        else:
            commit.committer = commit.author
            commit.commit_time = commit.author_time
            commit.commit_timezone = commit.author_timezone

        commit.parents = []
        for parent in self.get_git_parents(ctx):
            hgsha = hex(parent.node())
            git_sha = self.map_git_get(hgsha)
            if git_sha:
                commit.parents.append(git_sha)

        commit.message = self.get_git_message(ctx)

        if 'encoding' in extra:
            commit.encoding = extra['encoding']

        tree_sha = commit_tree(self.git.object_store, self.iterblobs(ctx))
        commit.tree = tree_sha

        self.git.object_store.add_object(commit)
        self.map_set(commit.id, ctx.hex())

        self.swap_out_encoding(oldenc)
        return commit.id
Example #22
def whereami(ui, repo, *args, **opts):
    """output the current working directory parents

    Outputs the hashes of current working directory parents separated
    by newline.
    """
    parents = repo.dirstate.parents()
    ui.status('%s\n' % hex(parents[0]))
    if parents[1] != nullid:
        ui.status('%s\n' % hex(parents[1]))
 def buildstatus(orig, self, other, status, match, ignored, clean, unknown):
     func = lambda: orig(self, other, status, match, ignored, clean, unknown)
     if not match.always():
         return func()
     if ignored or clean or unknown:
         return func()
     if self._node is None or other._node is None:
         return func()
     key = 'buildstatus:%s:%s' % (
             node.hex(self._node), node.hex(other._node))
     return memoize(func, key, buildstatusserializer, ui)
 def test_file_map(self, stupid=False):
     test_util.load_svndump_fixture(self.repo_path, 'replace_trunk_with_branch.svndump')
     filemap = open(self.filemap, 'w')
     filemap.write("include alpha\n")
     filemap.close()
     ui = self.ui(stupid)
     ui.setconfig('hgsubversion', 'filemap', self.filemap)
     commands.clone(ui, test_util.fileurl(self.repo_path),
                    self.wc_path, filemap=self.filemap)
     self.assertEqual(node.hex(self.repo[0].node()), '88e2c7492d83e4bf30fbb2dcbf6aa24d60ac688d')
     self.assertEqual(node.hex(self.repo['default'].node()), 'e524296152246b3837fe9503c83b727075835155')
 def test_file_map_exclude(self, stupid=False):
     test_util.load_svndump_fixture(self.repo_path, 'replace_trunk_with_branch.svndump')
     filemap = open(self.filemap, 'w')
     filemap.write("exclude alpha\n")
     filemap.close()
     ui = self.ui(stupid)
     ui.setconfig('hgsubversion', 'filemap', self.filemap)
     commands.clone(ui, test_util.fileurl(self.repo_path),
                    self.wc_path, filemap=self.filemap)
     self.assertEqual(node.hex(self.repo[0].node()), '2c48f3525926ab6c8b8424bcf5eb34b149b61841')
     self.assertEqual(node.hex(self.repo['default'].node()), 'b37a3c0297b71f989064d9b545b5a478bbed7cc1')
Example #26
 def export_hg_tags(self):
     for tag, sha in self.repo.tags().iteritems():
         if self.repo.tagtype(tag) in ('global', 'git'):
             tag = tag.replace(' ', '_')
             target = self.map_git_get(hex(sha))
             if target is not None:
                 self.git.refs['refs/tags/' + tag] = target
                 self.tags[tag] = hex(sha)
             else:
                 self.repo.ui.warn(
                     'Skipping export of tag %s because it '
                     'has no matching git revision.' % tag)
Example #27
 def getdiff(ui, repo, r, parent, opts):
     '''return diff for the specified revision'''
     output = ""
     if opts.get('git') or ui.configbool('diff', 'git'):
         # Git diffs don't include the revision numbers with each file, so
         # we have to put them in the header instead.
         output += "# Node ID " + node.hex(r.node()) + "\n"
         output += "# Parent  " + node.hex(parent.node()) + "\n"
     diffopts = patch.diffopts(ui, opts)
     for chunk in patch.diff(repo, parent.node(), r.node(), opts=diffopts):
         output += chunk
     return output
Example #28
 def test_two_branches_with_heads(self):
     repo = self._load_fixture_and_fetch('two_heads.svndump')
     self.assertEqual(node.hex(repo[0].node()),
                      '434ed487136c1b47c1e8f952edb4dc5a8e6328df')
     self.assertEqual(node.hex(repo['tip'].node()),
                      '1083037b18d85cd84fa211c5adbaeff0fea2cd9f')
     self.assertEqual(node.hex(repo['the_branch'].node()),
                      '4e256962fc5df545e2e0a51d0d1dc61c469127e6')
     self.assertEqual(node.hex(repo['the_branch'].parents()[0].node()),
                      'f1ff5b860f5dbb9a59ad0921a79da77f10f25109')
     self.assertEqual(len(repo['tip'].parents()), 1)
     self.assertEqual(repo['tip'], repo['default'])
     self.assertEqual(len(repo.heads()), 2)
Example #29
 def test_oldest_not_trunk_and_tag_vendor_branch(self):
     repo = test_util.load_fixture_and_fetch(
         'tagged_vendor_and_oldest_not_trunk.svndump',
         self.repo_path,
         self.wc_path,
         True)
     repo = hg.repository(ui.ui(), self.wc_path)
     self.assertEqual(node.hex(repo['oldest'].node()),
                      '926671740dec045077ab20f110c1595f935334fa')
     self.assertEqual(repo['tip'].parents()[0].parents()[0],
                      repo['oldest'])
     self.assertEqual(node.hex(repo['tip'].node()),
                      '1a6c3f30911d57abb67c257ec0df3e7bc44786f7')
Example #30
 def test_branches(self):
     repo = self._load_fixture_and_fetch('simple_branch.svndump')
     self.assertEqual(node.hex(repo[0].node()),
                      'a1ff9f5d90852ce7f8e607fa144066b0a06bdc57')
     self.assertEqual(node.hex(repo['tip'].node()),
                      '545e36ed13615e39c5c8fb0c325109d8cb8e00c3')
     self.assertEqual(len(repo['tip'].parents()), 1)
     self.assertEqual(repo['tip'].parents()[0], repo['default'])
     self.assertEqual(repo['tip'].extra()['convert_revision'],
                      'svn:3cd547df-371e-4add-bccf-aba732a2baf5/branches/the_branch@4')
     self.assertEqual(repo['default'].extra()['convert_revision'],
                      'svn:3cd547df-371e-4add-bccf-aba732a2baf5/trunk@3')
     self.assertEqual(len(repo.heads()), 1)
def exchangepullpushlog(orig, pullop):
    """This is called during pull to fetch pushlog data.

    The goal of this function is to replicate the entire pushlog. This is
    in contrast to replicating only the pushlog data for changesets the
    client has pulled. Put another way, this attempts complete replication
    as opposed to partial, hole-y replication.
    """
    # check stepsdone for future compatibility with bundle2 pushlog exchange.
    res = orig(pullop)

    if 'pushlog' in pullop.stepsdone or not pullop.remote.capable('pushlog'):
        return res

    pullop.stepsdone.add('pushlog')
    repo = pullop.repo
    urepo = repo.unfiltered()
    fetchfrom = repo.pushlog.lastpushid() + 1
    lines = pullop.remote._call('pushlog', firstpush=str(fetchfrom))
    lines = iter(lines.splitlines())

    statusline = next(lines)
    if statusline[0] == '0':
        raise Abort('remote error fetching pushlog: %s' % next(lines))
    elif statusline != '1':
        raise Abort('error fetching pushlog: unexpected response: %s\n' %
                    statusline)

    pushes = []
    for line in lines:
        pushid, who, when, nodes = line.split(' ', 3)
        nodes = [bin(n) for n in nodes.split()]

        # We stop processing if there is a reference to an unknown changeset.
        # This can happen in a few scenarios.
        #
        # Since the server streams *all* pushlog entries (from a starting
        # number), it could send pushlog entries for changesets the client
        # didn't request or were pushed since the client started pulling.
        #
        # If the remote repo contains obsolete changesets, we may see a
        # reference to a hidden changeset that was never transferred locally.
        #
        # The important thing we want to prevent is a reference to a locally
        # unknown changeset appearing in the pushlog.
        #
        # On hg.mo, there is a hack that transfers hidden changesets during
        # pulls. So when operating in mirror mode on that server, we should
        # never have locally unknown changesets.
        try:
            # Test against unfiltered repo so we can record entries for hidden
            # changesets.
            [urepo[n] for n in nodes]
        except error.RepoLookupError:
            missing = [hex(n) for n in nodes if n not in urepo]
            repo.ui.warn('received pushlog entry for unknown changeset %s; '
                         'ignoring\n' % ', '.join(missing))
            break

        pushes.append((int(pushid), who, int(when), nodes))

    repo.pushlog.recordpushes(pushes, tr=pullop.trmanager.transaction())
    repo.ui.status('added %d pushes\n' % len(pushes))

    return res
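The pushlog wire format parsed above is plain text: a status line ('1' on success; a line starting with '0' signals an error described on the next line), followed by one line per push of the form "<pushid> <who> <when> <node> [<node> ...]". A hypothetical two-push payload, reusing changeset hashes that appear in the test fixtures on this page (the user and timestamps are placeholders):

sample = (
    '1\n'
    '1 user@example.com 1228683588 '
    '434ed487136c1b47c1e8f952edb4dc5a8e6328df\n'
    '2 user@example.com 1228683590 '
    'c95251e0dd04697deee99b79cc407d7db76e6a5f\n'
)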
Example #32
    def putcommit(self, files, copies, parents, commit, source, revmap, full,
                  cleanp2):
        files = dict(files)

        def getfilectx(repo, memctx, f):
            if p2ctx and f in p2files and f not in copies:
                self.ui.debug('reusing %s from p2\n' % f)
                try:
                    return p2ctx[f]
                except error.ManifestLookupError:
                    # If the file doesn't exist in p2, then we're syncing a
                    # delete, so just return None.
                    return None
            try:
                v = files[f]
            except KeyError:
                return None
            data, mode = source.getfile(f, v)
            if data is None:
                return None
            if f == '.hgtags':
                data = self._rewritetags(source, revmap, data)
            if f == '.hgsubstate':
                data = self._rewritesubstate(source, data)
            return context.memfilectx(self.repo, f, data, 'l' in mode, 'x'
                                      in mode, copies.get(f))

        pl = []
        for p in parents:
            if p not in pl:
                pl.append(p)
        parents = pl
        nparents = len(parents)
        if self.filemapmode and nparents == 1:
            m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0]
            parent = parents[0]

        if len(parents) < 2:
            parents.append(nodemod.nullid)
        if len(parents) < 2:
            parents.append(nodemod.nullid)
        p2 = parents.pop(0)

        text = commit.desc

        sha1s = re.findall(sha1re, text)
        for sha1 in sha1s:
            oldrev = source.lookuprev(sha1)
            newrev = revmap.get(oldrev)
            if newrev is not None:
                text = text.replace(sha1, newrev[:len(sha1)])

        extra = commit.extra.copy()

        sourcename = self.repo.ui.config('convert', 'hg.sourcename')
        if sourcename:
            extra['convert_source'] = sourcename

        for label in ('source', 'transplant_source', 'rebase_source',
                      'intermediate-source'):
            node = extra.get(label)

            if node is None:
                continue

            # Only transplant stores its reference in binary
            if label == 'transplant_source':
                node = nodemod.hex(node)

            newrev = revmap.get(node)
            if newrev is not None:
                if label == 'transplant_source':
                    newrev = nodemod.bin(newrev)

                extra[label] = newrev

        if self.branchnames and commit.branch:
            extra['branch'] = commit.branch
        if commit.rev and commit.saverev:
            extra['convert_revision'] = commit.rev

        while parents:
            p1 = p2
            p2 = parents.pop(0)
            p1ctx = self.repo[p1]
            p2ctx = None
            if p2 != nodemod.nullid:
                p2ctx = self.repo[p2]
            fileset = set(files)
            if full:
                fileset.update(self.repo[p1])
                fileset.update(self.repo[p2])

            if p2ctx:
                p2files = set(cleanp2)
                for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
                    p2files.add(file)
                    fileset.add(file)

            ctx = context.memctx(self.repo, (p1, p2), text, fileset,
                                 getfilectx, commit.author, commit.date, extra)

            # We won't know if the conversion changes the node until after the
            # commit, so copy the source's phase for now.
            self.repo.ui.setconfig('phases', 'new-commit',
                                   phases.phasenames[commit.phase], 'convert')

            with self.repo.transaction("convert") as tr:
                node = nodemod.hex(self.repo.commitctx(ctx))

                # If the node value has changed, but the phase is lower than
                # draft, set it back to draft since it hasn't been exposed
                # anywhere.
                if commit.rev != node:
                    ctx = self.repo[node]
                    if ctx.phase() < phases.draft:
                        phases.registernew(self.repo, tr, phases.draft,
                                           [ctx.node()])

            text = "(octopus merge fixup)\n"
            p2 = node

        if self.filemapmode and nparents == 1:
            man = self.repo.manifestlog._revlog
            mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
            closed = 'close' in commit.extra
            if not closed and not man.cmp(m1node, man.revision(mnode)):
                self.ui.status(_("filtering out empty revision\n"))
                self.repo.rollback(force=True)
                return parent
        return p2
Example #33
 def branchlist(self):
     for br, n in self.hgrepo.branchtags().items():
         self.outs.write("%s %s\n" % (br, node.hex(n)))
     self.outs.write("\n")
     self.outs.flush()
Example #34
 def getheads(self):
     return [nodemod.hex(h) for h in self._heads if self.keep(h)]
Example #35
def manifest(web, req, tmpl):
    """
    /manifest[/{revision}[/{path}]]
    -------------------------------

    Show information about a directory.

    If the URL path arguments are omitted, information about the root
    directory for the ``tip`` changeset will be shown.

    Because this handler can only show information for directories, it
    is recommended to use the ``file`` handler instead, as it can handle both
    directories and files.

    The ``manifest`` template will be rendered for this handler.
    """
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    mf = ctx.manifest()
    node = ctx.node()

    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    if path and path[-1] != "/":
        path += "/"
    l = len(path)
    abspath = "/" + path

    for full, n in mf.iteritems():
        # the virtual path (working copy path) used for the full
        # (repository) path
        f = decodepath(full)

        if f[:l] != path:
            continue
        remain = f[l:]
        elements = remain.split('/')
        if len(elements) == 1:
            files[remain] = full
        else:
            h = dirs  # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                if len(h) > 1:
                    break
            h[None] = None  # denotes files present

    if mf and not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(**map):
        for f in sorted(files):
            full = files[f]

            fctx = ctx.filectx(full)
            yield {
                "file": full,
                "parity": parity.next(),
                "basename": f,
                "date": fctx.date(),
                "size": fctx.size(),
                "permissions": mf.flags(full)
            }

    def dirlist(**map):
        for d in sorted(dirs):

            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                k, v = h.items()[0]
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {
                "parity": parity.next(),
                "path": path,
                "emptydirs": "/".join(emptydirs),
                "basename": d
            }

    return tmpl("manifest",
                rev=ctx.rev(),
                symrev=symrev,
                node=hex(node),
                path=abspath,
                up=webutil.up(abspath),
                upparity=parity.next(),
                fentries=filelist,
                dentries=dirlist,
                archives=web.archivelist(hex(node)),
                tags=webutil.nodetagsdict(web.repo, node),
                bookmarks=webutil.nodebookmarksdict(web.repo, node),
                branch=webutil.nodebranchnodefault(ctx),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx))
Example #36
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
    # read config parameters
    baseurl = ui.config('hgbuildbot', 'baseurl',
                        ui.config('web', 'baseurl', ''))
    masters = ui.configlist('hgbuildbot', 'master')
    if masters:
        branchtype = ui.config('hgbuildbot', 'branchtype', 'inrepo')
        branch = ui.config('hgbuildbot', 'branch')
        fork = ui.configbool('hgbuildbot', 'fork', False)
        # notify also has this setting
        stripcount = int(ui.config('notify', 'strip') or ui.config('hgbuildbot', 'strip', 3))
        category = ui.config('hgbuildbot', 'category', None)
        project = ui.config('hgbuildbot', 'project', '')
        auth = ui.config('hgbuildbot', 'auth', None)
    else:
        ui.write("* You must add a [hgbuildbot] section to .hg/hgrc in "
                 "order to use buildbot hook\n")
        return

    if hooktype != "changegroup":
        ui.status("hgbuildbot: hooktype %s not supported.\n" % hooktype)
        return

    if fork:
        child_pid = os.fork()
        if child_pid == 0:
            # child
            pass
        else:
            # parent
            ui.status("Notifying buildbot...\n")
            return

    # only import inside the fork if forked
    from buildbot.clients import sendchange
    from twisted.internet import defer, reactor

    if branch is None:
        if branchtype == 'dirname':
            branch = os.path.basename(repo.root)

    if not auth:
        auth = 'change:changepw'
    auth = auth.split(':', 1)

    # process changesets
    def _send(res, s, c):
        if not fork:
            ui.status("rev %s sent\n" % c['revision'])
        return s.send(c['branch'], c['revision'], c['comments'],
                      c['files'], c['username'], category=category,
                      repository=repository, project=project, vc='hg',
                      properties=c['properties'])

    try:    # first try Mercurial 1.1+ api
        start = repo[node].rev()
        end = len(repo)
    except TypeError:   # else fall back to old api
        start = repo.changelog.rev(bin(node))
        end = repo.changelog.count()

    repository = strip(repo.root, stripcount)
    repository = baseurl + repository

    for master in masters:
        s = sendchange.Sender(master, auth=auth)
        d = defer.Deferred()
        reactor.callLater(0, d.callback, None)

        for rev in xrange(start, end):
            # send changeset
            node = repo.changelog.node(rev)
            manifest, user, (time, timezone), files, desc, extra = repo.changelog.read(node)
            parents = filter(lambda p: not p == nullid, repo.changelog.parents(node))
            if branchtype == 'inrepo':
                branch = extra['branch']
            is_merge = len(parents) > 1
            # merges don't always contain files, but at least one file is required by buildbot
            if is_merge and not files:
                files = ["merge"]
            properties = {'is_merge': is_merge}
            if branch:
                branch = fromlocal(branch)
            change = {
                'master': master,
                'username': fromlocal(user),
                'revision': hex(node),
                'comments': fromlocal(desc),
                'files': files,
                'branch': branch,
                'properties': properties
            }
            d.addCallback(_send, s, change)

    def _printSuccess(res):
        ui.status(s.getSuccessString(res) + '\n')

    def _printFailure(why):
        ui.warn(s.getFailureString(why) + '\n')

    d.addCallbacks(_printSuccess, _printFailure)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()

    if fork:
        os._exit(os.EX_OK)
    else:
        return
def doreview(repo, ui, remote, nodes):
    """Do the work of submitting a review to a remote repo.

    :remote is a peerrepository.
    :nodes is a list of nodes to review.
    """
    assert nodes
    assert 'pushreview' in getreviewcaps(remote)

    # Ensure a color for ui.warning is defined.
    try:
        color = extensions.find('color')
        if 'ui.warning' not in color._styles:
            color._styles['ui.warning'] = 'red'
    except Exception:
        pass

    bzauth = getbugzillaauth(ui)
    if not bzauth:
        ui.warn(_('Bugzilla credentials not available. Not submitting review.\n'))
        return

    identifier = None

    # The review identifier can come from a number of places. In order of
    # priority:
    # 1. --reviewid argument passed to push command
    # 2. The active bookmark
    # 3. The active branch (if it isn't default)
    # 4. A bug number extracted from commit messages

    if repo.reviewid:
        identifier = repo.reviewid

    # TODO The server currently requires a bug number for the identifier.
    # Pull bookmark and branch names in once allowed.
    #elif repo._bookmarkcurrent:
    #    identifier = repo._bookmarkcurrent
    #elif repo.dirstate.branch() != 'default':
    #    identifier = repo.dirstate.branch()

    if not identifier:
        identifiers = set()
        for node in nodes:
            ctx = repo[node]
            bugs = parse_bugs(ctx.description().split('\n')[0])
            if bugs:
                identifier = 'bz://%s' % bugs[0]
                identifiers.add(identifier)

        if len(identifiers) > 1:
            raise util.Abort('cannot submit reviews referencing multiple '
                             'bugs', hint='limit reviewed changesets '
                             'with "-c" or "-r" arguments')

    identifier = ReviewID(identifier)

    if not identifier:
        ui.write(_('Unable to determine review identifier. Review '
            'identifiers are extracted from commit messages automatically. '
            'Try to begin one of your commit messages with "Bug XXXXXX -"\n'))
        return

    # Append irc nick to review identifier.
    # This is an ugly workaround to a limitation in ReviewBoard. RB doesn't
    # really support changing the owner of a review. It is doable, but no
    # history is stored and this leads to faulty attribution. More details
    # in bug 1034188.
    if not identifier.user:
        ircnick = ui.config('mozilla', 'ircnick', None)
        identifier.user = ircnick

    if hasattr(repo, 'mq'):
        for patch in repo.mq.applied:
            if patch.node in nodes:
                ui.warn(_('(You are using mq to develop patches. For the best '
                    'code review experience, use bookmark-based development '
                    'with changeset evolution. Read more at '
                    'https://mozilla-version-control-tools.readthedocs.io/en/latest/mozreview-user.html)\n'))
                break

    req = commonrequestdict(ui, bzauth)
    req['identifier'] = identifier.full
    req['changesets'] = []
    req['obsolescence'] = obsolete.isenabled(repo, obsolete.createmarkersopt)
    req['deduce-reviewers'] = ui.configbool('reviewboard', 'deduce-reviewers', True)

    reviews = repo.reviews
    oldparentid = reviews.findparentreview(identifier=identifier.full)

    # Include obsolescence data so server can make intelligent decisions.
    obsstore = repo.obsstore
    for node in nodes:
        precursors = [hex(n) for n in obsolete.allprecursors(obsstore, [node])]
        req['changesets'].append({
            'node': hex(node),
            'precursors': precursors,
        })

    ui.write(_('submitting %d changesets for review\n') % len(nodes))

    res = calljsoncommand(ui, remote, 'pushreview', data=req, httpcap='submithttp',
                          httpcommand='mozreviewsubmitseries')

    # Re-encode all items in res from u'' to utf-8 byte str to avoid
    # exceptions during str operations.
    reencoderesponseinplace(res)

    if 'error' in res:
        raise error.Abort(res['error'])

    for w in res['display']:
        ui.write('%s\n' % w)

    reviews.baseurl = res['rburl']
    newparentid = res['parentrrid']
    reviews.addparentreview(identifier.full, newparentid)

    nodereviews = {}
    reviewdata = {}

    for rid, info in sorted(res['reviewrequests'].iteritems()):
        if 'node' in info:
            node = bin(info['node'])
            nodereviews[node] = rid

        reviewdata[rid] = {
            'status': info['status'],
            'public': info['public'],
        }

        if 'reviewers' in info:
            reviewdata[rid]['reviewers'] = info['reviewers']

    reviews.remoteurl = remote.url()

    for node, rid in nodereviews.items():
        reviews.addnodereview(node, rid, newparentid)

    reviews.write()
    for rid, data in reviewdata.iteritems():
        reviews.savereviewrequest(rid, data)

    havedraft = False

    ui.write('\n')
    for node in nodes:
        rid = nodereviews[node]
        ctx = repo[node]
        # Bug 1065024 use cmdutil.show_changeset() here.
        ui.write('changeset:  %s:%s\n' % (ctx.rev(), ctx.hex()[0:12]))
        ui.write('summary:    %s\n' % ctx.description().splitlines()[0])
        ui.write('review:     %s' % reviews.reviewurl(rid))
        if not reviewdata[rid].get('public'):
            havedraft = True
            ui.write(' (draft)')
        ui.write('\n\n')

    ui.write(_('review id:  %s\n') % identifier.full)
    ui.write(_('review url: %s') % reviews.parentreviewurl(identifier.full))
    if not reviewdata[newparentid].get('public'):
        havedraft = True
        ui.write(' (draft)')
    ui.write('\n')

    # Warn people that they have not assigned reviewers for at least some
    # of their commits.
    for node in nodes:
        rd = reviewdata[nodereviews[node]]
        if not rd.get('reviewers', None):
            ui.write('\n')
            ui.warn(_('(review requests lack reviewers; visit review url '
                      'to assign reviewers)\n'))
            break

    # Make it clear to the user that they need to take action in order for
    # others to see this review series.
    if havedraft:
        # If there is no configuration value specified for
        # reviewboard.autopublish, prompt the user. Otherwise, publish
        # automatically or not based on this value.
        if ui.config('reviewboard', 'autopublish', None) is None:
            ui.write('\n')
            publish = ui.promptchoice(_('publish these review '
                                        'requests now (Yn)? '
                                        '$$ &Yes $$ &No')) == 0
        else:
            publish = ui.configbool('reviewboard', 'autopublish')

        if publish:
            publishreviewrequests(ui, remote, bzauth, [newparentid])
        else:
            ui.status(_('(visit review url to publish these review '
                        'requests so others can see them)\n'))
Example #38
def localmarkers(ui, repo):
    markers = []

    active_node = repo[b'.'].node()
    all_heads = set(repo.heads())
    current_name = repo.dirstate.branch()

    branch_list = repo.branchmap().iterbranches()
    for branch_name, branch_heads, tip_node, is_closed in branch_list:
        for head_node in branch_heads:

            is_active = False
            if branch_name == current_name:
                if head_node == active_node:
                    is_active = True

            is_tip = (head_node == tip_node)

            if is_closed:
                head_closed = True
            else:
                head_closed = bool(head_node not in all_heads)

            description = repo[head_node].description()

            markers.append({
                'type': 'branch',
                'name': branch_name,
                'node': node.hex(head_node),
                'isActive': is_active,
                'isClosed': head_closed,
                'isTip': is_tip,
                'description': description,
            })

    bookmarks = repo._bookmarks
    active_bookmark = repo._activebookmark

    for bookmark_name, bookmark_node in arc_items(bookmarks):
        is_active = (active_bookmark == bookmark_name)
        description = repo[bookmark_node].description()

        markers.append({
            'type': 'bookmark',
            'name': bookmark_name,
            'node': node.hex(bookmark_node),
            'isActive': is_active,
            'description': description,
        })

    # Add virtual markers for the current commit state and current branch state
    # so callers can figure out exactly where we are.

    # Common cases where this matters include:

    # You run "hg update 123" to update to an older revision. Your working
    # copy commit will not be a branch head or a bookmark.

    # You run "hg branch X" to create a new branch, but have not made any commits
    # yet. Your working copy branch will not be reflected in any commits.

    markers.append({
        'type': 'branch-state',
        'name': current_name,
        'node': None,
        'isActive': True,
        'isClosed': False,
        'isTip': False,
        'description': None,
    })

    markers.append({
        'type': 'commit-state',
        'name': None,
        'node': node.hex(active_node),
        'isActive': True,
        'isClosed': False,
        'isTip': False,
        'description': repo[b'.'].description(),
    })

    return markers
Example #39
def export_file_contents(ctx,
                         manifest,
                         files,
                         hgtags,
                         encoding='',
                         filter_contents=None):

    count = 0
    max = len(files)
    gitAttribute = None
    for file in files:
        # Skip .hgtags files. They only get us in trouble.
        if not hgtags and file == ".hgtags":
            sys.stderr.write('Skip %s\n' % (file))
            continue
        if encoding:
            filename = file.decode(encoding).encode('utf8')
        else:
            filename = file
        if filename[:5] == ".hglf":
            filename = filename[5:]
            sys.stderr.write("Detected large file named %s\n" % (filename))
            #should detect where the large files are located
            d = ctx.filectx(file).data()
            lfsFileCached = lfutil.findfile(ctx.repo(), d.strip('\n'))
            if lfsFileCached is not None:
                d = generate_git_lfs_pointer(lfsFileCached)
            else:
                # Autodownloading from the mercurial repository would be an issue as there is a good chance that we may
                # need to input some username and password. This will surely break fast-export as there will be
                # some unexpected output.
                sys.stderr.write("Large file wasn't found in local cache.\n")
                sys.stderr.write("Please clone with --all-largefiles\n")
                sys.stderr.write("or pull with --lfrev %s\n" %
                                 (str(ctx.rev())))
                sys.exit(
                    3
                )  # closing in the middle of import will revert everything to the last checkpoint
        else:
            file_ctx = ctx.filectx(file)
            d = file_ctx.data()
        if filter_contents:
            import subprocess
            filter_cmd = filter_contents + [
                filename,
                node.hex(file_ctx.filenode()),
                '1' if file_ctx.isbinary() else '0'
            ]
            try:
                filter_proc = subprocess.Popen(filter_cmd,
                                               stdin=subprocess.PIPE,
                                               stdout=subprocess.PIPE)
                d, _ = filter_proc.communicate(d)
            except Exception:
                sys.stderr.write('Error running filter-contents %s:\n' % filter_cmd)
                raise
            filter_ret = filter_proc.poll()
            if filter_ret:
                raise subprocess.CalledProcessError(filter_ret, filter_cmd)

        wr('M %s inline %s' %
           (gitmode(manifest.flags(file)), strip_leading_slash(filename)))
        wr('data %d' % len(d))  # had some trouble with size()
        wr(d)
        count += 1
        if count % cfg_export_boundary == 0:
            sys.stderr.write('Exported %d/%d files\n' % (count, max))
    if max > cfg_export_boundary:
        sys.stderr.write('Exported %d/%d files\n' % (count, max))
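
# generate_git_lfs_pointer() is defined elsewhere in this exporter. As a
# hedged sketch only: assuming the standard Git LFS pointer layout
# (version/oid/size), a pointer builder could look like the function below;
# its name and argument are illustrative and not part of the code above.
import hashlib

def sketch_git_lfs_pointer(cached_path):
    # Read the cached largefile and render it as a Git LFS pointer blob.
    with open(cached_path, 'rb') as fh:
        data = fh.read()
    oid = hashlib.sha256(data).hexdigest()
    return ('version https://git-lfs.github.com/spec/v1\n'
            'oid sha256:%s\n'
            'size %d\n' % (oid, len(data)))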
Exemple #40
0
def file_mismatch(f1, f2):
    """See if two revisions of a file are not equal."""
    return node.hex(f1) != node.hex(f2)
Exemple #41
0
def getlocalkey(file, id):
    pathhash = node.hex(hashlib.sha1(file).digest())
    return os.path.join(pathhash, id)
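# Usage sketch (illustrative values): the key is a two-level path of the form
# "<40-char hex sha1 of the file name>/<id>", for example:
#   getlocalkey(b'foo/bar.txt', '0' * 40)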
Exemple #42
0
repo = hg.repository(u, b'test2', create=1)
os.chdir('test2')

# make some commits
for i in [b'1', b'2', b'3']:
    with open(i, 'wb') as f:
        f.write(i)
    status = scmutil.status([], [i], [], [], [], [], [])
    ctx = context.workingcommitctx(repo,
                                   status,
                                   text=i,
                                   user=b'*****@*****.**',
                                   date=(0, 0))
    ctx.p1().manifest()  # side effect: cache manifestctx
    n = repo.commitctx(ctx)
    printb(b'commit %s: %s' % (i, hex(n)))

    # touch 00manifest.i mtime so storecache could expire.
    # repo.__dict__['manifestlog'] is deleted by transaction releasefn.
    st = repo.svfs.stat(b'00manifest.i')
    repo.svfs.utime(b'00manifest.i',
                    (st[stat.ST_MTIME] + 1, st[stat.ST_MTIME] + 1))

    # read the file just committed
    try:
        if repo[n][i].data() != i:
            print('data mismatch')
    except Exception as ex:
        print('cannot read data: %r' % ex)

with repo.wlock(), repo.lock(), repo.transaction(b'test'):
    def pushes(self,
               start_id=None,
               start_id_exclusive=False,
               end_id=None,
               end_id_exclusive=False,
               reverse=False,
               limit=None,
               offset=None,
               users=None,
               start_time=None,
               start_time_exclusive=False,
               end_time=None,
               end_time_exclusive=False,
               start_node=None,
               start_node_exclusive=False,
               end_node=None,
               end_node_exclusive=False,
               nodes=None,
               only_replicated=False):
        """Return information about pushes to this repository.

        This is a generator of Push namedtuples describing each push. Each
        tuple has the form:

            (pushid, who, when, [nodes])

        Nodes are returned in their 40-character hex form.

        ``start_id`` and ``end_id`` define the lower and upper bounds for
        numeric push IDs. ``start_id_exclusive`` and ``end_id_exclusive`` can
        be used to make the boundary condition exclusive instead of inclusive.

        ``start_time`` and ``end_time`` define a lower and upper limit for the
        push time, as specified in seconds since UNIX epoch.
        ``start_time_exclusive`` and ``end_time_exclusive`` can be used to make
        the boundary condition exclusive instead of inclusive.

        ``start_node`` and ``end_node`` define lower and upper limits for
        pushes, where each bound is identified by a revision contained in the
        push.
        ``start_node_exclusive`` and ``end_node_exclusive`` can be used to make
        the boundary condition exclusive instead of inclusive.

        ``nodes`` is an iterable of revision identifiers. If specified, only
        pushes containing nodes from this set will be returned.

        ``users`` is an iterable of push users to limit results to.

        ``reverse`` can be used to return pushes from most recent to oldest
        instead of the default of oldest to newest.

        ``offset`` can be used to skip the first N pushes that would be
        returned.

        ``limit`` can be used to limit the number of returned pushes to that
        count.

        ``only_replicated`` can be specified to only include info about pushes
        that have been fully replicated.

        When multiple filters are defined, they are logically ANDed together.
        """
        if start_id is not None and start_node is not None:
            raise ValueError('cannot specify both start_id and start_node')

        if end_id is not None and end_node is not None:
            raise ValueError('cannot specify both end_id and end_node')

        with self.conn(readonly=True) as c:
            if not c:
                return

            start_id = start_id if start_id is not None else 0

            # We further refine start_id and end_id by nodes, if specified.
            # We /could/ do this in a single SQL statement. But that would
            # make the level of nesting a bit complicated. So we just issue
            # an extra SQL statement to resolve the push id from a node.
            if start_node is not None:
                start_node = self.repo.lookup(start_node)
                start_id = self.pushfromnode(start_node).pushid
                start_id_exclusive = start_node_exclusive

            if end_node is not None:
                end_node = self.repo.lookup(end_node)
                end_id = self.pushfromnode(end_node).pushid
                end_id_exclusive = end_node_exclusive

            op = '>' if start_id_exclusive else '>='

            # In order to support LIMIT and OFFSET at the push level,
            # we need to use an inner SELECT to apply the filtering there.
            # That's because LIMIT and OFFSET apply to the SELECT as a whole.
            # Since we're doing a LEFT JOIN, LIMIT and OFFSET would count nodes,
            # not pushes.
            inner_q = ('SELECT id, user, date FROM pushlog '
                       'WHERE id %s ? ' % op)
            args = [start_id]

            if end_id is not None:
                op = '<' if end_id_exclusive else '<='
                inner_q += 'AND id %s ? ' % op
                args.append(end_id)

            if start_time is not None:
                op = '>' if start_time_exclusive else '>='
                inner_q += 'AND date %s ? ' % op
                args.append(start_time)

            if end_time is not None:
                op = '<' if end_time_exclusive else '<='
                inner_q += 'AND date %s ? ' % op
                args.append(end_time)

            user_q = []
            for user in users or []:
                user_q.append('user=?')
                args.append(user)

            if user_q:
                inner_q += 'AND (%s) ' % ' OR '.join(user_q)

            # We include the push for each listed node. We do this via multiple
            # subqueries to select the pushid for each node.
            node_q = []
            for node in nodes or []:
                node_q.append(
                    'id=(SELECT pushid FROM changesets WHERE node=?)')
                args.append(hex(self.repo.lookup(node)))

            if node_q:
                inner_q += 'AND (%s) ' % ' OR '.join(node_q)

            # Implement max push ID filtering separately from end_id. This makes
            # things simpler, as we don't need to take inclusive/exclusive into
            # play.
            if only_replicated:
                max_push_id = self.last_push_id_replicated(conn=c)
            else:
                max_push_id = self.lastpushid(conn=c)

            inner_q += 'AND id <= ? '
            args.append(max_push_id)

            if reverse:
                inner_q += 'ORDER BY id DESC '
            else:
                inner_q += 'ORDER BY id ASC '

            if limit is not None:
                inner_q += 'LIMIT ? '
                args.append(limit)

            if offset is not None:
                inner_q += 'OFFSET ? '
                args.append(offset)

            q = ('SELECT id, user, date, rev, node FROM (%s) '
                 'LEFT JOIN changesets on id=pushid ' % inner_q)
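            # With only the defaults applied, the assembled query is roughly
            # (whitespace normalized, sketch for illustration):
            #   SELECT id, user, date, rev, node FROM (
            #       SELECT id, user, date FROM pushlog
            #       WHERE id >= ? AND id <= ? ORDER BY id ASC)
            #   LEFT JOIN changesets on id=pushid ORDER BY id ASC, rev ASC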

            if reverse:
                q += 'ORDER BY id DESC, rev DESC '
            else:
                q += 'ORDER BY id ASC, rev ASC '

            res = c.execute(q, args)

            lastid = None
            current = None
            for pushid, who, when, rev, node in res:
                if pushid != lastid:
                    if current:
                        yield current
                    lastid = pushid
                    current = Push(pushid, who, when, [])
                    if node:
                        current.nodes.append(node)
                else:
                    current.nodes.append(node)

            if current:
                yield current
    def import_git_commit(self, commit):
        self.ui.debug(_("importing: %s\n") % commit.id)

        (strip_message, hg_renames, hg_branch,
         extra) = self.extract_hg_metadata(commit.message)

        # get a list of the changed, added, removed files
        files = self.get_files_changed(commit)

        date = (commit.author_time, -commit.author_timezone)
        text = strip_message

        origtext = text
        try:
            text.decode('utf-8')
        except UnicodeDecodeError:
            text = self.decode_guess(text, commit.encoding)

        text = '\n'.join([l.rstrip() for l in text.splitlines()]).strip('\n')
        if text + '\n' != origtext:
            extra['message'] = create_delta(text + '\n', origtext)

        author = commit.author

        # recover extra author metadata that export appended as ' ext:(...)'
        if ' ext:' in commit.author:
            regex = re.compile(r'^(.*?) ext:\((.*)\) <(.*)>$')
            m = regex.match(commit.author)
            if m:
                name = m.group(1)
                ex = urllib.unquote(m.group(2))
                email = m.group(3)
                author = name + ' <' + email + '>' + ex

        if ' <none@none>' in commit.author:
            author = commit.author[:-12]

        try:
            author.decode('utf-8')
        except UnicodeDecodeError:
            origauthor = author
            author = self.decode_guess(author, commit.encoding)
            extra['author'] = create_delta(author, origauthor)

        oldenc = self.swap_out_encoding()

        def findconvergedfiles(p1, p2):
            # If any files have the same contents in both parents of a merge
            # (and are therefore not reported as changed by Git) but are at
            # different file revisions in Mercurial (because they arrived at
            # those contents in different ways), we need to include them in
            # the list of changed files so that Mercurial can join up their
            # filelog histories (same as if the merge was done in Mercurial to
            # begin with).
            if p2 == nullid:
                return []
            manifest1 = self.repo.changectx(p1).manifest()
            manifest2 = self.repo.changectx(p2).manifest()
            return [
                path for path, node1 in manifest1.iteritems()
                if path not in files and manifest2.get(path, node1) != node1
            ]

        def getfilectx(repo, memctx, f):
            info = files.get(f)
            if info is not None:
                # it's a file reported as modified from Git
                delete, mode, sha = info
                if delete:
                    raise IOError

                data = self.git[sha].data
                copied_path = hg_renames.get(f)
                e = self.convert_git_int_mode(mode)
            else:
                # it's a converged file
                fc = context.filectx(self.repo, f, changeid=memctx.p1().rev())
                data = fc.data()
                e = fc.flags()
                copied_path = fc.renamed()

            return context.memfilectx(f, data, 'l' in e, 'x' in e, copied_path)

        gparents = map(self.map_hg_get, commit.parents)
        p1, p2 = (nullid, nullid)
        octopus = False

        if len(gparents) > 1:
            # merge, possibly octopus
            def commit_octopus(p1, p2):
                ctx = context.memctx(self.repo, (p1, p2), text,
                                     list(files) + findconvergedfiles(p1, p2),
                                     getfilectx, author, date,
                                     {'hg-git': 'octopus'})
                return hex(self.repo.commitctx(ctx))

            octopus = len(gparents) > 2
            p2 = gparents.pop()
            p1 = gparents.pop()
            while len(gparents) > 0:
                p2 = commit_octopus(p1, p2)
                p1 = gparents.pop()
        else:
            if gparents:
                p1 = gparents.pop()

        pa = None
        if p2 != nullid:
            node1 = self.repo.changectx(p1)
            node2 = self.repo.changectx(p2)
            pa = node1.ancestor(node2)

        # if named branch, add to extra
        if hg_branch:
            extra['branch'] = hg_branch

        # if committer is different than author, add it to extra
        if commit.author != commit.committer \
               or commit.author_time != commit.commit_time \
               or commit.author_timezone != commit.commit_timezone:
            extra['committer'] = "%s %d %d" % (
                commit.committer, commit.commit_time, -commit.commit_timezone)
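            # e.g. (illustrative) extra['committer'] becomes a string such as
            # "Jane Doe <jane@example.com> 1514764800 -3600": committer
            # identity, commit time in seconds since the epoch, and the
            # negated timezone offset in seconds.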

        if commit.encoding:
            extra['encoding'] = commit.encoding

        if octopus:
            extra['hg-git'] = 'octopus-done'

        # TODO use 'n in self.repo' when we require hg 1.5
        def repo_contains(n):
            try:
                return bool(self.repo.lookup(n))
            except error.RepoLookupError:
                return False

        if not (repo_contains(p1) and repo_contains(p2)):
            raise hgutil.Abort(
                _('you appear to have run strip - '
                  'please run hg git-cleanup'))
        ctx = context.memctx(self.repo, (p1, p2), text,
                             list(files) + findconvergedfiles(p1, p2),
                             getfilectx, author, date, extra)

        node = self.repo.commitctx(ctx)

        self.swap_out_encoding(oldenc)

        # save changeset to mapping file
        cs = hex(node)
        self.map_set(commit.id, cs)
Exemple #45
0
def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase,
                optimes, behaviors, networkattemptlimit, networkattempts=None,
                sparse_profile=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase, optimes, behaviors, networkattemptlimit,
                           networkattempts=networkattempts,
                           sparse_profile=sparse_profile)

    @contextlib.contextmanager
    def timeit(op, behavior):
        behaviors.add(behavior)
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += '_errored'

            optimes.append((op, elapsed))

    ui.write('ensuring %s@%s is available at %s\n' % (url, revision or branch,
                                                      dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = getvfs()(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = getvfs()(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists() and sparse_profile and not destvfs.exists('.hg/sparse'):
        raise error.Abort('cannot enable sparse profile on existing '
                          'non-sparse checkout',
                          hint='use a separate working directory to use sparse')

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists('.hg/sparse'):
        raise error.Abort('cannot use non-sparse checkout on existing sparse '
                          'checkout',
                          hint='use a separate working directory to use sparse')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        with timeit('remove_unshared_dest', 'remove-wdir'):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting destination)\n')
            with timeit('removed_missing_shared_store', 'remove-wdir'):
                destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting destination to improve efficiency)\n')
            with timeit('remove_unpooled_store', 'remove-wdir'):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink('.hg/wlock'):
        ui.warn('(dest has an active working directory lock; assuming it is '
                'left over from a previous process and that the destination '
                'is corrupt; deleting it just to be sure)\n')
        with timeit('remove_locked_wdir', 'remove-wdir'):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                with timeit('remove_unrecovered_shared_store', 'remove-store'):
                    deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
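        # Worked example for attempt 1: (2 ** 1 - 1) * 1.5 = 1.5s base, plus
        # the default 1.0 - 5.0s of jitter, giving the 2.5 - 6.5 range above.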
        backoff = (2 ** networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn('ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True
            else:
                ui.warn('unhandled URLError; reason type: %s; value: %s\n' % (
                    e.reason.__class__.__name__, e.reason))
        else:
            ui.warn('unhandled exception during network operation; type: %s; '
                    'value: %s\n' % (e.__class__.__name__, e))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = peerlookup(clonepeer, '0')
    except error.RepoLookupError:
        raise error.Abort('unable to resolve root revision from clone '
                          'source')
    except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort('source repo appears to be empty')

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = getvfs()(storepath, audit=False)

    if storevfs.isfileorlink('.hg/store/lock'):
        ui.warn('(shared store has an active lock; assuming it is left '
                'over from a previous process and that the store is '
                'corrupt; deleting store and destination just to be '
                'sure)\n')
        if destvfs.exists():
            with timeit('remove_dest_active_lock', 'remove-wdir'):
                destvfs.rmtree(forcibly=True)

        with timeit('remove_shared_store_active_lock', 'remove-store'):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists('.hg/requires'):
        ui.warn('(shared store missing requires file; this is a really '
                'odd failure; deleting store and destination)\n')
        if destvfs.exists():
            with timeit('remove_dest_no_requires', 'remove-wdir'):
                destvfs.rmtree(forcibly=True)

        with timeit('remove_shared_store_no_requires', 'remove-store'):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists('.hg/requires'):
        requires = set(storevfs.read('.hg/requires').splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {'dotencode', 'fncache'}

        missing = required - requires
        if missing:
            ui.warn('(shared store missing requirements: %s; deleting '
                    'store and destination to ensure optimal behavior)\n' %
                    ', '.join(sorted(missing)))
            if destvfs.exists():
                with timeit('remove_dest_missing_requires', 'remove-wdir'):
                    destvfs.rmtree(forcibly=True)

            with timeit('remove_shared_store_missing_requires', 'remove-store'):
                storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)

        if not storevfs.exists():
            behaviors.add('create-store')

        try:
            with timeit('clone', 'clone'):
                shareopts = {'pool': sharebase, 'mode': 'identity'}
                res = hg.clone(ui, {}, clonepeer, dest=dest, update=False,
                               shareopts=shareopts)
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            with timeit('remove_shared_store_revlogerror', 'remove-store'):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False

    if revision:
        try:
            ctx = scmutil.revsingle(repo, revision)
        except error.RepoLookupError:
            ctx = None

        if ctx:
            if not ctx.hex().startswith(revision):
                raise error.Abort('--revision argument is ambiguous',
                                  hint='must be the first 12+ characters of a '
                                       'SHA-1 fragment')

            checkoutrevision = ctx.hex()
            havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch,))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [peerlookup(remote, revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                with timeit('pull', 'pull'):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort('unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
            if old_sparse_fn is not None:
                assert supported_hg(), 'Mercurial version not supported (must be 4.3+)'
                repo.dirstate._sparsematchfn = lambda: matchmod.always(repo.root, '')

            with timeit('purge', 'purge'):
                if purgeext.purge(ui, repo, all=True, abort_on_err=True,
                                  # The function expects all arguments to be
                                  # defined.
                                  **{'print': None,
                                     'print0': None,
                                     'dirs': None,
                                     'files': None}):
                    raise error.Abort('error purging')
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if repo['.'].node() == nullid:
        behaviors.add('empty-wdir')
    else:
        behaviors.add('populated-wdir')

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort('sparse profile %s does not exist at revision '
                              '%s' % (sparse_profile, checkoutrevision))

        # TRACKING hg48 - parseconfig takes `action` param
        if util.versiontuple(n=2) >= (4, 8):
            old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread('sparse'), 'sparse')
        else:
            old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread('sparse'))

        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not \
                old_excludes:
            ui.write('(sparse profile %s already set; no need to update '
                     'sparse config)\n' % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write('(replacing existing sparse config with profile '
                         '%s)\n' % sparse_profile)
            else:
                ui.write('(setting sparse config to profile %s)\n' %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit('sparse_update_config',
                                      'sparse-update-config'):
                fcounts = map(len, sparsemod._updateconfigandrefreshwdir(
                    repo, [], [], [sparse_profile], force=True))

                repo.ui.status('%d files added, %d files dropped, '
                               '%d files conflicting\n' % tuple(fcounts))

            ui.write('(sparse refresh complete)\n')

    op = 'update_sparse' if sparse_profile else 'update'
    behavior = 'update-sparse' if sparse_profile else 'update'

    with timeit(op, behavior):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)

    return None
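# A call sketch of the helper above, with illustrative argument values; in
# the real extension this is driven by a wrapping checkout command rather
# than called directly like this:
#
#   _docheckout(ui, url='https://hg.example.com/repo',
#               dest='/builds/checkout', upstream=None,
#               revision='abcdef012345', branch=None, purge=True,
#               sharebase='/builds/hg-shared', optimes=[], behaviors=set(),
#               networkattemptlimit=3)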
Exemple #46
0
def _docheckout(
    ui,
    url,
    dest,
    upstream,
    revision,
    branch,
    purge,
    sharebase,
    optimes,
    behaviors,
    networkattemptlimit,
    networkattempts=None,
    sparse_profile=None,
    noupdate=False,
):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(
            ui,
            url,
            dest,
            upstream,
            revision,
            branch,
            purge,
            sharebase,
            optimes,
            behaviors,
            networkattemptlimit,
            networkattempts=networkattempts,
            sparse_profile=sparse_profile,
            noupdate=noupdate,
        )

    @contextlib.contextmanager
    def timeit(op, behavior):
        behaviors.add(behavior)
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += "_errored"

            optimes.append((op, elapsed))

    ui.write(b"ensuring %s@%s is available at %s\n" %
             (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = vfs.vfs(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read(b".hg/sharedpath").strip()
        if storepath.endswith(b".hg"):
            storepath = os.path.dirname(storepath)

        storevfs = vfs.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists(b".hg"):
        raise error.Abort(b"destination exists but no .hg directory")

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if (destvfs.exists() and sparse_profile
            and not destvfs.exists(b".hg/sparse")):
        raise error.Abort(
            b"cannot enable sparse profile on existing "
            b"non-sparse checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot use non-sparse checkout on existing sparse "
            b"checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
        ui.warn(b"(destination is not shared; deleting)\n")
        with timeit("remove_unshared_dest", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists(b".hg/sharedpath"):
        storepath = destvfs.read(b".hg/sharedpath").strip()

        ui.write(b"(existing repository shared store: %s)\n" % storepath)

        if not os.path.exists(storepath):
            ui.warn(b"(shared store does not exist; deleting destination)\n")
            with timeit("removed_missing_shared_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)
        elif not re.search(br"[a-f0-9]{40}/\.hg$",
                           storepath.replace(b"\\", b"/")):
            ui.warn(b"(shared store does not belong to pooled storage; "
                    b"deleting destination to improve efficiency)\n")
            with timeit("remove_unpooled_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink(b".hg/wlock"):
        ui.warn(b"(dest has an active working directory lock; assuming it is "
                b"left over from a previous process and that the destination "
                b"is corrupt; deleting it just to be sure)\n")
        with timeit("remove_locked_wdir", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if pycompat.bytestr(e) == _(b"abandoned transaction found"):
            ui.warn(b"(abandoned transaction found; trying to recover)\n")
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn(b"(could not recover repo state; "
                        b"deleting shared store)\n")
                with timeit("remove_unrecovered_shared_store", "remove-store"):
                    deletesharedstore()

            ui.warn(b"(attempting checkout from beginning)\n")
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort(b"reached maximum number of network attempts; "
                              b"giving up\n")

        ui.warn(b"(retrying after network failure on attempt %d of %d)\n" %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
        jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _(b"repository is unrelated"):
                ui.warn(b"(repository is unrelated; deleting)\n")
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_(b"stream ended unexpectedly")):
                ui.warn(b"%s\n" % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        # TODO test this branch
        elif isinstance(e, error.ResponseError):
            if e.args[0].startswith(
                    _(b"unexpected response from remote server:")):
                ui.warn(
                    b"(unexpected response from remote server; retrying)\n")
                destvfs.rmtree(forcibly=True)
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn(b"ssl error: %s\n" % pycompat.bytestr(str(e)))
            handlenetworkfailure()
            return True
        elif isinstance(e, urllibcompat.urlerr.urlerror):
            if isinstance(e.reason, socket.error):
                ui.warn(b"socket error: %s\n" %
                        pycompat.bytestr(str(e.reason)))
                handlenetworkfailure()
                return True
            else:
                ui.warn(b"unhandled URLError; reason type: %s; value: %s\n" % (
                    pycompat.bytestr(e.reason.__class__.__name__),
                    pycompat.bytestr(str(e.reason)),
                ))
        else:
            ui.warn(b"unhandled exception during network operation; type: %s; "
                    b"value: %s\n" % (pycompat.bytestr(
                        e.__class__.__name__), pycompat.bytestr(str(e))))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = peerlookup(clonepeer, b"0")
    except error.RepoLookupError:
        raise error.Abort(b"unable to resolve root revision from clone "
                          b"source")
    except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort(b"source repo appears to be empty")

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = vfs.vfs(storepath, audit=False)

    if storevfs.isfileorlink(b".hg/store/lock"):
        ui.warn(b"(shared store has an active lock; assuming it is left "
                b"over from a previous process and that the store is "
                b"corrupt; deleting store and destination just to be "
                b"sure)\n")
        if destvfs.exists():
            with timeit("remove_dest_active_lock", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_active_lock", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists(b".hg/requires"):
        ui.warn(b"(shared store missing requires file; this is a really "
                b"odd failure; deleting store and destination)\n")
        if destvfs.exists():
            with timeit("remove_dest_no_requires", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_no_requires", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists(b".hg/requires"):
        requires = set(storevfs.read(b".hg/requires").splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {b"dotencode", b"fncache"}

        missing = required - requires
        if missing:
            ui.warn(b"(shared store missing requirements: %s; deleting "
                    b"store and destination to ensure optimal behavior)\n" %
                    b", ".join(sorted(missing)))
            if destvfs.exists():
                with timeit("remove_dest_missing_requires", "remove-wdir"):
                    destvfs.rmtree(forcibly=True)

            with timeit("remove_shared_store_missing_requires",
                        "remove-store"):
                storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, "ensuredirs"):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write(b"(cloning from upstream repo %s)\n" % upstream)

        if not storevfs.exists():
            behaviors.add(b"create-store")

        try:
            with timeit("clone", "clone"):
                shareopts = {b"pool": sharebase, b"mode": b"identity"}
                res = hg.clone(
                    ui,
                    {},
                    clonepeer,
                    dest=dest,
                    update=False,
                    shareopts=shareopts,
                    stream=True,
                )
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            with timeit("remove_shared_store_revlogerror", "remote-store"):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort(b"clone failed")

        # Verify it is using shared pool storage.
        if not destvfs.exists(b".hg/sharedpath"):
            raise error.Abort(b"clone did not create a shared repo")

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False

    if revision:
        try:
            ctx = scmutil.revsingle(repo, revision)
        except error.RepoLookupError:
            ctx = None

        if ctx:
            if not ctx.hex().startswith(revision):
                raise error.Abort(
                    b"--revision argument is ambiguous",
                    hint=b"must be the first 12+ characters of a "
                    b"SHA-1 fragment",
                )

            checkoutrevision = ctx.hex()
            havewantedrev = True

    if not havewantedrev:
        ui.write(b"(pulling to obtain %s)\n" % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [peerlookup(remote, revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn(b"(remote resolved %s to %s; "
                        b"result is not deterministic)\n" %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn(b"(revision already present locally; not pulling)\n")
            else:
                with timeit("pull", "pull"):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort(b"unable to pull requested revision")
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
    if noupdate:
        ui.write(b"(skipping update since `-U` was passed)\n")
        return None

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write(b"(purging working directory)\n")
        purge = getattr(commands, "purge", None)
        if not purge:
            purge = extensions.find(b"purge").purge

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
            if old_sparse_fn is not None:
                # TRACKING hg50
                # Arguments passed to `matchmod.always` were unused and have been removed
                if util.versiontuple(n=2) >= (5, 0):
                    repo.dirstate._sparsematchfn = lambda: matchmod.always()
                else:
                    repo.dirstate._sparsematchfn = lambda: matchmod.always(
                        repo.root, "")

            with timeit("purge", "purge"):
                if purge(
                        ui,
                        repo,
                        all=True,
                        abort_on_err=True,
                        # The function expects all arguments to be
                        # defined.
                        **{
                            "print": None,
                            "print0": None,
                            "dirs": None,
                            "files": None
                        }):
                    raise error.Abort(b"error purging")
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if repo[b"."].node() == nullid:
        behaviors.add("empty-wdir")
    else:
        behaviors.add("populated-wdir")

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort(b"sparse profile %s does not exist at revision "
                              b"%s" % (sparse_profile, checkoutrevision))

        # TRACKING hg48 - parseconfig takes `action` param
        if util.versiontuple(n=2) >= (4, 8):
            old_config = sparsemod.parseconfig(repo.ui,
                                               repo.vfs.tryread(b"sparse"),
                                               b"sparse")
        else:
            old_config = sparsemod.parseconfig(repo.ui,
                                               repo.vfs.tryread(b"sparse"))

        old_includes, old_excludes, old_profiles = old_config

        if (old_profiles == {sparse_profile} and not old_includes
                and not old_excludes):
            ui.write(b"(sparse profile %s already set; no need to update "
                     b"sparse config)\n" % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write(b"(replacing existing sparse config with profile "
                         b"%s)\n" % sparse_profile)
            else:
                ui.write(b"(setting sparse config to profile %s)\n" %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit("sparse_update_config",
                                      "sparse-update-config"):
                # pylint --py3k: W1636
                fcounts = list(
                    map(
                        len,
                        sparsemod._updateconfigandrefreshwdir(repo, [], [],
                                                              [sparse_profile],
                                                              force=True),
                    ))

                repo.ui.status(b"%d files added, %d files dropped, "
                               b"%d files conflicting\n" % tuple(fcounts))

            ui.write(b"(sparse refresh complete)\n")

    op = "update_sparse" if sparse_profile else "update"
    behavior = "update-sparse" if sparse_profile else "update"

    with timeit(op, behavior):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort(b"error updating")

    ui.write(b"updated to %s\n" % checkoutrevision)

    return None
def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if not repo.changelog.index.has_node(head):
                if head not in nodestobundle:
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui
                    )
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (
                            bundlerepo,
                            bundleroots,
                            newbundlefile,
                        )

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head])
                )
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't cleanup the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
    try:

        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            origvalues = orig(self, namespace)
            if namespace == b'phases' and pullfrombundlestore:
                if origvalues.get(b'publishing') == b'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues[b'publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(
            localrepo.localrepository, b'listkeys', _listkeys
        )
        wrappedlistkeys = True
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(
            repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
        )
    finally:
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(
                localrepo.localrepository, b'listkeys', _listkeys
            )
    return result
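
This wrapper only takes effect once it is installed over exchange.getbundlechunks. A minimal setup sketch, assuming the usual extsetup entry point (the hook name is an assumption, and the b'...' attribute name simply mirrors the convention the snippet itself uses for wrapfunction; newer Mercurial may expect a plain str):

from mercurial import exchange, extensions

def extsetup(ui):
    # Serve scratch heads from the bundle store by routing bundle generation
    # through the getbundlechunks wrapper defined above.
    extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)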
Example #48
def _verify(oid, content):
    realoid = node.hex(hashlib.sha256(content).digest())
    if realoid != oid:
        raise LfsCorruptionError(_('detected corrupt lfs object: %s') % oid,
                                 hint=_('run hg verify'))
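
The oid being verified is just the lowercase SHA-256 hex digest of the blob's content. A self-contained illustration of the same comparison using only hashlib (the content and variable names are invented for the example):

import hashlib

content = b'example lfs blob\n'
oid = hashlib.sha256(content).hexdigest()   # what the LFS pointer records

print(hashlib.sha256(content).hexdigest() == oid)         # True: blob is intact
print(hashlib.sha256(content + b'x').hexdigest() == oid)  # False: _verify would raise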
Example #49
 def dump(self, xw):
     xw.writeAttribute('root', hglib.tounicode(self._root))
     xw.writeAttribute('shortname', self.shortname())
     xw.writeAttribute('basenode', node.hex(self.basenode()))
Example #50
 def lookuprev(self, rev):
     try:
         return nodemod.hex(self.repo.lookup(rev))
     except (error.RepoError, error.LookupError):
         return None
Example #51
 def test_revid_bzr_to_foreign(self):
     self.assertEquals(
         "myrev",
         self.mapping.revision_id_bzr_to_foreign(self.mapping.revid_prefix +
                                                 ":" + hex("myrev"))[0])
Example #52
 def getancestors(self, name, node, known=None):
     self._fileservice.prefetch([(name, hex(node))],
                                force=True,
                                fetchdata=False,
                                fetchhistory=True)
     return self._shared.getancestors(name, node, known=known)
Example #53
 def getmeta(self, name, node):
     self._fileservice.prefetch([(name, hex(node))],
                                force=True,
                                fetchdata=True)
     return self._shared.getmeta(name, node)
Example #54
 def test_revid_foreign_to_bzr(self):
     self.assertEquals(self.mapping.revid_prefix + ":" + hex("a" * 20),
                       self.mapping.revision_id_foreign_to_bzr("a" * 20))
Example #55
 def write(self):
     f = self._repo.vfs('remoterefs', 'w', atomictemp=True)
     for ref in sorted(self):
         f.write('%s %s\n' % (hex(self[ref]), encoding.fromlocal(ref)))
     f.close()
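
Each remote ref is stored as one line of the form "<40-char hex node> <ref name>". A minimal sketch of reading that format back, assuming a plain file path (the real code would open the file through the repository's vfs and convert names back with encoding.tolocal):

def read_remoterefs(path):
    # Hypothetical reverse of write() above: one "<hex node> <ref>" per line.
    refs = {}
    with open(path, 'rb') as f:
        for line in f:
            sha, ref = line.rstrip(b'\n').split(b' ', 1)
            refs[ref] = sha
    return refs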
Example #56
def comparison(web, req, tmpl):
    """
    /comparison/{revision}/{path}
    -----------------------------

    Show a comparison between the old and new versions of a file from changes
    made on a particular revision.

    This is similar to the ``diff`` handler. However, this form features
    a split or side-by-side diff rather than a unified diff.

    The ``context`` query string argument can be used to control the lines of
    context in the diff.

    The ``filecomparison`` template is rendered.
    """
    ctx = webutil.changectx(web.repo, req)
    if 'file' not in req.form:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = webutil.cleanpath(web.repo, req.form['file'][0])
    rename = path in ctx and webutil.renamelink(ctx[path]) or []

    parsecontext = lambda v: v == 'full' and -1 or int(v)
    if 'context' in req.form:
        context = parsecontext(req.form['context'][0])
    else:
        context = parsecontext(web.config('web', 'comparisoncontext', '5'))

    def filelines(f):
        if util.binary(f.data()):
            mt = mimetypes.guess_type(f.path())[0]
            if not mt:
                mt = 'application/octet-stream'
            return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
        return f.data().splitlines()

    parent = ctx.p1()
    leftrev = parent.rev()
    leftnode = parent.node()
    rightrev = ctx.rev()
    rightnode = ctx.node()
    if path in ctx:
        fctx = ctx[path]
        rightlines = filelines(fctx)
        if path not in parent:
            leftlines = ()
        else:
            pfctx = parent[path]
            leftlines = filelines(pfctx)
    else:
        rightlines = ()
        fctx = ctx.parents()[0][path]
        leftlines = filelines(fctx)

    comparison = webutil.compare(tmpl, context, leftlines, rightlines)
    return tmpl('filecomparison',
                file=path,
                node=hex(ctx.node()),
                rev=ctx.rev(),
                symrev=webutil.symrevorshortnode(req, ctx),
                date=ctx.date(),
                desc=ctx.description(),
                extra=ctx.extra(),
                author=ctx.user(),
                rename=rename,
                branch=webutil.nodebranchnodefault(ctx),
                parent=webutil.parents(fctx),
                child=webutil.children(fctx),
                tags=webutil.nodetagsdict(web.repo, ctx.node()),
                bookmarks=webutil.nodebookmarksdict(web.repo, ctx.node()),
                leftrev=leftrev,
                leftnode=hex(leftnode),
                rightrev=rightrev,
                rightnode=hex(rightnode),
                comparison=comparison)
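
The ``context`` query string argument mentioned in the docstring is handled by the parsecontext helper above: the literal string 'full' maps to -1 (which the comparison code treats as unlimited context), anything else goes through int(). A quick standalone check of that one-liner:

parsecontext = lambda v: v == 'full' and -1 or int(v)
print(parsecontext('full'))   # -1, i.e. unlimited context
print(parsecontext('5'))      # 5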
Example #57
def getcachekey(reponame, file, id):
    pathhash = node.hex(hashlib.sha1(file).digest())
    return os.path.join(reponame, pathhash[:2], pathhash[2:], id)
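
The key shards the cache by the SHA-1 of the file path: the first two hex characters become a directory, the remaining 38 a subdirectory, and the id the final path component. A self-contained illustration using only hashlib and os.path (the arguments are invented):

import hashlib
import os

filename = b'src/module.py'
pathhash = hashlib.sha1(filename).hexdigest()
print(os.path.join('myrepo', pathhash[:2], pathhash[2:], 'a1b2c3d4'))
# e.g. myrepo/<2 hex chars>/<38 hex chars>/a1b2c3d4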
Example #58
 def gitorhg(n):
     # Return the git sha recorded for this hg node in the hg-git map,
     # falling back to the hg node itself when no mapping exists.
     hn = self.repo.handler.map_hg_get(hex(n))
     if hn is not None:
         return bin(hn)
     return n
Example #59
 def check_heads():
     # The pushing client sends either the hex heads it saw when the push
     # started or the hexlified sentinel 'force' to skip this race check.
     heads = map(hex, repo.heads())
     return their_heads == [hex('force')] or their_heads == heads
Example #60
 def get_tip(self):
     """
     Get the changeset id of the hg tip as a 40-character hexadecimal string.
     """
     return hex(self.repo.changelog.tip())
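
Here hex() is Mercurial's node.hex, i.e. a hexlify of the 20-byte tip node, so the result is always a 40-character string. A rough standalone equivalent, assuming a raw binary node rather than a real repository:

import binascii

tip_node = b'\x00' * 20                       # nullid, e.g. the tip of an empty repository
print(binascii.hexlify(tip_node).decode())    # '000...0', 40 characters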