Example #1
def get_repo(url, alias):
    global peer

    myui = ui.ui()
    myui.setconfig('ui', 'interactive', 'off')
    myui.fout = sys.stderr

    if get_config_bool('remote-hg.insecure'):
        myui.setconfig('web', 'cacerts', '')

    extensions.loadall(myui)

    if hg.islocal(url) and not os.environ.get('GIT_REMOTE_HG_TEST_REMOTE'):
        repo = hg.repository(myui, url)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    else:
        shared_path = os.path.join(gitdir, 'hg')

        # check and upgrade old organization
        hg_path = os.path.join(shared_path, '.hg')
        if os.path.exists(shared_path) and not os.path.exists(hg_path):
            repos = os.listdir(shared_path)
            for x in repos:
                local_hg = os.path.join(shared_path, x, 'clone', '.hg')
                if not os.path.exists(local_hg):
                    continue
                if not os.path.exists(hg_path):
                    shutil.move(local_hg, hg_path)
                shutil.rmtree(os.path.join(shared_path, x, 'clone'))

        # setup shared repo (if not there)
        try:
            hg.peer(myui, {}, shared_path, create=True)
        except error.RepoError:
            pass

        if not os.path.exists(dirname):
            os.makedirs(dirname)

        local_path = os.path.join(dirname, 'clone')
        if not os.path.exists(local_path):
            hg.share(myui, shared_path, local_path, update=False)
        else:
            # make sure the shared path is always up-to-date
            util.writefile(os.path.join(local_path, '.hg', 'sharedpath'), hg_path)

        repo = hg.repository(myui, local_path)
        try:
            peer = hg.peer(myui, {}, url)
        except:
            die('Repository error')
        repo.pull(peer, heads=None, force=True)

        updatebookmarks(repo, peer)

    return repo
Example #2
def _pullreviewidentifiers(repo, identifiers):
    """Pull down information for a list of review identifier strings.

    This will request the currently published data for a review identifier,
    including the mapping of commits to review request ids for all review
    requests that are currently part of the identifier.
    """
    reviews = repo.reviews

    # In the ideal world, we'd use RBTools to talk directly to the ReviewBoard
    # API. Unfortunately, the Mercurial distribution on Windows doesn't ship
    # with the json module. So, we proxy through the Mercurial server and have
    # it do all the heavy lifting.
    # FUTURE Hook up RBTools directly.
    remote = hg.peer(repo, {}, reviews.remoteurl)
    caps = getreviewcaps(remote)
    if 'pullreviews' not in caps:
        raise util.Abort('cannot pull code review metadata; '
                         'server lacks necessary features')

    req = commonrequestdict(repo.ui)
    req['identifiers'] = [str(i) for i in identifiers]
    res = calljsoncommand(repo.ui, remote, 'pullreviews', data=req)

    for rid, data in sorted(res['reviewrequests'].iteritems()):
        reviews.savereviewrequest(rid, data)

    return res['reviewrequests']
Example #3
File: hg.py Project: pombreda/bob
 def getTip(self):
     hg_ui = ui.ui()
     if hasattr(hg, 'peer'):
         repo = hg.peer(hg_ui, {}, self.uri)
     else:
         repo = hg.repository(hg_ui, self.uri)
     return short(repo.heads()[0])
Example #4
def nclone(ui, source, dest=None, **opts):
    '''make a copy of an existing repository and all nested repositories

    Create a copy of an existing repository in a new directory.

    See the help of the clone command for more information.'''
    origsource = ui.expandpath(source)
    remotesource, remotebranch = hg.parseurl(origsource, opts.get('branch'))
    if hasattr(hg, 'peer'):
        remoterepo = hg.peer(ui, opts, remotesource)
        localrepo = remoterepo.local()
        if localrepo:
            remoterepo = localrepo
    else:
        remoterepo = hg.repository(hg.remoteui(ui, opts), remotesource)
    if dest is None:
        dest = hg.defaultdest(source)
        ui.status(_("destination directory: %s\n") % dest)
    for npath in remoterepo.nested:
        if npath == '.':
            npath = ''
        u = util.url(source)
        if u.scheme:
            nsource = '%s/%s' % (source, npath)
        else:
            nsource = os.path.join(source, npath)
        ndest = os.path.join(dest, npath)
        ui.status('[%s]\n' % os.path.normpath(
            os.path.join(os.path.basename(dest),
                ndest[len(dest) + 1:])))
        commands.clone(ui, nsource, dest=ndest, **opts)
        ui.status('\n')
Example #5
def get_repo(remote):
    if not changegroup or experiment('wire'):
        if not changegroup and not check_enabled('no-mercurial'):
            logging.warning('Mercurial libraries not found. Falling back to '
                            'native access.')
        logging.warning(
            'Native access to mercurial repositories is experimental!')

        stream = HgRepoHelper.connect(remote.url)
        if stream:
            return bundlerepo(remote.url, stream)
        return HelperRepo(remote.url)

    if remote.parsed_url.scheme == 'file':
        # Make file://c:/... paths work by taking the netloc
        path = remote.parsed_url.netloc + remote.parsed_url.path
        if sys.platform == 'win32':
            # TODO: This probably needs more thought.
            path = path.lstrip('/')
        if not os.path.isdir(path):
            return bundlerepo(path)
    ui = get_ui()
    if changegroup and remote.parsed_url.scheme == 'file':
        repo = localpeer(ui, path)
    else:
        try:
            repo = hg.peer(ui, {}, remote.url)
        except (error.RepoError, urllib2.HTTPError, IOError):
            return bundlerepo(remote.url, HTTPReader(remote.url))

    assert repo.capable('getbundle')

    return repo
Example #6
def createrepomanifest(ui, repo, search=None, replace=None):
    """Create a manifest of available review repositories.

    The arguments define literal string search and replace values to use to
    convert assumed http(s):// repo URLs into ssh:// URLs.
    """
    repos = {}
    for url in getreposfromreviewboard(repo):
        peer = hg.peer(ui, {}, url)
        root = peer.lookup('0')
        # Filter out empty repos.
        if root == nullid:
            continue

        if not url.startswith(('http://', 'https://')):
            raise util.Abort('Expected http:// or https:// repo: %s' % url)

        sshurl = url.replace(search, replace)

        repos[root] = (url, sshurl)

    lines = []
    for root, (http, ssh) in sorted(repos.items()):
        lines.append('%s %s %s\n' % (hex(root), http, ssh))

    data = ''.join(lines)
    repo.vfs.write('reviewrepos', data)
    ui.write(data)
Example #7
def get_repo(remote):
    if remote.parsed_url.scheme == 'file':
        path = remote.parsed_url.path
        if sys.platform == 'win32':
            # TODO: This probably needs more thought.
            path = path.lstrip('/')
        if not os.path.isdir(path):
            return bundlerepo(path)
    if not changegroup or Git.config('cinnabar.experiments') == 'true':
        if not changegroup:
            logging.warning('Mercurial libraries not found. Falling back to '
                            'native access.')
        logging.warning(
            'Native access to mercurial repositories is experimental!')
        try:
            return HelperRepo(remote.url)
        except NoHelperException:
            raise Exception('Native access to mercurial repositories requires '
                            'the helper.')
    if changegroup and remote.parsed_url.scheme == 'file':
        repo = localpeer(get_ui(), path)
    else:
        repo = hg.peer(get_ui(), {}, remote.url)
    assert repo.capable('getbundle')

    return repo
Example #8
def _findbundle(repo, rev):
    """Returns the backup bundle that contains the given rev. If found, it
    returns the bundle peer and the full rev hash. If not found, it returns None
    and the given rev value.
    """
    ui = repo.ui
    backuppath = repo.vfs.join("strip-backup")
    backups = filter(os.path.isfile, glob.glob(backuppath + "/*.hg"))
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, pycompat.getcwd())
        source = ui.expandpath(source)
        source, branches = hg.parseurl(source)
        other = hg.peer(repo, {}, source)

        quiet = ui.quiet
        try:
            ui.quiet = True
            ret = bundlerepo.getremotechanges(ui, repo, other, None, None, None)
            localother, chlist, cleanupfn = ret
            for node in chlist:
                if hex(node).startswith(rev):
                    return other, node
        except error.LookupError:
            continue
        finally:
            ui.quiet = quiet

    return None, rev
Example #9
def annotatepeer(repo):
    ui = repo.ui

    # fileservice belongs to remotefilelog
    fileservice = getattr(repo, 'fileservice', None)
    sharepeer = ui.configbool('fastannotate', 'clientsharepeer', True)

    if sharepeer and fileservice:
        ui.debug('fastannotate: using remotefilelog connection pool\n')
        conn = repo.connectionpool.get(repo.fallbackpath)
        peer = conn.peer
        stolen = True
    else:
        remotepath = ui.expandpath(
            ui.config('fastannotate', 'remotepath', 'default'))
        peer = hg.peer(ui, {}, remotepath)
        stolen = False

    try:
        # Note: fastannotate requests should never trigger a remotefilelog
        # "getfiles" request, because "getfiles" puts the stream into a state
        # that does not exit. See "clientfetch": it does "getannotate" before
        # any hg stuff that could potentially trigger a "getfiles".
        yield peer
    finally:
        if not stolen:
            for i in ['close', 'cleanup']:
                getattr(peer, i, lambda: None)()
        else:
            conn.__exit__(None, None, None)
Example #10
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches
    '''
    cachepaths = set()

    # get the system client cache
    systemcache = ui.config("remotefilelog", "cachepath")
    if systemcache:
        systemcache = util.expandpath(systemcache)
        cachepaths.add(systemcache)

    # get repo client and server cache
    repopaths = [ui.environ['PWD']]
    repopaths.extend(args)
    repos = []
    for repopath in repopaths:
        try:
            repo = hg.peer(ui, {}, repopath)
            repos.append(repo)

            repocache = repo.ui.config("remotefilelog", "cachepath")
            if repocache:
                repocache = util.expandpath(repocache)
                cachepaths.add(repocache)
        except error.RepoError:
            pass

    # gc client cache
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # gc server cache
    for repo in repos:
        remotefilelogserver.gcserver(ui, repo._repo)
Example #11
    def build_repo(self, url):
        '''Make the Mercurial repo object self.repo available. If the local
        clone does not exist, clone it, otherwise, ensure it is fetched.'''
        myui = ui()
        myui.setconfig('ui', 'interactive', 'off')
        myui.setconfig('extensions', 'mq', '')

        local_path = self.remotedir.joinpath('clone')
        if not local_path.exists():
            try:
                self.peer, dstpeer = hg.clone(myui, {}, url.encode('utf-8'),
                    local_path.encode('utf-8'), update=False, pull=True)
            except (RepoError, Abort) as e:
                sys.stderr.write("abort: %s\n" % e)
                if e.hint:
                    sys.stderr.write("(%s)\n" % e.hint)
                sys.exit(-1)

            self.repo = dstpeer.local()
        else:
            self.repo = hg.repository(myui, local_path.encode('utf-8'))
            self.peer = hg.peer(myui, {}, url.encode('utf-8'))
            self.repo.pull(self.peer, heads=None, force=True)

        self.marks.upgrade_marks(self.repo)
Example #12
def cloneunified(ui, dest='gecko', **opts):
    """Clone main Mozilla repositories into a unified local repository.

    This command will clone the most common Mozilla repositories and will
    add changesets and remote tracking markers into a common repository.

    If the destination path is not given, 'gecko' will be used.

    This command is effectively an alias for a number of other commands.
    However, due to the way Mercurial internally stores data, it is recommended
    to run this command to ensure optimal storage of data.
    """
    path = ui.expandpath(dest)
    repo = hg.repository(ui, path, create=True)

    success = False

    try:
        for tree in ('esr17', 'b2g18', 'release', 'beta', 'aurora', 'central',
            'inbound'):
            peer = hg.peer(ui, {}, tree)
            ui.warn('Pulling from %s.\n' % peer.url())
            repo.pull(peer)
        res = hg.update(repo, repo.lookup('central/default'))
        success = True
        return res
    finally:
        if not success:
            shutil.rmtree(path)
Example #13
    def build_repo(self, url):
        '''Make the Mercurial repo object self.repo available. If the local
        clone does not exist, clone it, otherwise, ensure it is fetched.'''
        myui = ui()
        myui.setconfig('ui', 'interactive', 'off')
        myui.setconfig('extensions', 'mq', '')
        # FIXME: the following is a hack to achieve hg-git / remote-git compatibility
        # at least for *local* operations. still need to figure out what the right
        # thing to do is.
        myui.setconfig('phases', 'publish', False)

        local_path = self.remotedir.joinpath('clone')
        if not local_path.exists():
            try:
                self.peer, dstpeer = hg.clone(myui, {}, url.encode('utf-8'),
                    local_path.encode('utf-8'), update=False, pull=True)
            except (RepoError, Abort) as e:
                sys.stderr.write("abort: %s\n" % e)
                if e.hint:
                    sys.stderr.write("(%s)\n" % e.hint)
                sys.exit(-1)

            self.repo = dstpeer.local()
        else:
            self.repo = hg.repository(myui, local_path.encode('utf-8'))
            self.peer = hg.peer(myui, {}, url.encode('utf-8'))
            hg_pull(self.repo, self.peer, None, True)

        self.marks.upgrade_marks(self)
Example #14
def _lookup_node(repo, hexnode, from_scm_type):
    gitlookupnode = '_gitlookup_%s_%s' % (from_scm_type, hexnode)

    # ui.expandpath('default') returns 'default' if there is no default
    # path. This can be the case when the command is run on the server.
    # In that case let's run lookup() command locally.
    try:
        return repo.lookup(gitlookupnode)
    except error.RepoLookupError:
        # Note: RepoLookupError is caught here because repo.lookup()
        # can throw only this exception.
        peerpath = repo.ui.expandpath('default')

        # sshing can cause junk 'remote: ...' output to stdout, so we need to
        # redirect it temporarily so automation can parse the result easily.
        oldfout = repo.ui.fout
        try:
            repo.baseui.fout = repo.ui.ferr
            remoterepo = hg.peer(repo, {}, peerpath)
            return remoterepo.lookup(gitlookupnode)
        except error.RepoError:
            # Note: RepoError can be thrown by hg.peer(), RepoLookupError
            # can be thrown by remoterepo.lookup(). RepoLookupError is a
            # subclass of RepoError so catching just error.RepoError is enough.
            return None
        finally:
            repo.baseui.fout = oldfout
Example #15
    def build_repo(self, url):
        """Make the Mercurial repo object self.repo available. If the local
        clone does not exist, clone it, otherwise, ensure it is fetched."""
        myui = ui()
        myui.setconfig("ui", "interactive", "off")
        myui.setconfig("extensions", "mq", "")
        # FIXME: the following is a hack to achieve hg-git / remote-git compatibility
        # at least for *local* operations. still need to figure out what the right
        # thing to do is.
        myui.setconfig("phases", "publish", False)

        local_path = self.remotedir.joinpath("clone")
        if not local_path.exists():
            try:
                self.peer, dstpeer = hg.clone(
                    myui, {}, url.encode("utf-8"), local_path.encode("utf-8"), update=False, pull=True
                )
            except (RepoError, Abort) as e:
                sys.stderr.write("abort: %s\n" % e)
                if e.hint:
                    sys.stderr.write("(%s)\n" % e.hint)
                sys.exit(-1)

            self.repo = dstpeer.local()
        else:
            self.repo = hg.repository(myui, local_path.encode("utf-8"))
            self.peer = hg.peer(myui, {}, url.encode("utf-8"))
            self.repo.pull(self.peer, heads=None, force=True)

        self.marks.upgrade_marks(self)
Example #16
def getpeer(ui, opts, source):
    try:
        peer = hg.peer(ui, opts, source)
        # ewwww, life without an API is messy
        if isinstance(peer, localrepo.localpeer):
            peer = localrepo.locallegacypeer(peer._repo)
        return peer
    except AttributeError:
        return hg.repository(ui, source)
Example #17
def find_pull_peer(repo, opts, source):
    source, branches = hg.parseurl(repo.ui.expandpath(source), opts.get('branch'))
    try:
        return hg.peer(repo, opts, source)
    except error.RepoError:
        if source == "default":
            return
        else:
            raise
Example #18
def identify(originalIdentify, ui, repo, *pats, **opts):
    if not opts["list"]:
        return originalIdentify(ui, repo, *pats, **opts)
    else:
        peer = hg.peer(ui, {}, pats[0])
        for name, rev in peer.branchmap().items():
            info = name
            for r in rev:
                info += ' ' + node.short(r)
            print(info)
Example #19
def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, 'repos')
    if not os.path.exists(repospath):
        ui.warn("no known cache at %s\n" % cachepath)
        return

    reposfile = open(repospath, 'r')
    repos = set([r[:-1] for r in reposfile.readlines()])
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    _analyzing = _("analyzing repositories")

    localcache = None

    count = 0
    for path in repos:
        ui.progress(_analyzing, count, unit="repos", total=len(repos))
        count += 1
        path = ui.expandpath(path)
        try:
            peer = hg.peer(ui, {}, path)
        except error.RepoError:
            continue

        validrepos.append(path)

        reponame = peer._repo.name
        if not localcache:
            localcache = peer._repo.fileservice.localcache
        keep = peer._repo.revs("(parents(draft()) + heads(all())) & public()")
        for r in keep:
            m = peer._repo[r].manifest()
            for filename, filenode in m.iteritems():
                key = fileserverclient.getcachekey(reponame, filename,
                    hex(filenode))
                keepkeys.add(key)

    ui.progress(_analyzing, None)

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, 'w')
        reposfile.writelines([("%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    localcache.gc(keepkeys)
Example #20
def find_push_peer(repo, opts, dest):
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))

    try:
        return hg.peer(repo, opts, dest)
    except error.RepoError:
        if dest == "default-push":
            return
        else:
            raise
Example #21
def get_repo(url):
    parsed_url = urlparse(url)
    if not parsed_url.scheme:
        url = urlunparse(('file', '', parsed_url.path, '', '', ''))
    ui_ = ui.ui()
    ui_.fout = ui_.ferr
    if (not parsed_url.scheme or parsed_url.scheme == 'file') and \
            not os.path.isdir(parsed_url.path):
        return bundlerepo(parsed_url.path)
    else:
        repo = hg.peer(ui_, {}, url)
        assert repo.capable('getbundle')
        return repo
Example #22
def get_repo(url):
    parsed_url = munge_url(url)
    if parsed_url.scheme == 'file':
        path = parsed_url.path
        if sys.platform == 'win32':
            # TODO: This probably needs more thought.
            path = path.lstrip('/')
        if not os.path.isdir(path):
            return bundlerepo(path)
    url = urlunparse(parsed_url)
    repo = hg.peer(get_ui(), {}, url)
    assert repo.capable('getbundle')
    return repo
Example #23
def remoteparent(ui, repo, opts, rev, upstream=None):
    if upstream:
        remotepath = ui.expandpath(upstream)
    else:
        remotepath = ui.expandpath(ui.expandpath('reviewboard', 'default-push'),
                                   'default')
    remoterepo = hg.peer(repo, opts, remotepath)
    out = findoutgoing(repo, remoterepo)
    ancestors = repo.changelog.ancestors([repo.changelog.rev(repo.lookup(rev))])
    for o in out:
        orev = repo[o]
        a, b, c = repo.changelog.nodesbetween([orev.node()], [repo[rev].node()])
        if a:
            return orev.parents()[0]
Example #24
 def test(ui, repo, dest=None, **opts):
     dest = ui.expandpath(dest or 'default-push', dest or 'default')
     dest, branches = hg.parseurl(dest, opts.get('branch'))
     revs, checkout = hg.addbranchrevs(repo, repo, branches,
             opts.get('rev'))
     if hasattr(hg, 'peer'):
         other = hg.peer(ui, opts, dest)
         localother = other.local()
         if localother:
             other = localother
     else:
         other = hg.repository(hg.remoteui(repo, opts), dest)
     if revs:
         revs = [other.lookup(rev) for rev in revs]
Example #25
 def getoutgoing(dest, revs):
     '''Return the revisions present locally but not in dest'''
     dest = ui.expandpath(dest or 'default-push', dest or 'default')
     dest, branches = hg.parseurl(dest)
     revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
     other = hg.peer(repo, opts, dest)
     ui.status(_('comparing with %s\n') % util.hidepassword(dest))
     common, _anyinc, _heads = discovery.findcommonincoming(repo, other)
     nodes = revs and map(repo.lookup, revs) or revs
     o = repo.changelog.findmissing(common, heads=nodes)
     if not o:
         ui.status(_("no changes found\n"))
         return []
     return [str(repo.changelog.rev(r)) for r in o]
Example #26
def _pullreviewidentifiers(repo, identifiers):
    """Pull down information for a list of review identifier strings.

    This will request the currently published data for a review identifier,
    including the mapping of commits to review request ids for all review
    requests that are currently part of the identifier.
    """
    reviews = repo.reviews

    # In the ideal world, we'd use RBTools to talk directly to the ReviewBoard
    # API. Unfortunately, the Mercurial distribution on Windows doesn't ship
    # with the json module. So, we proxy through the Mercurial server and have
    # it do all the heavy lifting.
    # FUTURE Hook up RBTools directly.
    remote = hg.peer(repo, {}, reviews.remoteurl)
    caps = getreviewcaps(remote)
    if 'pullreviews' not in caps:
        raise util.Abort('cannot pull code review metadata; '
                         'server lacks necessary features')

    lines = commonrequestlines(repo.ui)
    for identifier in identifiers:
        lines.append('reviewid %s' % identifier)

    res = remote._call('pullreviews', data='\n'.join(lines))
    lines = getpayload(res)

    reviewdata = {}

    for line in lines:
        t, d = line.split(' ', 1)

        if t == 'parentreview':
            identifier, parentid = map(urllib.unquote, d.split(' ', 2))
            reviewdata[parentid] = {}
        elif t == 'csetreview':
            parentid, node, rid = map(urllib.unquote, d.split(' ', 3))
            reviewdata[rid] = {}
        elif t == 'reviewdata':
            rid, field, value = map(urllib.unquote, d.split(' ', 3))
            reviewdata.setdefault(rid, {})[field] = decodepossiblelistvalue(value)
        elif t == 'error':
            raise util.Abort(d)
        else:
            raise util.Abort(_('unknown value in response payload: %s') % t)

    for rid, data in reviewdata.iteritems():
        reviews.savereviewrequest(rid, data)

    return reviewdata
Example #27
def main(args):
    parser = argparse.ArgumentParser()
    parser.add_argument('url', help='URL of Mercurial repo to push to')
    parser.add_argument('commit', help='Git commit to push')
    parser.add_argument('--config-file', action='append',
                        help='Extra Mercurial config file to load')

    args = parser.parse_args(args)

    url = args.url
    commit = args.commit

    init_logging()

    if passwordmgr:
        repo = get_repo(Remote('hg::%s' % url, url))
    else:
        from mercurial import hg

        ui = get_ui()

        for p in args.config_file or []:
            ui.readconfig(p, trust=True)

        repo = hg.peer(ui, {}, url)

    heads = (hexlify(h) for h in repo.heads())
    store = PushStore()
    pushed = push(repo, store, {commit: (None, False)}, heads, ())

    commits = []
    if pushed:
        for commit in pushed.iternodes():
            changeset = store.hg_changeset(commit)
            ref = store.changeset_ref(changeset)
            new_data = type(ref) != str

            commits.append([commit, changeset, new_data])

    # By now, cinnabar or its subprocesses should not be writing anything to
    # either stdout or stderr. Ensure stderr is flushed for _this_ process,
    # since git-mozreview uses the same file descriptor for both stdout and
    # stderr, and we want to try to avoid mixed output.
    sys.stderr.flush()
    for commit, changeset, new_data in commits:
        print('>result>', commit, changeset, new_data)
    sys.stdout.flush()

    return 0
Example #28
    def __init__(self, path, origin_uri, quiet=True, noisy_clone=False, show_output=False):
        super(HgRepository, self).__init__(path, quiet=quiet, noisy_clone=noisy_clone)
        self.show_output = show_output

        self.remotes = {'default': origin_uri}
        self.remote = hg.peer(ui.ui(), {}, origin_uri)
        self.repo = None
        self.repoExists = False

        checkHg()
        self._connectRepo()

        self.current_branch = None
        self.current_rev = None
        self.remote_rev = None
Example #29
def _pullreviewidentifiers(repo, identifiers):
    """Pull down information for a list of review identifier strings.

    This will request the currently published data for a review identifier,
    including the mapping of commits to review request ids for all review
    requests that are currently part of the identifier.
    """
    reviews = repo.reviews

    # In the ideal world, we'd use RBTools to talk directly to the ReviewBoard
    # API. Unfortunately, the Mercurial distribution on Windows doesn't ship
    # with the json module. So, we proxy through the Mercurial server and have
    # it do all the heavy lifting.
    # FUTURE Hook up RBTools directly.
    remote = hg.peer(repo, {}, reviews.remoteurl)
    remote.requirecap('pullreviews', _('obtain code reviews'))

    lines = ['1']
    for identifier in identifiers:
        lines.append('reviewid %s' % identifier)

    res = remote._call('pullreviews', data='\n'.join(lines))

    version = _verifyresponseversion(res)
    assert version == 1

    lines = res.split('\n')[1:]
    reviewdata = {}

    for line in lines:
        t, d = line.split(' ', 1)

        if t == 'parentreview':
            identifier, parentid = map(urllib.unquote, d.split(' ', 2))
            reviewdata[parentid] = {}
        elif t == 'csetreview':
            parentid, node, rid = map(urllib.unquote, d.split(' ', 3))
            reviewdata[rid] = {}
        elif t == 'reviewdata':
            rid, field, value = map(urllib.unquote, d.split(' ', 3))
            reviewdata.setdefault(rid, {})[field] = value
        else:
            raise util.Abort(_('unknown value in response payload: %s') % t)

    for rid, data in reviewdata.iteritems():
        reviews.savereviewrequest(rid, data)

    return reviewdata
Example #30
    def build_repo(self, url):
        '''Make the Mercurial repo object self.repo available. If the local
        clone does not exist, clone it, otherwise, ensure it is fetched.'''
        myui = ui()
        myui.setconfig('ui', 'interactive', 'off')
        myui.setconfig('extensions', 'mq', '')

        local_path = self.remotedir.joinpath('clone')
        if not local_path.exists():
            self.peer, dstpeer = hg.clone(myui, {}, url.encode('utf-8'),
                local_path.encode('utf-8'), update=False, pull=True)
            self.repo = dstpeer.local()
        else:
            self.repo = hg.repository(myui, local_path.encode('utf-8'))
            self.peer = hg.peer(myui, {}, url.encode('utf-8'))
            self.repo.pull(self.peer, heads=None, force=True)
Example #31
#!/usr/bin/env python
# -*- coding: utf-8; -*-

from __future__ import print_function

import sys
import getopt

from mercurial import ui, hg, node

opts, args = getopt.getopt(sys.argv[1:], 'r:m:')
opts = dict(opts)
repo = opts['-r']

branches = []
peer = hg.peer(ui.ui(), {}, repo)

for name, rev in peer.branchmap().items():
    branches.append((name, node.short(rev[0])))

# This can be read back with ast.literal_eval().
print(repr(branches))
Example #32
def _pull(orig, ui, repo, source=b"default", **opts):
    opts = pycompat.byteskwargs(opts)
    # Copy paste from `pull` command
    source, branches = urlutil.get_unique_pull_path(
        b"infinite-push's pull",
        repo,
        ui,
        source,
        default_branches=opts.get(b'branch'),
    )

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get(b'rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get(b'bookmark'):
        bookmarks = []
        revs = opts.get(b'rev') or []
        for bookmark in opts.get(b'bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = b'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            try:
                fetchedbookmarks = other.listkeyspatterns(
                    b'bookmarks', patterns=scratchbookmarks
                )
                for bookmark in scratchbookmarks:
                    if bookmark not in fetchedbookmarks:
                        raise error.Abort(
                            b'remote bookmark %s not found!' % bookmark
                        )
                    scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                    revs.append(fetchedbookmarks[bookmark])
            finally:
                other.close()
        opts[b'bookmark'] = bookmarks
        opts[b'rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(
            discovery, b'findcommonincoming', _findcommonincoming
        )
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before pull and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): race condition is possible
        # if scratch bookmarks were updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, b'findcommonincoming')
Example #33
def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, b'repos')
    if not os.path.exists(repospath):
        ui.warn(_(b"no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, b'rb')
    repos = {r[:-1] for r in reposfile.readlines()}
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(_(b"analyzing repositories"),
                               unit=b"repos",
                               total=len(repos))
    for path in repos:
        progress.update(count)
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, b'name'):
            ui.warn(
                _(b"repo %s is a misconfigured remotefilelog repo\n") % path)
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
        gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If repack cannot be performed due to not enough disk space
                # continue doing garbage collection of loose files w/o repack
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))

        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)

    progress.complete()

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, b'wb')
        reposfile.writelines([(b"%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    elif not filesrepacked:
        ui.warn(_(b"warning: no valid repos in repofile\n"))
Example #34
import os
import sys
import time

from mercurial import (
    commands,
    hg,
    ui as uimod,
    util,
)

TESTDIR = os.environ["TESTDIR"]
BUNDLEPATH = os.path.join(TESTDIR, 'bundles', 'test-no-symlinks.hg')

# only makes sense to test on os which supports symlinks
if not getattr(os, "symlink", False):
    sys.exit(80)  # SKIPPED_STATUS defined in run-tests.py

u = uimod.ui.load()
# hide outer repo
hg.peer(u, {}, '.', create=True)

# clone with symlink support
hg.clone(u, {}, BUNDLEPATH, 'test0')

repo = hg.repository(u, 'test0')

# wait a bit, or the status call won't update the dirstate
time.sleep(1)
commands.status(u, repo)


# now disable symlink support -- this is what os.symlink would do on a
# non-symlink file system
def symlink_failure(src, dst):
    raise OSError(1, "Operation not permitted")
Example #35
    def request(self, fileids):
        """Takes a list of filename/node pairs and fetches them from the
        server. Files are stored in the local cache.
        A list of nodes that the server couldn't find is returned.
        If the connection fails, an exception is raised.
        """
        if not self.remotecache.connected:
            self.connect()
        cache = self.remotecache
        localcache = self.localcache

        repo = self.repo
        count = len(fileids)
        request = "get\n%d\n" % count
        idmap = {}
        reponame = repo.name
        for file, id in fileids:
            fullid = getcachekey(reponame, file, id)
            request += fullid + "\n"
            idmap[fullid] = file

        cache.request(request)

        missing = []
        total = count
        self.ui.progress(_downloading, 0, total=count)

        fallbackpath = repo.fallbackpath

        missed = []
        count = 0
        while True:
            missingid = cache.receiveline()
            if not missingid:
                missedset = set(missed)
                for missingid in idmap.iterkeys():
                    if not missingid in missedset:
                        missed.append(missingid)
                self.ui.warn(
                    _("warning: cache connection closed early - " +
                      "falling back to server\n"))
                break
            if missingid == "0":
                break
            if missingid.startswith("_hits_"):
                # receive progress reports
                parts = missingid.split("_")
                count += int(parts[2])
                self.ui.progress(_downloading, count, total=total)
                continue

            missed.append(missingid)

        global fetchmisses
        fetchmisses += len(missed)

        count = total - len(missed)
        self.ui.progress(_downloading, count, total=total)

        oldumask = os.umask(0o002)
        try:
            # receive cache misses from master
            if missed:
                verbose = self.ui.verbose
                try:
                    # When verbose is true, sshpeer prints 'running ssh...'
                    # to stdout, which can interfere with some command
                    # outputs
                    self.ui.verbose = False

                    if not fallbackpath:
                        raise util.Abort(
                            "no remotefilelog server configured - "
                            "is your .hg/hgrc trusted?")
                    remote = hg.peer(self.ui, {}, fallbackpath)
                    remote._callstream("getfiles")
                finally:
                    self.ui.verbose = verbose

                i = 0
                while i < len(missed):
                    # issue a batch of requests
                    start = i
                    end = min(len(missed), start + 10000)
                    i = end
                    for missingid in missed[start:end]:
                        # issue new request
                        versionid = missingid[-40:]
                        file = idmap[missingid]
                        sshrequest = "%s%s\n" % (versionid, file)
                        remote.pipeo.write(sshrequest)
                    remote.pipeo.flush()

                    # receive batch results
                    for j in range(start, end):
                        self.receivemissing(remote.pipei, missed[j])
                        count += 1
                        self.ui.progress(_downloading, count, total=total)

                remote.cleanup()
                remote = None

                # send to memcache
                count = len(missed)
                request = "set\n%d\n%s\n" % (count, "\n".join(missed))
                cache.request(request)

            self.ui.progress(_downloading, None)

            # mark ourselves as a user of this cache
            localcache.markrepo()
        finally:
            os.umask(oldumask)

        return missing
Example #36
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                optimes,
                networkattemptlimit,
                networkattempts=None,
                sparse_profile=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui,
                           url,
                           dest,
                           upstream,
                           revision,
                           branch,
                           purge,
                           sharebase,
                           optimes,
                           networkattemptlimit,
                           networkattempts=networkattempts,
                           sparse_profile=sparse_profile)

    @contextlib.contextmanager
    def timeit(op):
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += '_errored'

            optimes.append((op, elapsed))

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = getvfs()(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = getvfs()(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists(
    ) and sparse_profile and not destvfs.exists('.hg/sparse'):
        raise error.Abort(
            'cannot enable sparse profile on existing '
            'non-sparse checkout',
            hint='use a separate working directory to use sparse')

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists('.hg/sparse'):
        raise error.Abort(
            'cannot use non-sparse checkout on existing sparse '
            'checkout',
            hint='use a separate working directory to use sparse')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        with timeit('remove_unshared_dest'):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting destination)\n')
            with timeit('removed_missing_shared_store'):
                destvfs.rmtree(forcibly=True)
        elif not re.search('[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting destination to improve efficiency)\n')
            with timeit('remove_unpooled_store'):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink('.hg/wlock'):
        ui.warn('(dest has an active working directory lock; assuming it is '
                'left over from a previous process and that the destination '
                'is corrupt; deleting it just to be sure)\n')
        with timeit('remove_locked_wdir'):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                with timeit('remove_unrecovered_shared_store'):
                    deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn('ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True
            else:
                ui.warn('unhandled URLError; reason type: %s; value: %s' %
                        (e.reason.__class__.__name__, e.reason))
        else:
            ui.warn('unhandled exception during network operation; type: %s; '
                    'value: %s' % (e.__class__.__name__, e))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = clonepeer.lookup('0')
    except error.RepoLookupError:
        raise error.Abort('unable to resolve root revision from clone '
                          'source')
    except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort('source repo appears to be empty')

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = getvfs()(storepath, audit=False)

    if storevfs.isfileorlink('.hg/store/lock'):
        ui.warn('(shared store has an active lock; assuming it is left '
                'over from a previous process and that the store is '
                'corrupt; deleting store and destination just to be '
                'sure)\n')
        if destvfs.exists():
            with timeit('remove_dest_active_lock'):
                destvfs.rmtree(forcibly=True)

        with timeit('remove_shared_store_active_lock'):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists('.hg/requires'):
        ui.warn('(shared store missing requires file; this is a really '
                'odd failure; deleting store and destination)\n')
        if destvfs.exists():
            with timeit('remove_dest_no_requires'):
                destvfs.rmtree(forcibly=True)

        with timeit('remove_shared_store_no_requires'):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists('.hg/requires'):
        requires = set(storevfs.read('.hg/requires').splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {'dotencode', 'fncache'}

        missing = required - requires
        if missing:
            ui.warn('(shared store missing requirements: %s; deleting '
                    'store and destination to ensure optimal behavior)\n' %
                    ', '.join(sorted(missing)))
            if destvfs.exists():
                with timeit('remove_dest_missing_requires'):
                    destvfs.rmtree(forcibly=True)

            with timeit('remove_shared_store_missing_requires'):
                storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)

        try:
            with timeit('clone'):
                shareopts = {'pool': sharebase, 'mode': 'identity'}
                res = hg.clone(ui, {},
                               clonepeer,
                               dest=dest,
                               update=False,
                               shareopts=shareopts)
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            with timeit('remove_shared_store_revlogerror'):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                with timeit('pull'):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort('unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
            if old_sparse_fn is not None:
                assert util.versiontuple(n=2) in ((4, 3), (4, 4), (4, 5))
                repo.dirstate._sparsematchfn = lambda: matchmod.always(
                    repo.root, '')

            with timeit('purge'):
                if purgeext.purge(
                        ui,
                        repo,
                        all=True,
                        abort_on_err=True,
                        # The function expects all arguments to be
                        # defined.
                        **{
                            'print': None,
                            'print0': None,
                            'dirs': None,
                            'files': None
                        }):
                    raise error.Abort('error purging')
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort('sparse profile %s does not exist at revision '
                              '%s' % (sparse_profile, checkoutrevision))

        old_config = sparsemod.parseconfig(repo.ui, repo.vfs.tryread('sparse'))
        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not \
                old_excludes:
            ui.write('(sparse profile %s already set; no need to update '
                     'sparse config)\n' % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write('(replacing existing sparse config with profile '
                         '%s)\n' % sparse_profile)
            else:
                ui.write('(setting sparse config to profile %s)\n' %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit('sparse_update_config'):
                fcounts = map(
                    len,
                    sparsemod._updateconfigandrefreshwdir(repo, [], [],
                                                          [sparse_profile],
                                                          force=True))

                repo.ui.status('%d files added, %d files dropped, '
                               '%d files conflicting\n' % tuple(fcounts))

            ui.write('(sparse refresh complete)\n')

    op = 'update_sparse' if sparse_profile else 'update'

    with timeit(op):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
Example #37
0
def _docheckout(
    ui,
    url,
    dest,
    upstream,
    revision,
    branch,
    purge,
    sharebase,
    optimes,
    behaviors,
    networkattemptlimit,
    networkattempts=None,
    sparse_profile=None,
    noupdate=False,
):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(
            ui,
            url,
            dest,
            upstream,
            revision,
            branch,
            purge,
            sharebase,
            optimes,
            behaviors,
            networkattemptlimit,
            networkattempts=networkattempts,
            sparse_profile=sparse_profile,
            noupdate=noupdate,
        )

    @contextlib.contextmanager
    def timeit(op, behavior):
        behaviors.add(behavior)
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += "_errored"

            optimes.append((op, elapsed))

    ui.write(b"ensuring %s@%s is available at %s\n" %
             (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = vfs.vfs(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read(b".hg/sharedpath").strip()
        if storepath.endswith(b".hg"):
            storepath = os.path.dirname(storepath)

        storevfs = vfs.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists(b".hg"):
        raise error.Abort(b"destination exists but no .hg directory")

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists(
    ) and sparse_profile and not destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot enable sparse profile on existing "
            b"non-sparse checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists(b".hg/sparse"):
        raise error.Abort(
            b"cannot use non-sparse checkout on existing sparse "
            b"checkout",
            hint=b"use a separate working directory to use sparse",
        )

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
        ui.warn(b"(destination is not shared; deleting)\n")
        with timeit("remove_unshared_dest", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists(b".hg/sharedpath"):
        storepath = destvfs.read(b".hg/sharedpath").strip()

        ui.write(b"(existing repository shared store: %s)\n" % storepath)

        if not os.path.exists(storepath):
            ui.warn(b"(shared store does not exist; deleting destination)\n")
            with timeit("removed_missing_shared_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)
        elif not re.search(b"[a-f0-9]{40}/\.hg$", storepath.replace(
                b"\\", b"/")):
            ui.warn(b"(shared store does not belong to pooled storage; "
                    b"deleting destination to improve efficiency)\n")
            with timeit("remove_unpooled_store", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink(b".hg/wlock"):
        ui.warn(b"(dest has an active working directory lock; assuming it is "
                b"left over from a previous process and that the destination "
                b"is corrupt; deleting it just to be sure)\n")
        with timeit("remove_locked_wdir", "remove-wdir"):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if pycompat.bytestr(e) == _(b"abandoned transaction found"):
            ui.warn(b"(abandoned transaction found; trying to recover)\n")
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn(b"(could not recover repo state; "
                        b"deleting shared store)\n")
                with timeit("remove_unrecovered_shared_store", "remove-store"):
                    deletesharedstore()

            ui.warn(b"(attempting checkout from beginning)\n")
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort(b"reached maximum number of network attempts; "
                              b"giving up\n")

        ui.warn(b"(retrying after network failure on attempt %d of %d)\n" %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
        jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _(b"repository is unrelated"):
                ui.warn(b"(repository is unrelated; deleting)\n")
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_(b"stream ended unexpectedly")):
                ui.warn(b"%s\n" % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        # TODO test this branch
        elif isinstance(e, error.ResponseError):
            if e.args[0].startswith(
                    _(b"unexpected response from remote server:")):
                ui.warn(
                    b"(unexpected response from remote server; retrying)\n")
                destvfs.rmtree(forcibly=True)
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn(b"ssl error: %s\n" % pycompat.bytestr(str(e)))
            handlenetworkfailure()
            return True
        elif isinstance(e, urllibcompat.urlerr.urlerror):
            if isinstance(e.reason, socket.error):
                ui.warn(b"socket error: %s\n" %
                        pycompat.bytestr(str(e.reason)))
                handlenetworkfailure()
                return True
            else:
                ui.warn(b"unhandled URLError; reason type: %s; value: %s\n" % (
                    pycompat.bytestr(e.reason.__class__.__name__),
                    pycompat.bytestr(str(e.reason)),
                ))
        else:
            ui.warn(b"unhandled exception during network operation; type: %s; "
                    b"value: %s\n" % (pycompat.bytestr(
                        e.__class__.__name__), pycompat.bytestr(str(e))))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = peerlookup(clonepeer, b"0")
    except error.RepoLookupError:
        raise error.Abort(b"unable to resolve root revision from clone "
                          b"source")
    except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort(b"source repo appears to be empty")

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = vfs.vfs(storepath, audit=False)

    if storevfs.isfileorlink(b".hg/store/lock"):
        ui.warn(b"(shared store has an active lock; assuming it is left "
                b"over from a previous process and that the store is "
                b"corrupt; deleting store and destination just to be "
                b"sure)\n")
        if destvfs.exists():
            with timeit("remove_dest_active_lock", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_active_lock", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists(b".hg/requires"):
        ui.warn(b"(shared store missing requires file; this is a really "
                b"odd failure; deleting store and destination)\n")
        if destvfs.exists():
            with timeit("remove_dest_no_requires", "remove-wdir"):
                destvfs.rmtree(forcibly=True)

        with timeit("remove_shared_store_no_requires", "remove-store"):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists(b".hg/requires"):
        requires = set(storevfs.read(b".hg/requires").splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {b"dotencode", b"fncache"}

        missing = required - requires
        if missing:
            ui.warn(b"(shared store missing requirements: %s; deleting "
                    b"store and destination to ensure optimal behavior)\n" %
                    b", ".join(sorted(missing)))
            if destvfs.exists():
                with timeit("remove_dest_missing_requires", "remove-wdir"):
                    destvfs.rmtree(forcibly=True)

            with timeit("remove_shared_store_missing_requires",
                        "remove-store"):
                storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, "ensuredirs"):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write(b"(cloning from upstream repo %s)\n" % upstream)

        if not storevfs.exists():
            behaviors.add(b"create-store")

        try:
            with timeit("clone", "clone"):
                shareopts = {b"pool": sharebase, b"mode": b"identity"}
                res = hg.clone(
                    ui,
                    {},
                    clonepeer,
                    dest=dest,
                    update=False,
                    shareopts=shareopts,
                    stream=True,
                )
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            with timeit("remove_shared_store_revlogerror", "remote-store"):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort(b"clone failed")

        # Verify it is using shared pool storage.
        if not destvfs.exists(b".hg/sharedpath"):
            raise error.Abort(b"clone did not create a shared repo")

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False

    if revision:
        try:
            ctx = scmutil.revsingle(repo, revision)
        except error.RepoLookupError:
            ctx = None

        if ctx:
            if not ctx.hex().startswith(revision):
                raise error.Abort(
                    b"--revision argument is ambiguous",
                    hint=b"must be the first 12+ characters of a "
                    b"SHA-1 fragment",
                )

            checkoutrevision = ctx.hex()
            havewantedrev = True

    if not havewantedrev:
        ui.write(b"(pulling to obtain %s)\n" % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [peerlookup(remote, revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn(b"(remote resolved %s to %s; "
                        b"result is not deterministic)\n" %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn(b"(revision already present locally; not pulling)\n")
            else:
                with timeit("pull", "pull"):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort(b"unable to pull requested revision")
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
    if noupdate:
        ui.write(b"(skipping update since `-U` was passed)\n")
        return None

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write(b"(purging working directory)\n")
        purge = getattr(commands, "purge", None)
        if not purge:
            purge = extensions.find(b"purge").purge

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
            if old_sparse_fn is not None:
                # TRACKING hg50
                # Arguments passed to `matchmod.always` were unused and have been removed
                if util.versiontuple(n=2) >= (5, 0):
                    repo.dirstate._sparsematchfn = lambda: matchmod.always()
                else:
                    repo.dirstate._sparsematchfn = lambda: matchmod.always(
                        repo.root, "")

            with timeit("purge", "purge"):
                if purge(
                        ui,
                        repo,
                        all=True,
                        abort_on_err=True,
                        # The function expects all arguments to be
                        # defined.
                        **{
                            "print": None,
                            "print0": None,
                            "dirs": None,
                            "files": None
                        }):
                    raise error.Abort(b"error purging")
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if repo[b"."].node() == nullid:
        behaviors.add("empty-wdir")
    else:
        behaviors.add("populated-wdir")

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort(b"sparse profile %s does not exist at revision "
                              b"%s" % (sparse_profile, checkoutrevision))

        # TRACKING hg48 - parseconfig takes `action` param
        if util.versiontuple(n=2) >= (4, 8):
            old_config = sparsemod.parseconfig(repo.ui,
                                               repo.vfs.tryread(b"sparse"),
                                               b"sparse")
        else:
            old_config = sparsemod.parseconfig(repo.ui,
                                               repo.vfs.tryread(b"sparse"))

        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile
                            } and not old_includes and not old_excludes:
            ui.write(b"(sparse profile %s already set; no need to update "
                     b"sparse config)\n" % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write(b"(replacing existing sparse config with profile "
                         b"%s)\n" % sparse_profile)
            else:
                ui.write(b"(setting sparse config to profile %s)\n" %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit("sparse_update_config",
                                      "sparse-update-config"):
                # pylint --py3k: W1636
                fcounts = list(
                    map(
                        len,
                        sparsemod._updateconfigandrefreshwdir(repo, [], [],
                                                              [sparse_profile],
                                                              force=True),
                    ))

                repo.ui.status(b"%d files added, %d files dropped, "
                               b"%d files conflicting\n" % tuple(fcounts))

            ui.write(b"(sparse refresh complete)\n")

    op = "update_sparse" if sparse_profile else "update"
    behavior = "update-sparse" if sparse_profile else "update"

    with timeit(op, behavior):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort(b"error updating")

    ui.write(b"updated to %s\n" % checkoutrevision)

    return None
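For reference, the retry schedule documented inside handlenetworkfailure() above can be reproduced with a small standalone sketch (an editor's illustration, not part of robustcheckout, assuming the default retryjittermin/retryjittermax of 1000/5000 ms):

import random

def backoff_seconds(attempt, jittermin=1000, jittermax=5000):
    # (2**n - 1) * 1.5 plus 1.0-5.0 s of random jitter, as in the extension.
    base = (2 ** attempt - 1) * 1.5
    return base + random.randint(jittermin, jittermax) / 1000.0

for attempt in (1, 2, 3):
    base = (2 ** attempt - 1) * 1.5
    print('attempt %d: %.1f - %.1f seconds' % (attempt, base + 1.0, base + 5.0))
# attempt 1: 2.5 - 6.5 seconds
# attempt 2: 5.5 - 9.5 seconds
# attempt 3: 11.5 - 15.5 seconds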
Example #38
0
def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
    """show or change the current narrowspec

    With no argument, shows the current narrowspec entries, one per line. Each
    line will be prefixed with 'I' or 'X' for included or excluded patterns,
    respectively.

    The narrowspec is comprised of expressions to match remote files and/or
    directories that should be pulled into your client.
    The narrowspec has *include* and *exclude* expressions, with excludes always
    trumping includes: that is, if a file matches an exclude expression, it will
    be excluded even if it also matches an include expression.
    Excluding files that were never included has no effect.

    Each included or excluded entry is in the format described by
    'hg help patterns'.

    The options allow you to add or remove included and excluded expressions.

    If --clear is specified, then all previous includes and excludes are DROPPED
    and replaced by the new ones specified to --addinclude and --addexclude.
    If --clear is specified without any further options, the narrowspec will be
    empty and will not match any files.
    """
    opts = pycompat.byteskwargs(opts)
    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
        ui.warn(
            _('The narrow command is only supported on repositories cloned'
              ' with --narrow.\n'))
        return 1

    # Before supporting it, decide whether "hg tracked --clear" should mean
    # tracking no paths or all paths.
    if opts['clear']:
        ui.warn(_('The --clear option is not yet supported.\n'))
        return 1

    if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']):
        raise error.Abort('Expansion not yet supported on widen/narrow')

    addedincludes = narrowspec.parsepatterns(opts['addinclude'])
    removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
    addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
    removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
    widening = addedincludes or removedexcludes
    narrowing = removedincludes or addedexcludes
    only_show = not widening and not narrowing

    # Only print the current narrowspec.
    if only_show:
        include, exclude = repo.narrowpats

        ui.pager('tracked')
        fm = ui.formatter('narrow', opts)
        for i in sorted(include):
            fm.startitem()
            fm.write('status', '%s ', 'I', label='narrow.included')
            fm.write('pat', '%s\n', i, label='narrow.included')
        for i in sorted(exclude):
            fm.startitem()
            fm.write('status', '%s ', 'X', label='narrow.excluded')
            fm.write('pat', '%s\n', i, label='narrow.excluded')
        fm.end()
        return 0

    with repo.wlock(), repo.lock():
        cmdutil.bailifchanged(repo)

        # Find the revisions we have in common with the remote. These will
        # be used for finding local-only changes for narrowing. They will
        # also define the set of revisions to update for widening.
        remotepath = ui.expandpath(remotepath or 'default')
        url, branches = hg.parseurl(remotepath)
        ui.status(_('comparing with %s\n') % util.hidepassword(url))
        remote = hg.peer(repo, opts, url)
        commoninc = discovery.findcommonincoming(repo, remote)

        oldincludes, oldexcludes = repo.narrowpats
        if narrowing:
            newincludes = oldincludes - removedincludes
            newexcludes = oldexcludes | addedexcludes
            _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
                    newincludes, newexcludes,
                    opts['force_delete_local_changes'])
            # _narrow() updated the narrowspec and _widen() below needs to
            # use the updated values as its base (otherwise removed includes
            # and addedexcludes will be lost in the resulting narrowspec)
            oldincludes = newincludes
            oldexcludes = newexcludes

        if widening:
            newincludes = oldincludes | addedincludes
            newexcludes = oldexcludes - removedexcludes
            _widen(ui, repo, remote, commoninc, newincludes, newexcludes)

    return 0
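A minimal, self-contained sketch (an editor's illustration with made-up patterns, not Mercurial code) of the set arithmetic trackedcmd() performs when both narrowing and widening options are given: narrowing is applied first, and widening then starts from the narrowed result so removed includes and added excludes are not lost.

oldincludes = {'path:src', 'path:docs'}
oldexcludes = {'path:docs/build'}

removedincludes = {'path:docs'}    # --removeinclude docs (narrowing)
addedexcludes = set()              # --addexclude ...     (narrowing)
addedincludes = {'path:tests'}     # --addinclude tests   (widening)
removedexcludes = set()            # --removeexclude ...  (widening)

# Narrowing first.
newincludes = oldincludes - removedincludes
newexcludes = oldexcludes | addedexcludes
# Widening is based on the narrowed spec.
newincludes = newincludes | addedincludes
newexcludes = newexcludes - removedexcludes

print(sorted(newincludes))  # ['path:src', 'path:tests']
print(sorted(newexcludes))  # ['path:docs/build']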
Example #39
0
def hg_repo(ui, url, opts):
    parts = url.split(':', 2)
    if len(parts) == 1 or parts[0] == 'file':
        return hg.repository(ui, url)
    return hg.peer(ui, opts, url)
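A hypothetical usage sketch for hg_repo() (the paths and URL are placeholders; this assumes the `from mercurial import ui, hg` imports used throughout these examples): a plain path or file: URL yields a local repository object, while anything else yields a peer for the remote.

myui = ui.ui()
local = hg_repo(myui, '/path/to/local/repo', {})           # -> hg.repository(...)
remote = hg_repo(myui, 'https://hg.example.com/repo', {})  # -> hg.peer(...)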
Example #40
0
def email(ui, repo, *revs, **opts):
    '''send changesets by email

    By default, diffs are sent in the format generated by
    :hg:`export`, one per message. The series starts with a "[PATCH 0
    of N]" introduction, which describes the series as a whole.

    Each patch email has a Subject line of "[PATCH M of N] ...", using
    the first line of the changeset description as the subject text.
    The message contains two or three parts. First, the changeset
    description.

    With the -d/--diffstat option, if the diffstat program is
    installed, the result of running diffstat on the patch is inserted.

    Finally, the patch itself, as generated by :hg:`export`.

    With the -d/--diffstat or --confirm options, you will be presented
    with a final summary of all messages and asked for confirmation before
    the messages are sent.

    By default the patch is included as text in the email body for
    easy reviewing. Using the -a/--attach option will instead create
    an attachment for the patch. With -i/--inline an inline attachment
    will be created. You can include a patch both as text in the email
    body and as a regular or an inline attachment by combining the
    -a/--attach or -i/--inline with the --body option.

    With -B/--bookmark changesets reachable by the given bookmark are
    selected.

    With -o/--outgoing, emails will be generated for patches not found
    in the destination repository (or only those which are ancestors
    of the specified revisions if any are provided)

    With -b/--bundle, changesets are selected as for --outgoing, but a
    single email containing a binary Mercurial bundle as an attachment
    will be sent. Use the ``patchbomb.bundletype`` config option to
    control the bundle type as with :hg:`bundle --type`.

    With -m/--mbox, instead of previewing each patchbomb message in a
    pager or sending the messages directly, it will create a UNIX
    mailbox file with the patch emails. This mailbox file can be
    previewed with any mail user agent which supports UNIX mbox
    files.

    With -n/--test, all steps will run, but mail will not be sent.
    You will be prompted for an email recipient address, a subject and
    an introductory message describing the patches of your patchbomb.
    Then when all is done, patchbomb messages are displayed.

    In case email sending fails, you will find a backup of your series
    introductory message in ``.hg/last-email.txt``.

    The default behavior of this command can be customized through
    configuration. (See :hg:`help patchbomb` for details)

    Examples::

      hg email -r 3000          # send patch 3000 only
      hg email -r 3000 -r 3001  # send patches 3000 and 3001
      hg email -r 3000:3005     # send patches 3000 through 3005
      hg email 3000             # send patch 3000 (deprecated)

      hg email -o               # send all patches not in default
      hg email -o DEST          # send all patches not in DEST
      hg email -o -r 3000       # send all ancestors of 3000 not in default
      hg email -o -r 3000 DEST  # send all ancestors of 3000 not in DEST

      hg email -B feature       # send all ancestors of feature bookmark

      hg email -b               # send bundle of all patches not in default
      hg email -b DEST          # send bundle of all patches not in DEST
      hg email -b -r 3000       # bundle of all ancestors of 3000 not in default
      hg email -b -r 3000 DEST  # bundle of all ancestors of 3000 not in DEST

      hg email -o -m mbox &&    # generate an mbox file...
        mutt -R -f mbox         # ... and view it with mutt
      hg email -o -m mbox &&    # generate an mbox file ...
        formail -s sendmail \\   # ... and use formail to send from the mbox
          -bm -t < mbox         # ... using sendmail

    Before using this command, you will need to enable email in your
    hgrc. See the [email] section in hgrc(5) for details.
    '''
    opts = pycompat.byteskwargs(opts)

    _charsets = mail._charsets(ui)

    bundle = opts.get('bundle')
    date = opts.get('date')
    mbox = opts.get('mbox')
    outgoing = opts.get('outgoing')
    rev = opts.get('rev')
    bookmark = opts.get('bookmark')

    if not (opts.get('test') or mbox):
        # really sending
        mail.validateconfig(ui)

    if not (revs or rev or outgoing or bundle or bookmark):
        raise error.Abort(
            _('specify at least one changeset with -B, -r or -o'))

    if outgoing and bundle:
        raise error.Abort(
            _("--outgoing mode always on with --bundle;"
              " do not re-specify --outgoing"))
    if rev and bookmark:
        raise error.Abort(_("-r and -B are mutually exclusive"))

    if outgoing or bundle:
        if len(revs) > 1:
            raise error.Abort(_("too many destinations"))
        if revs:
            dest = revs[0]
        else:
            dest = None
        revs = []

    if rev:
        if revs:
            raise error.Abort(_('use only one form to specify the revision'))
        revs = rev
    elif bookmark:
        if bookmark not in repo._bookmarks:
            raise error.Abort(_("bookmark '%s' not found") % bookmark)
        revs = scmutil.bookmarkrevs(repo, bookmark)

    revs = scmutil.revrange(repo, revs)
    if outgoing:
        revs = _getoutgoing(repo, dest, revs)
    if bundle:
        opts['revs'] = ["%d" % r for r in revs]

    # check if revisions exist on the public destination
    publicurl = repo.ui.config('patchbomb', 'publicurl')
    if publicurl:
        repo.ui.debug('checking that revisions exist in the public repo\n')
        try:
            publicpeer = hg.peer(repo, {}, publicurl)
        except error.RepoError:
            repo.ui.write_err(
                _('unable to access public repo: %s\n') % publicurl)
            raise
        if not publicpeer.capable('known'):
            repo.ui.debug('skipping existence checks: public repo too old\n')
        else:
            out = [repo[r] for r in revs]
            known = publicpeer.known(h.node() for h in out)
            missing = []
            for idx, h in enumerate(out):
                if not known[idx]:
                    missing.append(h)
            if missing:
                if 1 < len(missing):
                    msg = _('public "%s" is missing %s and %i others')
                    msg %= (publicurl, missing[0], len(missing) - 1)
                else:
                    msg = _('public url %s is missing %s')
                    msg %= (publicurl, missing[0])
                missingrevs = [ctx.rev() for ctx in missing]
                revhint = ' '.join(
                    '-r %s' % h for h in repo.set('heads(%ld)', missingrevs))
                hint = _("use 'hg push %s %s'") % (publicurl, revhint)
                raise error.Abort(msg, hint=hint)

    # start
    if date:
        start_time = dateutil.parsedate(date)
    else:
        start_time = dateutil.makedate()

    def genmsgid(id):
        return '<%s.%d@%s>' % (id[:20], int(
            start_time[0]), encoding.strtolocal(socket.getfqdn()))

    # deprecated config: patchbomb.from
    sender = (opts.get('from') or ui.config('email', 'from')
              or ui.config('patchbomb', 'from')
              or prompt(ui, 'From', ui.username()))

    if bundle:
        stropts = pycompat.strkwargs(opts)
        bundledata = _getbundle(repo, dest, **stropts)
        bundleopts = stropts.copy()
        bundleopts.pop(r'bundle', None)  # already processed
        msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
    else:
        msgs = _getpatchmsgs(repo, sender, revs, **pycompat.strkwargs(opts))

    showaddrs = []

    def getaddrs(header, ask=False, default=None):
        configkey = header.lower()
        opt = header.replace('-', '_').lower()
        addrs = opts.get(opt)
        if addrs:
            showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
            return mail.addrlistencode(ui, addrs, _charsets, opts.get('test'))

        # not on the command line: fallback to config and then maybe ask
        addr = (ui.config('email', configkey)
                or ui.config('patchbomb', configkey))
        if not addr:
            specified = (ui.hasconfig('email', configkey)
                         or ui.hasconfig('patchbomb', configkey))
            if not specified and ask:
                addr = prompt(ui, header, default=default)
        if addr:
            showaddrs.append('%s: %s' % (header, addr))
            return mail.addrlistencode(ui, [addr], _charsets, opts.get('test'))
        elif default:
            return mail.addrlistencode(ui, [default], _charsets,
                                       opts.get('test'))
        return []

    to = getaddrs('To', ask=True)
    if not to:
        # we can get here in non-interactive mode
        raise error.Abort(_('no recipient addresses provided'))
    cc = getaddrs('Cc', ask=True, default='')
    bcc = getaddrs('Bcc')
    replyto = getaddrs('Reply-To')

    confirm = ui.configbool('patchbomb', 'confirm')
    confirm |= bool(opts.get('diffstat') or opts.get('confirm'))

    if confirm:
        ui.write(_('\nFinal summary:\n\n'), label='patchbomb.finalsummary')
        ui.write(('From: %s\n' % sender), label='patchbomb.from')
        for addr in showaddrs:
            ui.write('%s\n' % addr, label='patchbomb.to')
        for m, subj, ds in msgs:
            ui.write(('Subject: %s\n' % subj), label='patchbomb.subject')
            if ds:
                ui.write(ds, label='patchbomb.diffstats')
        ui.write('\n')
        if ui.promptchoice(
                _('are you sure you want to send (yn)?'
                  '$$ &Yes $$ &No')):
            raise error.Abort(_('patchbomb canceled'))

    ui.write('\n')

    parent = opts.get('in_reply_to') or None
    # angle brackets may be omitted, they're not semantically part of the msg-id
    if parent is not None:
        if not parent.startswith('<'):
            parent = '<' + parent
        if not parent.endswith('>'):
            parent += '>'

    sender_addr = eutil.parseaddr(encoding.strfromlocal(sender))[1]
    sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
    sendmail = None
    firstpatch = None
    for i, (m, subj, ds) in enumerate(msgs):
        try:
            m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
            if not firstpatch:
                firstpatch = m['Message-Id']
            m['X-Mercurial-Series-Id'] = firstpatch
        except TypeError:
            m['Message-Id'] = genmsgid('patchbomb')
        if parent:
            m['In-Reply-To'] = parent
            m['References'] = parent
        if not parent or 'X-Mercurial-Node' not in m:
            parent = m['Message-Id']

        m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
        m['Date'] = eutil.formatdate(start_time[0], localtime=True)

        start_time = (start_time[0] + 1, start_time[1])
        m['From'] = sender
        m['To'] = ', '.join(to)
        if cc:
            m['Cc'] = ', '.join(cc)
        if bcc:
            m['Bcc'] = ', '.join(bcc)
        if replyto:
            m['Reply-To'] = ', '.join(replyto)
        if opts.get('test'):
            ui.status(_('displaying '), subj, ' ...\n')
            ui.pager('email')
            generator = emailgen.Generator(ui, mangle_from_=False)
            try:
                generator.flatten(m, 0)
                ui.write('\n')
            except IOError as inst:
                if inst.errno != errno.EPIPE:
                    raise
        else:
            if not sendmail:
                sendmail = mail.connect(ui, mbox=mbox)
            ui.status(_('sending '), subj, ' ...\n')
            ui.progress(_('sending'),
                        i,
                        item=subj,
                        total=len(msgs),
                        unit=_('emails'))
            if not mbox:
                # Exim does not remove the Bcc field
                del m['Bcc']
            fp = stringio()
            generator = emailgen.Generator(fp, mangle_from_=False)
            generator.flatten(m, 0)
            sendmail(sender_addr, to + bcc + cc, fp.getvalue())

    ui.progress(_('writing'), None)
    ui.progress(_('sending'), None)
Example #41
0
def clone_cache_cmd(ui, source, dest=None, **opts):
    source_url = url(source)
    if source_url.fragment is not None:
        raise ValueError('Someone is being clever! We are not clever. Bail.')

    orig_source = source
    cache_source = os.path.join(CACHE, url_to_filename(source))
    was_cached = False
    clone_source = source
    if not opts.get('nocache'):
        was_cached = os.path.exists(cache_source)
        if was_cached:
            ui.status('cloning from cache {}\n'.format(cache_source))
            clone_source = cache_source
            if dest is None:
                dest = hg.defaultdest(source)
            if opts.get('rev'):
                ui.status('updating cache {} to rev {}\n'.format(cache_source, opts.get('rev')))
                cache_peer = hg.peer(ui, {}, cache_source)
                commands.pull(cache_peer.ui, cache_peer.local(), noupdate=True, rev=opts.get('rev'))
        else:
            ui.status('no cache found at {}, cloning from source {}\n'.format(
                cache_source, source))
    
    if opts.get('noupdate') and opts.get('updaterev'):
        raise util.Abort(_("cannot specify both --noupdate and --updaterev"))

    r = hg.clone(ui, opts, clone_source, dest,
                 pull=opts.get('pull'),
                 stream=opts.get('uncompressed'),
                 rev=opts.get('rev'),
                 update=opts.get('updaterev') or not opts.get('noupdate'),
                 branch=opts.get('branch'))

    if r is None:
        return True

    source_peer, dest_peer = r

    if was_cached:
        dest_repo = dest_peer.local()
        if dest_repo:
            orig_source = dest_repo.ui.expandpath(orig_source)
            abspath = orig_source
            if hg.islocal(orig_source):
                abspath = os.path.abspath(hg.util.urllocalpath(orig_source))

            u = url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp = dest_repo.opener("hgrc", "w", text=True)
            fp.write("[paths]\n")
            fp.write("default = %s\n" % defaulturl)
            fp.write('\n')
            fp.write('[clonecache]\n')
            fp.write('cache = %s\n' % cache_source)
            fp.close()

            dest_repo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            commands.pull(dest_repo.ui, dest_repo)

            commands.update(ui, dest_repo)

    return False
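clone_cache_cmd() relies on a CACHE directory constant and a url_to_filename() helper that are not shown above. A purely hypothetical implementation, included only to make the example's intent concrete, could map the clone URL to a stable, filesystem-safe cache key:

import os
import hashlib

CACHE = os.path.expanduser('~/.hg-clone-cache')   # hypothetical location

def url_to_filename(source):
    # Hypothetical helper: derive a filesystem-safe name from the source URL.
    return hashlib.sha1(source.encode('utf-8')).hexdigest()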
Example #42
0
def get_repo(url, alias):
    global peer

    myui = ui.ui()
    myui.setconfig('ui', 'interactive', 'off')
    myui.fout = sys.stderr

    if get_config_bool('remote-hg.insecure'):
        myui.setconfig('web', 'cacerts', '')

    extensions.loadall(myui)

    if hg.islocal(url) and not os.environ.get('GIT_REMOTE_HG_TEST_REMOTE'):
        repo = hg.repository(myui, url)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    else:
        shared_path = os.path.join(gitdir, 'hg')

        # check and upgrade old organization
        hg_path = os.path.join(shared_path, '.hg')
        if os.path.exists(shared_path) and not os.path.exists(hg_path):
            repos = os.listdir(shared_path)
            for x in repos:
                local_hg = os.path.join(shared_path, x, 'clone', '.hg')
                if not os.path.exists(local_hg):
                    continue
                if not os.path.exists(hg_path):
                    shutil.move(local_hg, hg_path)
                shutil.rmtree(os.path.join(shared_path, x, 'clone'))

        # setup shared repo (if not there)
        try:
            hg.peer(myui, {}, shared_path, create=True)
        except error.RepoError:
            pass

        if not os.path.exists(dirname):
            os.makedirs(dirname)

        local_path = os.path.join(dirname, 'clone')
        if not os.path.exists(local_path):
            hg.share(myui, shared_path, local_path, update=False)
        else:
            # make sure the shared path is always up-to-date
            util.writefile(os.path.join(local_path, '.hg', 'sharedpath'), hg_path)

        repo = hg.repository(myui, local_path)
        try:
            peer = hg.peer(repo.ui, {}, url)
        except:
            die('Repository error')

        if check_version(3, 0):
            from mercurial import exchange
            exchange.pull(repo, peer, heads=None, force=True)
        else:
            repo.pull(peer, heads=None, force=True)

        updatebookmarks(repo, peer)

    return repo
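This variant of get_repo() calls a check_version() helper (defined elsewhere in git-remote-hg, not shown here) to choose between the modern exchange.pull() API and the legacy repo.pull() method. A hypothetical stand-in, assuming a Mercurial recent enough to expose util.versiontuple() (3.6+), might look like:

from mercurial import util

def check_version(*required):
    # Hypothetical helper: True when Mercurial's (major, minor) version is at
    # least the required minimum, e.g. check_version(3, 0).
    return util.versiontuple(n=2) >= required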
Example #43
0
def fetch(ui, repo, source='default', **opts):
    '''pull changes from a remote repository, merge new changes if needed.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository.

    If the pulled changes add a new branch head, the head is
    automatically merged, and the result of the merge is committed.
    Otherwise, the working directory is updated to include the new
    changes.

    When a merge is needed, the working directory is first updated to
    the newly pulled changes. Local changes are then merged into the
    pulled changes. To switch the merge order, use --switch-parent.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    '''

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    parent, _p2 = repo.dirstate.parents()
    branch = repo.dirstate.branch()
    try:
        branchnode = repo.branchtip(branch)
    except error.RepoLookupError:
        branchnode = None
    if parent != branchnode:
        raise error.Abort(_('working directory not at branch tip'),
                          hint=_("use 'hg update' to check out branch tip"))

    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        cmdutil.bailifchanged(repo)

        bheads = repo.branchheads(branch)
        bheads = [head for head in bheads if len(repo[head].children()) == 0]
        if len(bheads) > 1:
            raise error.Abort(
                _('multiple heads in this branch '
                  '(use "hg heads ." and "hg merge" to merge)'))

        other = hg.peer(repo, opts, ui.expandpath(source))
        ui.status(
            _('pulling from %s\n') % util.hidepassword(ui.expandpath(source)))
        revs = None
        if opts['rev']:
            try:
                revs = [other.lookup(rev) for rev in opts['rev']]
            except error.CapabilityError:
                err = _("other repository doesn't support revision lookup, "
                        "so a rev cannot be specified.")
                raise error.Abort(err)

        # Are there any changes at all?
        modheads = exchange.pull(repo, other, heads=revs).cgresult
        if modheads == 0:
            return 0

        # Is this a simple fast-forward along the current branch?
        newheads = repo.branchheads(branch)
        newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
        if len(newheads) == 1 and len(newchildren):
            if newchildren[0] != parent:
                return hg.update(repo, newchildren[0])
            else:
                return 0

        # Are there more than one additional branch heads?
        newchildren = [n for n in newchildren if n != parent]
        newparent = parent
        if newchildren:
            newparent = newchildren[0]
            hg.clean(repo, newparent)
        newheads = [n for n in newheads if n != newparent]
        if len(newheads) > 1:
            ui.status(
                _('not merging with %d other new branch heads '
                  '(use "hg heads ." and "hg merge" to merge them)\n') %
                (len(newheads) - 1))
            return 1

        if not newheads:
            return 0

        # Otherwise, let's merge.
        err = False
        if newheads:
            # By default, we consider the repository we're pulling
            # *from* as authoritative, so we merge our changes into
            # theirs.
            if opts['switch_parent']:
                firstparent, secondparent = newparent, newheads[0]
            else:
                firstparent, secondparent = newheads[0], newparent
                ui.status(
                    _('updating to %d:%s\n') %
                    (repo.changelog.rev(firstparent), short(firstparent)))
            hg.clean(repo, firstparent)
            ui.status(
                _('merging with %d:%s\n') %
                (repo.changelog.rev(secondparent), short(secondparent)))
            err = hg.merge(repo, secondparent, remind=False)

        if not err:
            # we don't translate commit messages
            message = (cmdutil.logmessage(ui, opts)
                       or ('Automated merge with %s' %
                           util.removeauth(other.url())))
            editopt = opts.get('edit') or opts.get('force_editor')
            editor = cmdutil.getcommiteditor(edit=editopt, editform='fetch')
            n = repo.commit(message, opts['user'], opts['date'], editor=editor)
            ui.status(
                _('new changeset %d:%s merges remote changes '
                  'with local\n') % (repo.changelog.rev(n), short(n)))

        return err

    finally:
        release(lock, wlock)
Example #44
0
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                networkattemptlimit,
                networkattempts=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase, networkattemptlimit, networkattempts)

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    destvfs = scmutil.vfs(dest, audit=False, realpath=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search('[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

        # FUTURE when we require generaldelta, this is where we can check
        # for that.

    def deletesharedstore():
        storepath = destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = scmutil.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort('reached maximum number of network attempts; '
                              'giving up\n')

        ui.warn('(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint('robustcheckout', 'retryjittermin', 1000)
        jittermax = ui.configint('robustcheckout', 'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn('(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_('stream ended unexpectedly')):
                ui.warn('%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, urllib2.URLError):
            if isinstance(e.reason, socket.error):
                ui.warn('socket error: %s\n' % e.reason)
                handlenetworkfailure()
                return True

        return False

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)
        cloneurl = upstream or url

        try:
            res = hg.clone(ui, {},
                           cloneurl,
                           dest=dest,
                           update=False,
                           shareopts={
                               'pool': sharebase,
                               'mode': 'identity'
                           })
        except (error.Abort, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except (error.Abort, urllib2.URLError) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        if purgeext.purge(
                ui,
                repo,
                all=True,
                abort_on_err=True,
                # The function expects all arguments to be
                # defined.
                **{
                    'print': None,
                    'print0': None,
                    'dirs': None,
                    'files': None
                }):
            raise error.Abort('error purging')

    # Update the working directory.
    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
Example #45
0
File: rheads.py Project: jglick/jk--
# https://stackoverflow.com/a/11900786/12916
from mercurial import ui, hg, node
from sys import argv
peer = hg.peer(ui.ui(), {}, argv[1])
for name, rev in peer.branchmap().items():
    print name, node.short(rev[0])
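The same remote branch-head listing, sketched for Python 3 and a modern Mercurial where the internal API expects byte strings; this is an untested adaptation, not part of the original snippet.
import sys
from mercurial import hg, node, ui as uimod

peer = hg.peer(uimod.ui.load(), {}, sys.argv[1].encode('utf-8'))
for name, heads in peer.branchmap().items():
    # branch names and node ids come back as bytes on Python 3
    print(name.decode('utf-8'), node.short(heads[0]).decode('ascii'))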
Example #46
0
def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase):
    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase)

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    destvfs = scmutil.vfs(dest, audit=False, realpath=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Require checkouts to be tied to shared storage for efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

        # FUTURE when we require generaldelta, this is where we can check
        # for that.

    def deletesharedstore():
        storepath = destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = scmutil.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.
    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)
        cloneurl = upstream or url

        try:
            res = hg.clone(ui, {},
                           cloneurl,
                           dest=dest,
                           update=False,
                           shareopts={
                               'pool': sharebase,
                               'mode': 'identity'
                           })
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except error.Abort as e:
            if e.message == _('repository is unrelated'):
                ui.warn('(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return callself()

            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        if purgeext.purge(
                ui,
                repo,
                all=True,
                abort_on_err=True,
                # The function expects all arguments to be
                # defined.
                **{
                    'print': None,
                    'print0': None,
                    'dirs': None,
                    'files': None
                }):
            raise error.Abort('error purging')

    # Update the working directory.
    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
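For orientation, a direct call to the helper above could look like the following; every path, URL, and revision is a placeholder, and myui stands for an already configured ui instance.
# Hypothetical invocation; all values are illustrative.
_docheckout(myui,
            'https://hg.example.com/repo',   # url to make available
            '/builds/checkout',              # dest working directory
            None,                            # upstream clone source (none)
            '0123456789abcdef0123',          # revision prefix (12+ hex chars)
            None,                            # branch (unused when revision given)
            True,                            # purge the working directory first
            '/builds/hg-shared')             # sharebase for pooled storage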
Example #47
0
def transplant(ui, repo, *revs, **opts):
    '''transplant changesets from another branch

    Selected changesets will be applied on top of the current working
    directory with the log of the original changeset. The changesets
    are copied and will thus appear twice in the history with different
    identities.

    Consider using the graft command if everything is inside the same
    repository - it will use merges and will usually give a better result.
    Use the rebase extension if the changesets are unpublished and you want
    to move them instead of copying them.

    If --log is specified, log messages will have a comment appended
    of the form::

      (transplanted from CHANGESETHASH)

    You can rewrite the changelog message with the --filter option.
    Its argument will be invoked with the current changelog message as
    $1 and the patch as $2.

    --source/-s specifies another repository to use for selecting changesets,
    just as if it temporarily had been pulled.
    If --branch/-b is specified, these revisions will be used as
    heads when deciding which changesets to transplant, just as if only
    these revisions had been pulled.
    If --all/-a is specified, all the revisions up to the heads specified
    with --branch will be transplanted.

    Example:

    - transplant all changes up to REV on top of your current revision::

        hg transplant --branch REV --all

    You can optionally mark selected transplanted changesets as merge
    changesets. You will not be prompted to transplant any ancestors
    of a merged transplant, and you can merge descendants of them
    normally instead of transplanting them.

    Merge changesets may be transplanted directly by specifying the
    proper parent changeset by calling :hg:`transplant --parent`.

    If no merges or revisions are provided, :hg:`transplant` will
    start an interactive changeset browser.

    If a changeset application fails, you can fix the merge by hand
    and then resume where you left off by calling :hg:`transplant
    --continue/-c`.
    '''
    def incwalk(repo, csets, match=util.always):
        for node in csets:
            if match(node):
                yield node

    def transplantwalk(repo, dest, heads, match=util.always):
        '''Yield all nodes that are ancestors of a head but not ancestors
        of dest.
        If no heads are specified, the heads of repo will be used.'''
        if not heads:
            heads = repo.heads()
        ancestors = []
        for head in heads:
            ancestors.append(repo.changelog.ancestor(dest, head))
        for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
            if match(node):
                yield node

    def checkopts(opts, revs):
        if opts.get('continue'):
            if opts.get('branch') or opts.get('all') or opts.get('merge'):
                raise util.Abort(_('--continue is incompatible with '
                                   '--branch, --all and --merge'))
            return
        if not (opts.get('source') or revs or
                opts.get('merge') or opts.get('branch')):
            raise util.Abort(_('no source URL, branch revision or revision '
                               'list provided'))
        if opts.get('all'):
            if not opts.get('branch'):
                raise util.Abort(_('--all requires a branch revision'))
            if revs:
                raise util.Abort(_('--all is incompatible with a '
                                   'revision list'))

    checkopts(opts, revs)

    if not opts.get('log'):
        opts['log'] = ui.config('transplant', 'log')
    if not opts.get('filter'):
        opts['filter'] = ui.config('transplant', 'filter')

    tp = transplanter(ui, repo)
    if opts.get('edit'):
        tp.editor = cmdutil.commitforceeditor

    cmdutil.checkunfinished(repo)
    p1, p2 = repo.dirstate.parents()
    if len(repo) > 0 and p1 == revlog.nullid:
        raise util.Abort(_('no revision checked out'))
    if not opts.get('continue'):
        if p2 != revlog.nullid:
            raise util.Abort(_('outstanding uncommitted merges'))
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            raise util.Abort(_('outstanding local changes'))

    sourcerepo = opts.get('source')
    if sourcerepo:
        peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
        heads = map(peer.lookup, opts.get('branch', ()))
        source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer,
                                    onlyheads=heads, force=True)
    else:
        source = repo
        heads = map(source.lookup, opts.get('branch', ()))
        cleanupfn = None

    try:
        if opts.get('continue'):
            tp.resume(repo, source, opts)
            return

        tf = tp.transplantfilter(repo, source, p1)
        if opts.get('prune'):
            prune = set(source.lookup(r)
                        for r in scmutil.revrange(source, opts.get('prune')))
            matchfn = lambda x: tf(x) and x not in prune
        else:
            matchfn = tf
        merges = map(source.lookup, opts.get('merge', ()))
        revmap = {}
        if revs:
            for r in scmutil.revrange(source, revs):
                revmap[int(r)] = source.lookup(r)
        elif opts.get('all') or not merges:
            if source != repo:
                alltransplants = incwalk(source, csets, match=matchfn)
            else:
                alltransplants = transplantwalk(source, p1, heads,
                                                match=matchfn)
            if opts.get('all'):
                revs = alltransplants
            else:
                revs, newmerges = browserevs(ui, source, alltransplants, opts)
                merges.extend(newmerges)
            for r in revs:
                revmap[source.changelog.rev(r)] = r
        for r in merges:
            revmap[source.changelog.rev(r)] = r

        tp.apply(repo, source, revmap, merges, opts)
    finally:
        if cleanupfn:
            cleanupfn()
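As a rough illustration of the option names read above, the command function could also be driven from Python roughly as follows; the URL and branch name are placeholders, and this merely mirrors the opts dictionary rather than a documented API.
# Hedged sketch; assumes the module-level ui/hg imports used by this example.
u = ui.ui()
repo = hg.repository(u, '.')
transplant(u, repo,
           source='https://hg.example.com/other-repo',
           branch=['stable'],  # resolved through peer.lookup() above
           all=True,           # take everything up to the branch heads
           log=True)           # append '(transplanted from ...)' to messages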
Example #48
0
import os
import sys
import time

from mercurial import (
    commands,
    hg,
    pycompat,
    ui as uimod,
    util,
)

TESTDIR = os.environ["TESTDIR"]
BUNDLEPATH = os.path.join(TESTDIR, 'bundles', 'test-no-symlinks.hg')

# only makes sense to test on os which supports symlinks
if not getattr(os, "symlink", False):
    sys.exit(80) # SKIPPED_STATUS defined in run-tests.py

u = uimod.ui.load()
# hide outer repo
hg.peer(u, {}, b'.', create=True)

# unbundle with symlink support
hg.peer(u, {}, b'test0', create=True)

repo = hg.repository(u, b'test0')
commands.unbundle(u, repo, pycompat.fsencode(BUNDLEPATH), update=True)

# wait a bit, or the status call won't update the dirstate
time.sleep(1)
commands.status(u, repo)

# now disable symlink support -- this is what os.symlink would do on a
# non-symlink file system
def symlink_failure(src, dst):
    raise OSError(1, "Operation not permitted")
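The capture stops here; presumably the test then installs the stub so later operations exercise the no-symlink code path, along these lines (hypothetical continuation, not part of the captured snippet):
os.symlink = symlink_failure  # symlink creation now fails, as on a
                              # filesystem without symlink support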
Example #49
0
def _dotransplant(ui, repo, *revs, **opts):
    def incwalk(repo, csets, match=util.always):
        for node in csets:
            if match(node):
                yield node

    def transplantwalk(repo, dest, heads, match=util.always):
        '''Yield all nodes that are ancestors of a head but not ancestors
        of dest.
        If no heads are specified, the heads of repo will be used.'''
        if not heads:
            heads = repo.heads()
        ancestors = []
        ctx = repo[dest]
        for head in heads:
            ancestors.append(ctx.ancestor(repo[head]).node())
        for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
            if match(node):
                yield node

    def checkopts(opts, revs):
        if opts.get('continue'):
            if opts.get('branch') or opts.get('all') or opts.get('merge'):
                raise error.Abort(
                    _('--continue is incompatible with '
                      '--branch, --all and --merge'))
            return
        if not (opts.get('source') or revs or opts.get('merge')
                or opts.get('branch')):
            raise error.Abort(
                _('no source URL, branch revision, or revision '
                  'list provided'))
        if opts.get('all'):
            if not opts.get('branch'):
                raise error.Abort(_('--all requires a branch revision'))
            if revs:
                raise error.Abort(
                    _('--all is incompatible with a '
                      'revision list'))

    checkopts(opts, revs)

    if not opts.get('log'):
        # deprecated config: transplant.log
        opts['log'] = ui.config('transplant', 'log')
    if not opts.get('filter'):
        # deprecated config: transplant.filter
        opts['filter'] = ui.config('transplant', 'filter')

    tp = transplanter(ui, repo, opts)

    p1, p2 = repo.dirstate.parents()
    if len(repo) > 0 and p1 == revlog.nullid:
        raise error.Abort(_('no revision checked out'))
    if opts.get('continue'):
        if not tp.canresume():
            raise error.Abort(_('no transplant to continue'))
    else:
        cmdutil.checkunfinished(repo)
        if p2 != revlog.nullid:
            raise error.Abort(_('outstanding uncommitted merges'))
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            raise error.Abort(_('outstanding local changes'))

    sourcerepo = opts.get('source')
    if sourcerepo:
        peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
        heads = map(peer.lookup, opts.get('branch', ()))
        target = set(heads)
        for r in revs:
            try:
                target.add(peer.lookup(r))
            except error.RepoError:
                pass
        source, csets, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, peer, onlyheads=sorted(target), force=True)
    else:
        source = repo
        heads = map(source.lookup, opts.get('branch', ()))
        cleanupfn = None

    try:
        if opts.get('continue'):
            tp.resume(repo, source, opts)
            return

        tf = tp.transplantfilter(repo, source, p1)
        if opts.get('prune'):
            prune = set(
                source.lookup(r)
                for r in scmutil.revrange(source, opts.get('prune')))
            matchfn = lambda x: tf(x) and x not in prune
        else:
            matchfn = tf
        merges = map(source.lookup, opts.get('merge', ()))
        revmap = {}
        if revs:
            for r in scmutil.revrange(source, revs):
                revmap[int(r)] = source.lookup(r)
        elif opts.get('all') or not merges:
            if source != repo:
                alltransplants = incwalk(source, csets, match=matchfn)
            else:
                alltransplants = transplantwalk(source,
                                                p1,
                                                heads,
                                                match=matchfn)
            if opts.get('all'):
                revs = alltransplants
            else:
                revs, newmerges = browserevs(ui, source, alltransplants, opts)
                merges.extend(newmerges)
            for r in revs:
                revmap[source.changelog.rev(r)] = r
        for r in merges:
            revmap[source.changelog.rev(r)] = r

        tp.apply(repo, source, revmap, merges, opts)
    finally:
        if cleanupfn:
            cleanupfn()
Example #50
0
File: sync.py Project: noman798/system
def __init__(self, ui, repo, source, opts):
    self.ui = ui
    self.repo = repo
    self.source = source
    self.opts = opts
    self.remoterepository = hg.peer(repo, opts, ui.expandpath(source))
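The enclosing class statement is not captured; assuming a hypothetical class named syncer, construction would expand a [paths] alias through ui.expandpath() and open a peer to it:
# Hypothetical usage; 'syncer' is a made-up name for the class this
# __init__ belongs to.
u = ui.ui()
repo = hg.repository(u, '.')
s = syncer(u, repo, 'default', {})          # 'default' resolves via [paths]
u.write('%s\n' % s.remoterepository.url())  # peers expose their URL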
Example #51
0
def unifyrepo(ui, settings, **opts):
    """Unify the contents of multiple source repositories using settings.

    The settings file is a Mercurial config file (basically an INI file).
    """
    conf = unifyconfig(settings)

    # Ensure destrepo is created with generaldelta enabled.
    ui.setconfig(b'format', b'usegeneraldelta', True)
    ui.setconfig(b'format', b'generaldelta', True)

    # Verify all source repos have the same revision 0
    rev0s = set()
    for source in conf.sources:
        repo = hg.repository(ui, path=source['path'])

        # Verify
        node = repo[0].node()
        if rev0s and node not in rev0s:
            raise error.Abort(b'repository has different rev 0: %s\n' % source['name'])

        # Verify pushlog exists
        pushlog = getattr(repo, 'pushlog', None)
        if not pushlog:
            raise error.Abort(b'pushlog API not available',
                              hint=b'is the pushlog extension loaded?')

        rev0s.add(node)

    # Ensure the staging repo has all changesets from the source repos.

    stageui = ui.copy()

    # Now collect all the changeset data with pushlog info.
    # node -> (when, source, rev, who, pushid)
    nodepushinfo = {}
    pushcount = 0
    allnodes = set()

    # Obtain pushlog data from each source repo. We obtain data for every node
    # and filter later because we want to be sure we have the earliest known
    # push data for a given node.
    for source in conf.sources:
        path = source['path']
        sourcerepo = hg.repository(ui, path=source['path'])
        pushlog = getattr(sourcerepo, 'pushlog', None)

        index = sourcerepo.changelog.index
        revnode = {}
        for rev in sourcerepo:
            # revlog.node() is too slow. Use the index directly.
            node = index[rev][7]
            revnode[rev] = node
            allnodes.add(node)

        noderev = {v: k for k, v in revnode.items()}

        localpushcount = 0
        pushnodecount = 0
        for pushid, who, when, nodes in pushlog.pushes():
            pushcount += 1
            localpushcount += 1
            for node in nodes:
                pushnodecount += 1
                bnode = bin(node)

                # There is a race between us iterating the repo and querying the
                # pushlog. A new changeset could be written between when we
                # obtain nodes and encounter the pushlog. So ignore pushlog
                # for nodes we don't know about.
                if bnode not in noderev:
                    ui.warn(b'pushlog entry for unknown node: %s; '
                            b'possible race condition?\n' % node)
                    continue

                rev = noderev[bnode]

                if bnode not in nodepushinfo:
                    nodepushinfo[bnode] = (when, path, rev, who, pushid)
                else:
                    currentwhen = nodepushinfo[bnode][0]
                    if when < currentwhen:
                        nodepushinfo[bnode] = (when, path, rev, who, pushid)

        ui.write(b'obtained pushlog info for %d/%d revisions from %d pushes from %s\n' % (
                 pushnodecount, len(revnode), localpushcount, source['name']))

    # Now verify that every node in the source repos has pushlog data.
    missingpl = allnodes - set(nodepushinfo.keys())
    if missingpl:
        raise error.Abort(b'missing pushlog info for %d nodes: %s\n' % (
            len(missingpl), b', '.join(sorted(hex(n) for n in missingpl))))

    # Filter out changesets we aren't aggregating.
    # We also use this pass to identify which nodes to bookmark.
    books = {}
    sourcenodes = set()
    for source in conf.sources:
        sourcerepo = hg.repository(ui, path=source['path'])
        cl = sourcerepo.changelog
        index = cl.index

        sourcerevs = sourcerepo.revs(source['pullrevs'])
        sourcerevs.sort()
        headrevs = set(cl.headrevs())
        sourceheadrevs = headrevs & set(sourcerevs)

        # We /could/ allow multiple heads from each source repo. But for now
        # it is easier to limit to 1 head per source.
        if len(sourceheadrevs) > 1:
            raise error.Abort(b'%s has %d heads' % (source['name'], len(sourceheadrevs)),
                              hint=b'define pullrevs to limit what is aggregated')

        for rev in cl:
            if rev not in sourcerevs:
                continue

            node = index[rev][7]
            sourcenodes.add(node)
            if source['bookmark']:
                books[source['bookmark']] = node

        ui.write(b'aggregating %d/%d revisions for %d heads from %s\n' % (
                 len(sourcerevs), len(cl), len(sourceheadrevs), source['name']))

    nodepushinfo = {k: v for k, v in nodepushinfo.items() if k in sourcenodes}

    ui.write(b'aggregating %d/%d nodes from %d original pushes\n' % (
             len(nodepushinfo), len(allnodes), pushcount))

    # We now have accounting for every changeset. Because pulling changesets
    # is a bit time consuming, it is worthwhile to minimize the number of pull
    # operations. We do this by ordering all changesets by original push time
    # then emitting the minimum number of "fast forward" nodes from the tip
    # of each linear range inside that list.

    # (time, source, rev, user, pushid) -> node
    inversenodeinfo = {v: k for k, v in nodepushinfo.items()}

    destui = ui.copy()
    destui.setconfig(b'format', b'aggressivemergedeltas', True)
    destui.setconfig(b'format', b'maxchainlen', 10000)

    destrepo = hg.repository(destui, path=conf.destpath,
                             create=not os.path.exists(conf.destpath))
    destcl = destrepo.changelog
    pullpushinfo = {k: v for k, v in inversenodeinfo.items() if not destcl.hasnode(v)}

    ui.write(b'%d/%d nodes will be pulled\n' % (len(pullpushinfo), len(inversenodeinfo)))

    # Enable aggressive merge deltas on the stage repo to minimize manifest delta
    # size. This could make delta chains very long. So we may want to institute a
    # delta chain cap on the destination repo. But this will ensure the stage repo
    # has the most efficient/compact representation of deltas. Pulling from this
    # repo will also inherit the optimal delta, so we don't need to enable
    # aggressivemergedeltas on the destination repo.
    stageui.setconfig(b'format', b'aggressivemergedeltas', True)

    stagerepo = hg.repository(stageui, path=conf.stagepath,
                              create=not os.path.exists(conf.stagepath))

    for source in conf.sources:
        path = source['path']
        sourcepeer = hg.peer(ui, {}, path)
        ui.write(b'pulling %s into %s\n' % (path, conf.stagepath))
        exchange.pull(stagerepo, sourcepeer)

    pullnodes = list(emitfastforwardnodes(stagerepo, pullpushinfo))
    unifiedpushes = list(unifypushes(inversenodeinfo))

    ui.write(b'consolidated into %d pulls from %d unique pushes\n' % (
             len(pullnodes), len(unifiedpushes)))

    if not pullnodes:
        ui.write(b'nothing to do; exiting\n')
        return

    stagepeer = hg.peer(ui, {}, conf.stagepath)

    for node in pullnodes:
        # TODO Bug 1265002 - we should update bookmarks when we pull.
        # Otherwise the changesets will get replicated without a bookmark
        # and any poor soul who pulls will see a nameless head.
        exchange.pull(destrepo, stagepeer, heads=[node])
        # For some reason there is a massive memory leak (10+ MB per
        # iteration on Firefox repos) if we don't gc here.
        gc.collect()

    # Now that we've aggregated all the changesets in the destination repo,
    # define the pushlog entries.
    pushlog = getattr(destrepo, 'pushlog', None)
    if not pushlog:
        raise error.Abort(b'pushlog API not available',
                          hint=b'is the pushlog extension loaded?')

    with destrepo.lock():
        with destrepo.transaction(b'pushlog') as tr:
            insertpushes = list(newpushes(destrepo, unifiedpushes))
            ui.write(b'inserting %d pushlog entries\n' % len(insertpushes))
            pushlog.recordpushes(insertpushes, tr=tr)

    # Verify that pushlog time in revision order is always increasing.
    destnodepushtime = {}
    for push in destrepo.pushlog.pushes():
        for node in push.nodes:
            destnodepushtime[bin(node)] = push.when

    destcl = destrepo.changelog
    lastpushtime = 0
    for rev in destrepo:
        node = destcl.node(rev)
        pushtime = destnodepushtime[node]

        if pushtime < lastpushtime:
            ui.warn(b'push time for %d is older than %d\n' % (rev, rev - 1))

        lastpushtime = pushtime

    # Write bookmarks.
    ui.write(b'writing %d bookmarks\n' % len(books))

    with destrepo.wlock():
        with destrepo.lock():
            with destrepo.transaction(b'bookmarks') as tr:
                bm = bookmarks.bmstore(destrepo)
                books.update({
                    book: None  # delete any bookmarks not found in the update
                    for book in bm.keys()
                    if book not in books
                })
                # Mass replacing may not be the proper strategy. But it works for
                # our current use case.
                bm.applychanges(destrepo, tr, books.items())

    if not opts.get('skipreplicate'):
        # This is a bit hacky. Pushlog and bookmarks aren't currently replicated
        # via the normal hooks mechanism because we use the low-level APIs to
        # write them. So, we send a replication message to sync the entire repo.
        try:
            vcsr = extensions.find(b'vcsreplicator')
        except KeyError:
            raise error.Abort(b'vcsreplicator extension not installed; '
                              b'pushlog and bookmarks may not be replicated properly')

        vcsr.replicatecommand(destrepo.ui, destrepo)
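The comments above describe ordering changesets by original push time and then pulling only the tip of each linear run; emitfastforwardnodes itself is not shown. One plausible, purely illustrative shape for that idea (not the project's actual helper):
def fastforwardtips(stagerepo, pushinfo):
    # pushinfo maps (when, source, rev, user, pushid) -> node, as built above.
    # Walk nodes in push-time order and yield a node whenever its successor
    # does not extend it linearly, so each yielded node fast-forwards the
    # destination over one linear range in a single pull.
    cl = stagerepo.changelog
    ordered = [n for _key, n in sorted(pushinfo.items())]
    previous = None
    for n in ordered:
        if previous is not None and cl.rev(previous) not in cl.parentrevs(cl.rev(n)):
            yield previous
        previous = n
    if previous is not None:
        yield previous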
Example #52
0
def transplant(ui, repo, *revs, **opts):
    '''transplant changesets from another branch

    Selected changesets will be applied on top of the current working
    directory with the log of the original changeset. The changesets
    are copied and will thus appear twice in the history. Use the
    rebase extension instead if you want to move a whole branch of
    unpublished changesets.

    If --log is specified, log messages will have a comment appended
    of the form::

      (transplanted from CHANGESETHASH)

    You can rewrite the changelog message with the --filter option.
    Its argument will be invoked with the current changelog message as
    $1 and the patch as $2.

    If --source/-s is specified, selects changesets from the named
    repository. If --branch/-b is specified, selects changesets from
    the branch holding the named revision, up to that revision. If
    --all/-a is specified, all changesets on the branch will be
    transplanted, otherwise you will be prompted to select the
    changesets you want.

    :hg:`transplant --branch REVISION --all` will transplant the
    selected branch (up to the named revision) onto your current
    working directory.

    You can optionally mark selected transplanted changesets as merge
    changesets. You will not be prompted to transplant any ancestors
    of a merged transplant, and you can merge descendants of them
    normally instead of transplanting them.

    Merge changesets may be transplanted directly by specifying the
    proper parent changeset by calling :hg:`transplant --parent`.

    If no merges or revisions are provided, :hg:`transplant` will
    start an interactive changeset browser.

    If a changeset application fails, you can fix the merge by hand
    and then resume where you left off by calling :hg:`transplant
    --continue/-c`.
    '''
    def incwalk(repo, csets, match=util.always):
        for node in csets:
            if match(node):
                yield node

    def transplantwalk(repo, root, branches, match=util.always):
        if not branches:
            branches = repo.heads()
        ancestors = []
        for branch in branches:
            ancestors.append(repo.changelog.ancestor(root, branch))
        for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
            if match(node):
                yield node

    def checkopts(opts, revs):
        if opts.get('continue'):
            if opts.get('branch') or opts.get('all') or opts.get('merge'):
                raise util.Abort(
                    _('--continue is incompatible with '
                      'branch, all or merge'))
            return
        if not (opts.get('source') or revs or opts.get('merge')
                or opts.get('branch')):
            raise util.Abort(
                _('no source URL, branch tag or revision '
                  'list provided'))
        if opts.get('all'):
            if not opts.get('branch'):
                raise util.Abort(_('--all requires a branch revision'))
            if revs:
                raise util.Abort(
                    _('--all is incompatible with a '
                      'revision list'))

    checkopts(opts, revs)

    if not opts.get('log'):
        opts['log'] = ui.config('transplant', 'log')
    if not opts.get('filter'):
        opts['filter'] = ui.config('transplant', 'filter')

    tp = transplanter(ui, repo)
    if opts.get('edit'):
        tp.editor = cmdutil.commitforceeditor

    p1, p2 = repo.dirstate.parents()
    if len(repo) > 0 and p1 == revlog.nullid:
        raise util.Abort(_('no revision checked out'))
    if not opts.get('continue'):
        if p2 != revlog.nullid:
            raise util.Abort(_('outstanding uncommitted merges'))
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            raise util.Abort(_('outstanding local changes'))

    sourcerepo = opts.get('source')
    if sourcerepo:
        source = hg.peer(ui, opts, ui.expandpath(sourcerepo))
        branches = map(source.lookup, opts.get('branch', ()))
        source, csets, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, source, onlyheads=branches, force=True)
    else:
        source = repo
        branches = map(source.lookup, opts.get('branch', ()))
        cleanupfn = None

    try:
        if opts.get('continue'):
            tp.resume(repo, source, opts)
            return

        tf = tp.transplantfilter(repo, source, p1)
        if opts.get('prune'):
            prune = [
                source.lookup(r)
                for r in scmutil.revrange(source, opts.get('prune'))
            ]
            matchfn = lambda x: tf(x) and x not in prune
        else:
            matchfn = tf
        merges = map(source.lookup, opts.get('merge', ()))
        revmap = {}
        if revs:
            for r in scmutil.revrange(source, revs):
                revmap[int(r)] = source.lookup(r)
        elif opts.get('all') or not merges:
            if source != repo:
                alltransplants = incwalk(source, csets, match=matchfn)
            else:
                alltransplants = transplantwalk(source,
                                                p1,
                                                branches,
                                                match=matchfn)
            if opts.get('all'):
                revs = alltransplants
            else:
                revs, newmerges = browserevs(ui, source, alltransplants, opts)
                merges.extend(newmerges)
            for r in revs:
                revmap[source.changelog.rev(r)] = r
        for r in merges:
            revmap[source.changelog.rev(r)] = r

        tp.apply(repo, source, revmap, merges, opts)
    finally:
        if cleanupfn:
            cleanupfn()