Code Example #1
File: phabstatus.py Project: davidshepherd7/dotfiles
def populateresponseforphab(repo, diffnum):
    """:populateresponse: Runs the memoization function
        for use of phabstatus and sync status
    """
    if not hgutil.safehasattr(repo, '_phabstatusrevs'):
        return

    if (hgutil.safehasattr(repo, '_phabstatuscache') and
            (repo, diffnum) in repo._phabstatuscache):
        # We already have cached data for this diff
        return

    next_revs = repo._phabstatusrevs.peekahead()
    if repo._phabstatusrevs.done:
        # repo._phabstatusrevs doesn't have anything else to process.
        # Remove it so we will bail out earlier next time.
        del repo._phabstatusrevs

    alldiffnumbers = [getdiffnum(repo, repo[rev])
                      for rev in next_revs]
    okdiffnumbers = set(d for d in alldiffnumbers if d is not None)
    # Make sure we always include the requested diff number
    okdiffnumbers.add(diffnum)
    # Populate the cache; the result will be used by the templater
    getdiffstatus(repo, *okdiffnumbers)
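
Every example on this page feature-detects through Mercurial's safehasattr helper. For reference, a minimal sketch of the helper itself, modeled on the historical mercurial/util.py (the _notset sentinel name comes from that module):

_notset = object()

def safehasattr(thing, attr):
    # Python 2's hasattr() returned False on *any* exception, which could
    # mask real bugs in properties; getattr() with a default only
    # suppresses AttributeError, so other errors still propagate.
    return getattr(thing, attr, _notset) is not _notset
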
Code Example #2
File: blackbox.py Project: pierfort123/mercurial
        def log(self, event, *msg, **opts):
            global lastblackbox
            super(blackboxui, self).log(event, *msg, **opts)

            if '*' not in self.track and event not in self.track:
                return

            if util.safehasattr(self, '_blackbox'):
                blackbox = self._blackbox
            elif util.safehasattr(self, '_bbopener'):
                try:
                    self._blackbox = self._openlogfile()
                except (IOError, OSError) as err:
                    self.debug('warning: cannot write to blackbox.log: %s\n' %
                               err.strerror)
                    del self._bbopener
                    self._blackbox = None
                blackbox = self._blackbox
            else:
                # certain ui instances exist outside the context of
                # a repo, so just default to the last blackbox that
                # was seen.
                blackbox = lastblackbox

            if blackbox:
                date = util.datestr(None, '%Y/%m/%d %H:%M:%S')
                user = util.getuser()
                formattedmsg = msg[0] % msg[1:]
                try:
                    blackbox.write('%s %s> %s' % (date, user, formattedmsg))
                except IOError as err:
                    self.debug('warning: cannot write to blackbox.log: %s\n' %
                               err.strerror)
                lastblackbox = blackbox
Code Example #3
File: perf.py Project: CSCI-362-02-2015/RedTeam
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, 'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, '_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
Code Example #4
def extsetup(ui):
    """insert command wrappers for a bunch of commands"""

    docvals = {"extension": "hgsubversion"}
    for cmd, (generic, target, fixdoc, ppopts, opts) in wrapcmds.iteritems():

        if fixdoc and wrappers.generic.__doc__:
            docvals["command"] = cmd
            docvals["Command"] = cmd.capitalize()
            docvals["target"] = target
            doc = wrappers.generic.__doc__.strip() % docvals
            fn = getattr(commands, cmd)
            fn.__doc__ = fn.__doc__.rstrip() + "\n\n    " + doc

        wrapped = generic and wrappers.generic or getattr(wrappers, cmd)
        entry = extensions.wrapcommand(commands.table, cmd, wrapped)
        if ppopts:
            entry[1].extend(svnopts)
        if opts:
            entry[1].extend(opts)

    try:
        rebase = extensions.find("rebase")
        if not rebase:
            return
        entry = extensions.wrapcommand(rebase.cmdtable, "rebase", wrappers.rebase)
        entry[1].append(("", "svn", None, "automatic svn rebase"))
    except:
        pass

    if not hgutil.safehasattr(localrepo.localrepository, "push"):
        # Mercurial >= 3.2
        extensions.wrapfunction(exchange, "push", wrappers.exchangepush)
    if not hgutil.safehasattr(localrepo.localrepository, "pull"):
        # Mercurial >= 3.2
        extensions.wrapfunction(exchange, "pull", wrappers.exchangepull)

    helpdir = os.path.join(os.path.dirname(__file__), "help")

    entries = (
        (
            ["subversion"],
            "Working with Subversion Repositories",
            lambda: open(os.path.join(helpdir, "subversion.rst")).read(),
        ),
    )

    help.helptable.extend(entries)

    templatekw.keywords.update(util.templatekeywords)

    revset.symbols.update(util.revsets)

    subrepo.types["hgsubversion"] = svnexternals.svnsubrepo
Code Example #5
File: __init__.py Project: seewindcn/tortoisehg
def extsetup(ui):
    """insert command wrappers for a bunch of commands"""
    docvals = {'extension': 'hgsubversion'}
    for cmd, (generic, target, fixdoc, ppopts, opts) in wrapcmds.iteritems():

        if fixdoc and wrappers.generic.__doc__:
            docvals['command'] = cmd
            docvals['Command'] = cmd.capitalize()
            docvals['target'] = target
            doc = wrappers.generic.__doc__.strip() % docvals
            fn = getattr(commands, cmd)
            fn.__doc__ = fn.__doc__.rstrip() + '\n\n    ' + doc

        wrapped = generic and wrappers.generic or getattr(wrappers, cmd)
        entry = extensions.wrapcommand(commands.table, cmd, wrapped)
        if ppopts:
            entry[1].extend(svnopts)
        if opts:
            entry[1].extend(opts)

    try:
        rebase = extensions.find('rebase')
        if not rebase:
            return
        entry = extensions.wrapcommand(rebase.cmdtable, 'rebase', wrappers.rebase)
        entry[1].append(('', 'svn', None, 'automatic svn rebase'))
    except:
        pass

    if not hgutil.safehasattr(localrepo.localrepository, 'push'):
        # Mercurial >= 3.2
        extensions.wrapfunction(exchange, 'push', wrappers.exchangepush)
    if not hgutil.safehasattr(localrepo.localrepository, 'pull'):
        # Mercurial >= 3.2
        extensions.wrapfunction(exchange, 'pull', wrappers.exchangepull)

    helpdir = os.path.join(os.path.dirname(__file__), 'help')

    entries = (
        (['subversion'],
         "Working with Subversion Repositories",
         # Mercurial >= 3.6: doc(ui)
         lambda *args: open(os.path.join(helpdir, 'subversion.rst')).read()),
    )

    help.helptable.extend(entries)

    templatekw.keywords.update(util.templatekeywords)

    revset.symbols.update(util.revsets)

    subrepo.types['hgsubversion'] = svnexternals.svnsubrepo
Code Example #6
File: relink.py Project: Distrotech/mercurial
def relink(ui, repo, origin=None, **opts):
    """recreate hardlinks between two repositories

    When repositories are cloned locally, their data files will be
    hardlinked so that they only use the space of a single repository.

    Unfortunately, subsequent pulls into either repository will break
    hardlinks for any files touched by the new changesets, even if
    both repositories end up pulling the same changes.

    Similarly, passing --rev to "hg clone" will fail to use any
    hardlinks, falling back to a complete copy of the source
    repository.

    This command lets you recreate those hardlinks and reclaim that
    wasted space.

    This repository will be relinked to share space with ORIGIN, which
    must be on the same local disk. If ORIGIN is omitted, looks for
    "default-relink", then "default", in [paths].

    Do not attempt any read operations on this repository while the
    command is running. (Both repositories will be locked against
    writes.)
    """
    if (not util.safehasattr(util, 'samefile') or
        not util.safehasattr(util, 'samedevice')):
        raise error.Abort(_('hardlinks are not supported on this system'))
    src = hg.repository(repo.baseui, ui.expandpath(origin or 'default-relink',
                                          origin or 'default'))
    ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
    if repo.root == src.root:
        ui.status(_('there is nothing to relink\n'))
        return

    if not util.samedevice(src.store.path, repo.store.path):
        # No point in continuing
        raise error.Abort(_('source and destination are on different devices'))

    locallock = repo.lock()
    try:
        remotelock = src.lock()
        try:
            candidates = sorted(collect(src, ui))
            targets = prune(candidates, src.store.path, repo.store.path, ui)
            do_relink(src.store.path, repo.store.path, targets, ui)
        finally:
            remotelock.release()
    finally:
        locallock.release()
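
Per the docstring, ORIGIN falls back to the "default-relink" and then "default" entries in [paths] when omitted; a typical explicit invocation (the path is purely illustrative) looks like:

hg relink ../upstream-clone
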
Code Example #7
def _cleanup(orig):
    # close pipee first so peer.cleanup reading it won't deadlock,
    # if there are other processes with pipeo open (i.e. us).
    peer = orig.im_self
    if util.safehasattr(peer, 'pipee'):
        peer.pipee.close()
    return orig()
Code Example #8
def _adjustlinkrev(orig, self, *args, **kwargs):
    # When generating file blobs, taking the real path is too slow on large
    # repos, so force it to just return the linkrev directly.
    repo = self._repo
    if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev:
        return self._filelog.linkrev(self._filelog.rev(self._filenode))
    return orig(self, *args, **kwargs)
Code Example #9
    def __init__(self, url="", ra=None):
        self.pool = Pool()
        self.svn_url = url
        self.username = ''
        self.password = ''

        # Only Subversion 1.4 has reparent()
        if ra is None or not util.safehasattr(svn.ra, 'reparent'):
            self.client = svn.client.create_context(self.pool)
            ab = _create_auth_baton(self.pool)
            if False:
                svn.core.svn_auth_set_parameter(
                    ab, svn.core.SVN_AUTH_PARAM_DEFAULT_USERNAME, self.username)
                svn.core.svn_auth_set_parameter(
                    ab, svn.core.SVN_AUTH_PARAM_DEFAULT_PASSWORD, self.password)
            self.client.auth_baton = ab
            self.client.config = svn_config
            try:
                self.ra = svn.client.open_ra_session(
                    self.svn_url,
                    self.client, self.pool)
            except SubversionException, (inst, num):
                if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
                           svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
                           svn.core.SVN_ERR_BAD_URL):
                    raise NotBranchError(url)
                raise
Code Example #10
File: strip.py Project: areshero/ThirdWorldApp
def strip(ui, repo, revs, update=True, backup=True, force=None, bookmark=None):
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            checklocalchanges(repo, force=force)
            urev, p2 = repo.changelog.parents(revs[0])
            if (util.safehasattr(repo, 'mq') and
                p2 != nullid
                and p2 in [x.node for x in repo.mq.applied]):
                urev = p2
            hg.clean(repo, urev)
            repo.dirstate.write()

        repair.strip(ui, repo, revs, backup)

        marks = repo._bookmarks
        if bookmark:
            if bookmark == repo._bookmarkcurrent:
                bookmarks.unsetcurrent(repo)
            del marks[bookmark]
            marks.write()
            ui.write(_("bookmark '%s' deleted\n") % bookmark)
    finally:
        release(lock, wlock)
Code Example #11
        def _updatecallstreamopts(self, command, opts):
            if command != 'getbundle':
                return
            if 'remotefilelog' not in shallowutil.peercapabilities(self):
                return
            if not util.safehasattr(self, '_localrepo'):
                return
            if constants.REQUIREMENT not in self._localrepo.requirements:
                return

            bundlecaps = opts.get('bundlecaps')
            if bundlecaps:
                bundlecaps = [bundlecaps]
            else:
                bundlecaps = []

            # shallow, includepattern, and excludepattern are a hacky way of
            # carrying over data from the local repo to this getbundle
            # command. We need to do it this way because bundle1 getbundle
            # doesn't provide any other place we can hook in to manipulate
            # getbundle args before it goes across the wire. Once we get rid
            # of bundle1, we can use bundle2's _pullbundle2extraprepare to
            # do this more cleanly.
            bundlecaps.append('remotefilelog')
            if self._localrepo.includepattern:
                patterns = '\0'.join(self._localrepo.includepattern)
                includecap = "includepattern=" + patterns
                bundlecaps.append(includecap)
            if self._localrepo.excludepattern:
                patterns = '\0'.join(self._localrepo.excludepattern)
                excludecap = "excludepattern=" + patterns
                bundlecaps.append(excludecap)
            opts['bundlecaps'] = ','.join(bundlecaps)
Code Example #12
def _create_auth_baton(pool):
    """Create a Subversion authentication baton. """
    import svn.client
    # Give the client context baton a suite of authentication
    # providers.
    providers = [
        svn.client.get_simple_provider(pool),
        svn.client.get_username_provider(pool),
        svn.client.get_ssl_client_cert_file_provider(pool),
        svn.client.get_ssl_client_cert_pw_file_provider(pool),
        svn.client.get_ssl_server_trust_file_provider(pool),
        ]
    # Platform-dependent authentication methods
    getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider',
                          None)
    if getprovider:
        # Available in svn >= 1.6
        for name in ('gnome_keyring', 'keychain', 'kwallet', 'windows'):
            for type in ('simple', 'ssl_client_cert_pw', 'ssl_server_trust'):
                p = getprovider(name, type, pool)
                if p:
                    providers.append(p)
    else:
        if util.safehasattr(svn.client, 'get_windows_simple_provider'):
            providers.append(svn.client.get_windows_simple_provider(pool))

    return svn.core.svn_auth_open(providers, pool)
Code Example #13
File: chgserver.py Project: motlin/cyg
    def getpager(self):
        """Read cmdargs and write pager command to r-channel if enabled

        If the pager isn't enabled, this writes '\0' because channeledoutput
        does not allow writing empty data.
        """
        args = self._readlist()
        try:
            cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui,
                                                                     args)
        except (error.Abort, error.AmbiguousCommand, error.CommandError,
                error.UnknownCommand):
            cmd = None
            options = {}
        if not cmd or 'pager' not in options:
            self.cresult.write('\0')
            return

        pagercmd = _setuppagercmd(self.ui, options, cmd)
        if pagercmd:
            # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so
            # we can exit if the pipe to the pager is closed
            if util.safehasattr(signal, 'SIGPIPE') and \
                    signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN:
                signal.signal(signal.SIGPIPE, signal.SIG_DFL)
            self.cresult.write(pagercmd)
        else:
            self.cresult.write('\0')
Code Example #14
File: chgserver.py Project: motlin/cyg
def _loadnewui(srcui, args):
    newui = srcui.__class__()
    for a in ['fin', 'fout', 'ferr', 'environ']:
        setattr(newui, a, getattr(srcui, a))
    if util.safehasattr(srcui, '_csystem'):
        newui._csystem = srcui._csystem

    # internal config: extensions.chgserver
    newui.setconfig('extensions', 'chgserver',
                    srcui.config('extensions', 'chgserver'), '--config')

    # command line args
    args = args[:]
    dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))

    # stolen from tortoisehg.util.copydynamicconfig()
    for section, name, value in srcui.walkconfig():
        source = srcui.configsource(section, name)
        if ':' in source or source == '--config':
            # path:line or command line
            continue
        if source == 'none':
            # ui.configsource returns 'none' by default
            source = ''
        newui.setconfig(section, name, value, source)

    # load wd and repo config, copied from dispatch.py
    cwds = dispatch._earlygetopt(['--cwd'], args)
    cwd = cwds and os.path.realpath(cwds[-1]) or None
    rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)

    return (newui, newlui)
Code Example #15
File: sampling.py Project: davidshepherd7/dotfiles
        def log(self, event, *msg, **opts):
            """Redirect filtered log event to a sampling file
            The configuration looks like:
            [sampling]
            filepath = path/to/file
            key.eventname = value
            key.eventname2 = value2

            If an event name appears in the config, it is logged to the
            samplingfile augmented with value stored as ref.

            Example:
            [sampling]
            filepath = path/to/file
            key.perfstatus = perf_status

            Assuming that we call:
            ui.log('perfstatus', t=3)
            ui.log('perfcommit', t=3)
            ui.log('perfstatus', t=42)

            Then we will log in path/to/file, two JSON strings separated by \0
            one for each perfstatus, like:
            {"event":"perfstatus",
             "ref":"perf_status",
             "msg":"",
             "opts":{"t":3}}\0
            {"event":"perfstatus",
             "ref":"perf_status",
             "msg":"",
             "opts":{"t":42}}\0
            """
            if not util.safehasattr(self, 'samplingfilters'):
                self.samplingfilters = logtofile.computesamplingfilters(self)
            if event not in self.samplingfilters:
                return super(logtofile, self).log(event, *msg, **opts)

            # special case: remove less interesting blocked fields starting
            # with "unknown_" or "alias_".
            if event == 'uiblocked':
                opts = {k: v
                        for k, v in opts.items()
                        if (not k.startswith('alias_') and not
                            k.startswith('unknown_'))}

            ref = self.samplingfilters[event]
            script = _getcandidatelocation(ui)
            if script:
                try:
                    opts["metrics_type"] = event
                    if msg:
                        # ui.log treats msg as a format string + format args.
                        opts["msg"] = msg[0] % msg[1:]
                    with open(script, 'a') as outfile:
                        outfile.write(json.dumps({"data": opts,
                                                  "category": ref}))
                        outfile.write("\0")
                except EnvironmentError:
                    pass
            return super(logtofile, self).log(event, *msg, **opts)
Code Example #16
File: pager.py Project: ZanderZhang/Andriod-Learning
    def pagecmd(orig, ui, options, cmd, cmdfunc):
        p = ui.config("pager", "pager", os.environ.get("PAGER"))
        usepager = False
        always = util.parsebool(options['pager'])
        auto = options['pager'] == 'auto'

        if not p:
            pass
        elif always:
            usepager = True
        elif not auto:
            usepager = False
        else:
            attend = ui.configlist('pager', 'attend', attended)
            ignore = ui.configlist('pager', 'ignore')
            cmds, _ = cmdutil.findcmd(cmd, commands.table)

            for cmd in cmds:
                var = 'attend-%s' % cmd
                if ui.config('pager', var):
                    usepager = ui.configbool('pager', var)
                    break
                if (cmd in attend or
                     (cmd not in ignore and not attend)):
                    usepager = True
                    break

        if usepager:
            ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
            ui.setconfig('ui', 'interactive', False, 'pager')
            if util.safehasattr(signal, "SIGPIPE"):
                signal.signal(signal.SIGPIPE, signal.SIG_DFL)
            _runpager(ui, p)
        return orig(ui, options, cmd, cmdfunc)
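
The attend/ignore lookups above correspond to hgrc settings along these lines (a hypothetical snippet; the command names are only illustrative):

[pager]
pager = less -FRX
attend = annotate, diff, log
ignore = version
attend-status = False
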
Code Example #17
def checkhghelps():
    errorcnt = 0
    for names, sec, doc in helptable:
        if util.safehasattr(doc, '__call__'):
            doc = doc()
        errorcnt += checkseclevel(doc,
                                  '%s help topic' % names[0],
                                  initlevel_topic)

    errorcnt += checkcmdtable(table, '%s command', initlevel_cmd)

    for name in sorted(extensions.enabled().keys() +
                       extensions.disabled().keys()):
        mod = extensions.load(None, name, None)
        if not mod.__doc__:
            verbose('skip checking %s extension: no help document' % name)
            continue
        errorcnt += checkseclevel(mod.__doc__,
                                  '%s extension' % name,
                                  initlevel_ext)

        cmdtable = getattr(mod, 'cmdtable', None)
        if cmdtable:
            errorcnt += checkcmdtable(cmdtable,
                                      '%s command of ' + name + ' extension',
                                      initlevel_ext_cmd)
    return errorcnt
Code Example #18
def extsetup(ui):
    localrepo.moderncaps.add('_evoext_b2x_obsmarkers_0')
    gboptsmap['evo_obscommon'] = 'nodes'
    if not util.safehasattr(obsolete.obsstore, 'relevantmarkers'):
        obsolete.obsstore = pruneobsstore
        obsolete.obsstore.relevantmarkers = relevantmarkers
    hgweb_mod.perms['evoext_pushobsmarkers_0'] = 'push'
    hgweb_mod.perms['evoext_pullobsmarkers_0'] = 'pull'
    hgweb_mod.perms['evoext_obshash'] = 'pull'
    wireproto.commands['evoext_pushobsmarkers_0'] = (srv_pushobsmarkers, '')
    wireproto.commands['evoext_pullobsmarkers_0'] = (srv_pullobsmarkers, '*')
    # wrap module content
    origfunc = exchange.getbundle2partsmapping['obsmarkers']
    def newfunc(*args, **kwargs):
        return _getbundleobsmarkerpart(origfunc, *args, **kwargs)
    exchange.getbundle2partsmapping['obsmarkers'] = newfunc
    extensions.wrapfunction(wireproto, 'capabilities', capabilities)
    # wrap command content
    oldcap, args = wireproto.commands['capabilities']
    def newcap(repo, proto):
        return capabilities(oldcap, repo, proto)
    wireproto.commands['capabilities'] = (newcap, args)
    wireproto.commands['evoext_obshash'] = (srv_obshash, 'nodes')
    wireproto.commands['evoext_obshash1'] = (srv_obshash1, 'nodes')
    # specific simple4server content
    extensions.wrapfunction(pushkey, '_nslist', _nslist)
    pushkey._namespaces['namespaces'] = (lambda *x: False, pushkey._nslist)
Code Example #19
File: __init__.py Project: cmjonze/mercurial
def wrapdirstate(orig, self):
    ds = orig(self)
    # only override the dirstate when Watchman is available for the repo
    if util.safehasattr(self, "_fsmonitorstate"):
        ds.__class__ = makedirstate(ds.__class__)
        ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient)
    return ds
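
A wrapper like this is typically installed over the repository's cached dirstate property from an extension setup hook; a sketch in the style of the fsmonitor extension (extensions.wrapfilecache is Mercurial's real helper, the placement here is illustrative):

from mercurial import extensions, localrepo

def extsetup(ui):
    # Replace the cached localrepository.dirstate factory; the
    # wrapdirstate() above receives the original as its first argument.
    extensions.wrapfilecache(localrepo.localrepository, 'dirstate',
                             wrapdirstate)
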
Code Example #20
File: strip.py Project: motlin/cyg
def strip(ui, repo, revs, update=True, backup=True, force=None, bookmarks=None):
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            checklocalchanges(repo, force=force)
            urev, p2 = repo.changelog.parents(revs[0])
            if (util.safehasattr(repo, 'mq') and
                p2 != nullid
                and p2 in [x.node for x in repo.mq.applied]):
                urev = p2
            hg.clean(repo, urev)
            repo.dirstate.write(repo.currenttransaction())

        repair.strip(ui, repo, revs, backup)

        repomarks = repo._bookmarks
        if bookmarks:
            with repo.transaction('strip') as tr:
                if repo._activebookmark in bookmarks:
                    bookmarksmod.deactivate(repo)
                for bookmark in bookmarks:
                    del repomarks[bookmark]
                repomarks.recordchange(tr)
            for bookmark in sorted(bookmarks):
                ui.write(_("bookmark '%s' deleted\n") % bookmark)
    finally:
        release(lock, wlock)
Code Example #21
File: blackbox.py Project: motlin/cyg
def _partialinit(self):
    if util.safehasattr(self, '_bbvfs'):
        return
    self._bbfp = None
    self._bbinlog = False
    self._bbrepo = None
    self._bbvfs = None
Code Example #22
File: journal.py Project: motlin/cyg
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    if util.safehasattr(repo, 'journal'):
        dirstate.journalstorage = repo.journal
        dirstate.addparentchangecallback('journal', recorddirstateparents)
    return dirstate
Code Example #23
File: pager.py Project: spraints/for-example
def killpager():
    if util.safehasattr(signal, "SIGINT"):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
    pager.stdin.close()
    os.dup2(stdout, sys.stdout.fileno())
    os.dup2(stderr, sys.stderr.fileno())
    pager.wait()
Code Example #24
File: pager.py Project: spraints/for-example
def _pagerfork(ui, p):
    if not util.safehasattr(os, 'fork'):
        sys.stdout = util.popen(p, 'wb')
        if ui._isatty(sys.stderr):
            sys.stderr = sys.stdout
        return
    fdin, fdout = os.pipe()
    pid = os.fork()
    if pid == 0:
        os.close(fdin)
        os.dup2(fdout, sys.stdout.fileno())
        if ui._isatty(sys.stderr):
            os.dup2(fdout, sys.stderr.fileno())
        os.close(fdout)
        return
    os.dup2(fdin, sys.stdin.fileno())
    os.close(fdin)
    os.close(fdout)
    try:
        os.execvp('/bin/sh', ['/bin/sh', '-c', p])
    except OSError, e:
        if e.errno == errno.ENOENT:
            # no /bin/sh, try executing the pager directly
            args = shlex.split(p)
            os.execvp(args[0], args)
        else:
            raise
Code Example #25
File: sparse.py Project: davidshepherd7/dotfiles
def _hashmatcher(matcher):
    if util.safehasattr(matcher, 'hash'):
        return matcher.hash()

    sha1 = hashlib.sha1()
    sha1.update(repr(matcher))
    return sha1.hexdigest()
Code Example #26
File: sparse.py Project: davidshepherd7/dotfiles
def reposetup(ui, repo):
    if _fbsparseexists(repo.ui):
        return
    if not util.safehasattr(repo, 'dirstate'):
        return

    _wraprepo(ui, repo)
Code Example #27
File: __init__.py Project: davidshepherd7/dotfiles
def reposetup(ui, repo):
    client = ui.configbool('fastannotate', 'client', default=None)
    if client is None:
        if util.safehasattr(repo, 'requirements'):
            client = 'remotefilelog' in repo.requirements
    if client:
        protocol.clientreposetup(ui, repo)
Code Example #28
File: svnexternals.py Project: seewindcn/tortoisehg
def __init__(self, ctx, path, state):
    state = (state[0].split(':', 1)[1], state[1])
    super(svnsubrepo, self).__init__(ctx, path, state)
    # Mercurial 3.3+ set 'ui' rather than '_ui' -- set that and use 'ui'
    # everywhere to maintain compatibility across versions
    if not hgutil.safehasattr(self, 'ui'):
        self.ui = ctx._repo.ui
Code Example #29
File: chgserver.py Project: motlin/cyg
def system(self, cmd, environ=None, cwd=None, onerr=None,
           errprefix=None):
    # fallback to the original system method if the output needs to be
    # captured (to self._buffers), or the output stream is not stdout
    # (e.g. stderr, cStringIO), because the chg client is not aware of
    # these situations and will behave differently (write to stdout).
    if (any(s[1] for s in self._bufferstates)
        or not util.safehasattr(self.fout, 'fileno')
        or self.fout.fileno() != sys.stdout.fileno()):
        return super(chgui, self).system(cmd, environ, cwd, onerr,
                                         errprefix)
    # copied from mercurial/util.py:system()
    self.flush()
    def py2shell(val):
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    env = os.environ.copy()
    if environ:
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = util.hgexecutable()
    rc = self._csystem(cmd, env, cwd)
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
                            util.explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
Code Example #30
File: server.py Project: html-shell/mozilla-build
def log_request(self, code='-', size='-'):
    xheaders = []
    if util.safehasattr(self, 'headers'):
        xheaders = [h for h in self.headers.items()
                    if h[0].startswith('x-')]
    self.log_message('"%s" %s %s%s',
                     self.requestline, str(code), str(size),
                     ''.join([' %s:%s' % h for h in sorted(xheaders)]))
Code Example #31
def anon():
    if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
        return
    repo.ranprefetch = True
    repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
Code Example #32
def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, 'repos')
    if not os.path.exists(repospath):
        ui.warn(_("no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, 'r')
    repos = set([r[:-1] for r in reposfile.readlines()])
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(_("analyzing repositories"),
                               unit="repos",
                               total=len(repos))
    for path in repos:
        progress.update(count)
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, 'name'):
            ui.warn(
                _("repo %s is a misconfigured remotefilelog repo\n") % path)
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
        gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If repack cannot be performed due to not enough disk space
                # continue doing garbage collection of loose files w/o repack
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))

        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)

    progress.complete()

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, 'w')
        reposfile.writelines([("%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    elif not filesrepacked:
        ui.warn(_("warning: no valid repos in repofile\n"))
Code Example #33
def cloneshallow(orig, ui, repo, *args, **opts):
    if opts.get(r'shallow'):
        repos = []

        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (self.__class__.__bases__[0],
                                                self.unfiltered().__class__)
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                self._writerequirements()

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)

        extensions.wrapfunction(exchange, 'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts[r'includepattern'] = '\0'.join(
                            repo.includepattern)
                    if repo.excludepattern:
                        opts[r'excludepattern'] = '\0'.join(
                            repo.excludepattern)
                    return remote._callstream('stream_out_shallow', **opts)
                else:
                    return orig()

            extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)

        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)

        extensions.wrapfunction(streamclone, 'maybeperformlegacystreamclone',
                                stream_wrap)

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if 'v2' in pullop.remotebundle2caps.get('stream', []):
                pullop.remotebundle2caps['stream'] = [
                    c for c in pullop.remotebundle2caps['stream'] if c != 'v2'
                ]
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements

        extensions.wrapfunction(streamclone, 'canperformstreamclone',
                                canperformstreamclone)

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get(r'shallow'):
            for r in repos:
                if util.safehasattr(r, 'fileservice'):
                    r.fileservice.close()
Code Example #34
File: bugzilla.py Project: nermina86/docker
def __init__(self, use_datetime=0):
    if util.safehasattr(xmlrpclib.Transport, "__init__"):
        xmlrpclib.Transport.__init__(self, use_datetime)
Code Example #35
def repoRootPath(self):
    if util.safehasattr(self._agent, 'rootPath'):
        return self._agent.rootPath()
Code Example #36
wireproto = import_module('mercurial.wireprotov1server')
if not wireproto:
    wireproto = import_module('mercurial.wireproto')

testedwith = '4.2 4.3 4.4 4.5 4.6 4.7'
minimumhgversion = '4.2'
buglink = 'https://bugzilla.mozilla.org/enter_bug.cgi?product=Developer%20Services&component=Mercurial%3A%20firefoxtree'
# The root revisions in mozilla-central and comm-central, respectively.
MOZ_ROOT_REV = '8ba995b74e18334ab3707f27e9eb8f4e37ba3d29'
COMM_ROOT_REV = 'e4f4569d451a5e0d12a6aa33ebd916f979dd8faa'

cmdtable = {}

# TRACKING hg43 Mercurial 4.3 introduced registrar.command as a replacement for
# cmdutil.command.
if util.safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
else:
    command = cmdutil.command(cmdtable)

# TRACKING hg43 Mercurial 4.3 introduced the config registrar. 4.4 requires
# config items to be registered to avoid a devel warning.
if util.safehasattr(registrar, 'configitem'):
    configtable = {}
    configitem = registrar.configitem(configtable)

    configitem('firefoxtree', 'servetags',
               default=configitems.dynamicdefault)
    configitem('firefoxtree', 'servetagsfrombookmarks',
               default=configitems.dynamicdefault)
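
Whichever module supplies it, the resulting command decorator is used the same way; a minimal hypothetical sketch:

@command('mycommand', [], 'hg mycommand')
def mycommand(ui, repo, **opts):
    """Illustrative command registered through the decorator above."""
    ui.write('hello\n')
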
Code Example #37
File: hgcia.py Project: michalliu/MyCygwin
  [web]
  # If you want hyperlinks (optional)
  baseurl = http://server/path/to/repo
"""

from mercurial.i18n import _
from mercurial.node import bin, short
from mercurial import cmdutil, patch, templater, util, mail
import email.Parser

import socket, xmlrpclib
from xml.sax import saxutils
testedwith = 'internal'

socket_timeout = 30 # seconds
if util.safehasattr(socket, 'setdefaulttimeout'):
    # set a timeout for the socket so you don't have to wait so looooong
    # when cia.vc is having problems. requires python >= 2.3:
    socket.setdefaulttimeout(socket_timeout)

HGCIA_VERSION = '0.1'
HGCIA_URL = 'http://hg.kublai.com/mercurial/hgcia'


class ciamsg(object):
    """ A CIA message """
    def __init__(self, cia, ctx):
        self.cia = cia
        self.ctx = ctx
        self.url = self.cia.url
        if self.url:
Code Example #38
File: git.py Project: sdr01810/intellij-idea-ce
class convert_git(converter_source):
    # Windows does not support the GIT_DIR= construct, while other
    # systems cannot remove an environment variable. Just assume none
    # have both issues.
    if util.safehasattr(os, 'unsetenv'):
        def gitopen(self, s, err=None):
            prevgitdir = os.environ.get('GIT_DIR')
            os.environ['GIT_DIR'] = self.path
            try:
                if err == subprocess.PIPE:
                    (stdin, stdout, stderr) = util.popen3(s)
                    return stdout
                elif err == subprocess.STDOUT:
                    return self.popen_with_stderr(s)
                else:
                    return util.popen(s, 'rb')
            finally:
                if prevgitdir is None:
                    del os.environ['GIT_DIR']
                else:
                    os.environ['GIT_DIR'] = prevgitdir
    else:
        def gitopen(self, s, err=None):
            if err == subprocess.PIPE:
                (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s))
                return so
            elif err == subprocess.STDOUT:
                return self.popen_with_stderr(s)
            else:
                return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')

    def popen_with_stderr(self, s):
        p = subprocess.Popen(s, shell=True, bufsize=-1,
                             close_fds=util.closefds,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             universal_newlines=False,
                             env=None)
        return p.stdout

    def gitread(self, s):
        fh = self.gitopen(s)
        data = fh.read()
        return data, fh.close()

    def __init__(self, ui, path, rev=None):
        super(convert_git, self).__init__(ui, path, rev=rev)

        if os.path.isdir(path + "/.git"):
            path += "/.git"
        if not os.path.exists(path + "/objects"):
            raise NoRepo(_("%s does not look like a Git repository") % path)

        checktool('git', 'git')

        self.path = path
        self.submodules = []

    def getheads(self):
        if not self.rev:
            heads, ret = self.gitread('git rev-parse --branches --remotes')
            heads = heads.splitlines()
        else:
            heads, ret = self.gitread("git rev-parse --verify %s" % self.rev)
            heads = [heads[:-1]]
        if ret:
            raise util.Abort(_('cannot retrieve git heads'))
        return heads

    def catfile(self, rev, type):
        if rev == hex(nullid):
            raise IOError
        data, ret = self.gitread("git cat-file %s %s" % (type, rev))
        if ret:
            raise util.Abort(_('cannot read %r object at %s') % (type, rev))
        return data

    def getfile(self, name, rev):
        if name == '.hgsub':
            data = '\n'.join([m.hgsub() for m in self.submoditer()])
            mode = ''
        elif name == '.hgsubstate':
            data = '\n'.join([m.hgsubstate() for m in self.submoditer()])
            mode = ''
        else:
            data = self.catfile(rev, "blob")
            mode = self.modecache[(name, rev)]
        return data, mode

    def submoditer(self):
        null = hex(nullid)
        for m in sorted(self.submodules, key=lambda p: p.path):
            if m.node != null:
                yield m

    def parsegitmodules(self, content):
        """Parse the formatted .gitmodules file, example file format:
        [submodule "sub"]\n
        \tpath = sub\n
        \turl = git://giturl\n
        """
        self.submodules = []
        c = config.config()
        # Each item in .gitmodules starts with \t that can't be parsed
        c.parse('.gitmodules', content.replace('\t',''))
        for sec in c.sections():
            s = c[sec]
            if 'url' in s and 'path' in s:
                self.submodules.append(submodule(s['path'], '', s['url']))

    def retrievegitmodules(self, version):
        modules, ret = self.gitread("git show %s:%s" % (version, '.gitmodules'))
        if ret:
            raise util.Abort(_('cannot read submodules config file in %s') %
                             version)
        self.parsegitmodules(modules)
        for m in self.submodules:
            node, ret = self.gitread("git rev-parse %s:%s" % (version, m.path))
            if ret:
                continue
            m.node = node.strip()

    def getchanges(self, version):
        self.modecache = {}
        fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
        changes = []
        seen = set()
        entry = None
        subexists = False
        for l in fh.read().split('\x00'):
            if not entry:
                if not l.startswith(':'):
                    continue
                entry = l
                continue
            f = l
            if f not in seen:
                seen.add(f)
                entry = entry.split()
                h = entry[3]
                p = (entry[1] == "100755")
                s = (entry[1] == "120000")

                if f == '.gitmodules':
                    subexists = True
                    changes.append(('.hgsub', ''))
                elif entry[1] == '160000' or entry[0] == ':160000':
                    subexists = True
                else:
                    self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
                    changes.append((f, h))
            entry = None
        if fh.close():
            raise util.Abort(_('cannot read changes in %s') % version)

        if subexists:
            self.retrievegitmodules(version)
            changes.append(('.hgsubstate', ''))
        return (changes, {})

    def getcommit(self, version):
        c = self.catfile(version, "commit") # read the commit hash
        end = c.find("\n\n")
        message = c[end + 2:]
        message = self.recode(message)
        l = c[:end].splitlines()
        parents = []
        author = committer = None
        for e in l[1:]:
            n, v = e.split(" ", 1)
            if n == "author":
                p = v.split()
                tm, tz = p[-2:]
                author = " ".join(p[:-2])
                if author[0] == "<": author = author[1:-1]
                author = self.recode(author)
            if n == "committer":
                p = v.split()
                tm, tz = p[-2:]
                committer = " ".join(p[:-2])
                if committer[0] == "<": committer = committer[1:-1]
                committer = self.recode(committer)
            if n == "parent":
                parents.append(v)

        if committer and committer != author:
            message += "\ncommitter: %s\n" % committer
        tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
        tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
        date = tm + " " + str(tz)

        c = commit(parents=parents, date=date, author=author, desc=message,
                   rev=version)
        return c

    def gettags(self):
        tags = {}
        alltags = {}
        fh = self.gitopen('git ls-remote --tags "%s"' % self.path,
                          err=subprocess.STDOUT)
        prefix = 'refs/tags/'

        # Build complete list of tags, both annotated and bare ones
        for line in fh:
            line = line.strip()
            if line.startswith("error:") or line.startswith("fatal:"):
                raise util.Abort(_('cannot read tags from %s') % self.path)
            node, tag = line.split(None, 1)
            if not tag.startswith(prefix):
                continue
            alltags[tag[len(prefix):]] = node
        if fh.close():
            raise util.Abort(_('cannot read tags from %s') % self.path)

        # Filter out tag objects for annotated tag refs
        for tag in alltags:
            if tag.endswith('^{}'):
                tags[tag[:-3]] = alltags[tag]
            else:
                if tag + '^{}' in alltags:
                    continue
                else:
                    tags[tag] = alltags[tag]

        return tags

    def getchangedfiles(self, version, i):
        changes = []
        if i is None:
            fh = self.gitopen("git diff-tree --root -m -r %s" % version)
            for l in fh:
                if "\t" not in l:
                    continue
                m, f = l[:-1].split("\t")
                changes.append(f)
        else:
            fh = self.gitopen('git diff-tree --name-only --root -r %s '
                              '"%s^%s" --' % (version, version, i + 1))
            changes = [f.rstrip('\n') for f in fh]
        if fh.close():
            raise util.Abort(_('cannot read changes in %s') % version)

        return changes

    def getbookmarks(self):
        bookmarks = {}

        # Interesting references in git are prefixed
        prefix = 'refs/heads/'
        prefixlen = len(prefix)

        # factor two commands
        gitcmd = { 'remote/': 'git ls-remote --heads origin',
                          '': 'git show-ref'}

        # Origin heads
        for reftype in gitcmd:
            try:
                fh = self.gitopen(gitcmd[reftype], err=subprocess.PIPE)
                for line in fh:
                    line = line.strip()
                    rev, name = line.split(None, 1)
                    if not name.startswith(prefix):
                        continue
                    name = '%s%s' % (reftype, name[prefixlen:])
                    bookmarks[name] = rev
            except Exception:
                pass

        return bookmarks
Code Example #39
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                optimes,
                behaviors,
                networkattemptlimit,
                networkattempts=None,
                sparse_profile=None,
                noupdate=False):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui,
                           url,
                           dest,
                           upstream,
                           revision,
                           branch,
                           purge,
                           sharebase,
                           optimes,
                           behaviors,
                           networkattemptlimit,
                           networkattempts=networkattempts,
                           sparse_profile=sparse_profile,
                           noupdate=noupdate)

    @contextlib.contextmanager
    def timeit(op, behavior):
        behaviors.add(behavior)
        errored = False
        try:
            start = time.time()
            yield
        except Exception:
            errored = True
            raise
        finally:
            elapsed = time.time() - start

            if errored:
                op += '_errored'

            optimes.append((op, elapsed))

    ui.write(b'ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    # We assume that we're the only process on the machine touching the
    # repository paths that we were told to use. This means our recovery
    # scenario when things aren't "right" is to just nuke things and start
    # from scratch. This is easier to implement than verifying the state
    # of the data and attempting recovery. And in some scenarios (such as
    # potential repo corruption), it is probably faster, since verifying
    # repos can take a while.

    destvfs = vfs.vfs(dest, audit=False, realpath=True)

    def deletesharedstore(path=None):
        storepath = path or destvfs.read(b'.hg/sharedpath').strip()
        if storepath.endswith(b'.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = vfs.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    if destvfs.exists() and not destvfs.exists(b'.hg'):
        raise error.Abort(b'destination exists but no .hg directory')

    # Refuse to enable sparse checkouts on existing checkouts. The reasoning
    # here is that another consumer of this repo may not be sparse aware. If we
    # enabled sparse, we would lock them out.
    if destvfs.exists(
    ) and sparse_profile and not destvfs.exists(b'.hg/sparse'):
        raise error.Abort(
            b'cannot enable sparse profile on existing '
            b'non-sparse checkout',
            hint=b'use a separate working directory to use sparse')

    # And the other direction for symmetry.
    if not sparse_profile and destvfs.exists(b'.hg/sparse'):
        raise error.Abort(
            b'cannot use non-sparse checkout on existing sparse '
            b'checkout',
            hint=b'use a separate working directory to use sparse')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists(b'.hg') and not destvfs.exists(b'.hg/sharedpath'):
        ui.warn(b'(destination is not shared; deleting)\n')
        with timeit('remove_unshared_dest', 'remove-wdir'):
            destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists(b'.hg/sharedpath'):
        storepath = destvfs.read(b'.hg/sharedpath').strip()

        ui.write(b'(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn(b'(shared store does not exist; deleting destination)\n')
            with timeit('removed_missing_shared_store', 'remove-wdir'):
                destvfs.rmtree(forcibly=True)
        elif not re.search(b'[a-f0-9]{40}/\.hg$', storepath.replace(
                b'\\', b'/')):
            ui.warn(b'(shared store does not belong to pooled storage; '
                    b'deleting destination to improve efficiency)\n')
            with timeit('remove_unpooled_store', 'remove-wdir'):
                destvfs.rmtree(forcibly=True)

    if destvfs.isfileorlink(b'.hg/wlock'):
        ui.warn(b'(dest has an active working directory lock; assuming it is '
                b'left over from a previous process and that the destination '
                b'is corrupt; deleting it just to be sure)\n')
        with timeit('remove_locked_wdir', 'remove-wdir'):
            destvfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if pycompat.bytestr(e) == _(b'abandoned transaction found'):
            ui.warn(b'(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn(b'(could not recover repo state; '
                        b'deleting shared store)\n')
                with timeit('remove_unrecovered_shared_store', 'remove-store'):
                    deletesharedstore()

            ui.warn(b'(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlenetworkfailure():
        if networkattempts[0] >= networkattemptlimit:
            raise error.Abort(b'reached maximum number of network attempts; '
                              b'giving up\n')

        ui.warn(b'(retrying after network failure on attempt %d of %d)\n' %
                (networkattempts[0], networkattemptlimit))

        # Do a backoff on retries to mitigate the thundering herd
        # problem. This is an exponential backoff with a multiplier
        # plus random jitter thrown in for good measure.
        # With the default settings, backoffs will be:
        # 1) 2.5 - 6.5
        # 2) 5.5 - 9.5
        # 3) 11.5 - 15.5
        backoff = (2**networkattempts[0] - 1) * 1.5
        jittermin = ui.configint(b'robustcheckout', b'retryjittermin', 1000)
        jittermax = ui.configint(b'robustcheckout', b'retryjittermax', 5000)
        backoff += float(random.randint(jittermin, jittermax)) / 1000.0
        ui.warn(b'(waiting %.2fs before retry)\n' % backoff)
        time.sleep(backoff)

        networkattempts[0] += 1

    def handlepullerror(e):
        """Handle an exception raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if isinstance(e, error.Abort):
            if e.args[0] == _(b'repository is unrelated'):
                ui.warn(b'(repository is unrelated; deleting)\n')
                destvfs.rmtree(forcibly=True)
                return True
            elif e.args[0].startswith(_(b'stream ended unexpectedly')):
                ui.warn(b'%s\n' % e.args[0])
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        # TODO test this branch
        elif isinstance(e, error.ResponseError):
            if e.args[0].startswith(
                    _(b'unexpected response from remote server:')):
                ui.warn(
                    b'(unexpected response from remote server; retrying)\n')
                destvfs.rmtree(forcibly=True)
                # Will raise if failure limit reached.
                handlenetworkfailure()
                return True
        elif isinstance(e, ssl.SSLError):
            # Assume all SSL errors are due to the network, as Mercurial
            # should convert non-transport errors like cert validation failures
            # to error.Abort.
            ui.warn(b'ssl error: %s\n' % e)
            handlenetworkfailure()
            return True
        elif isinstance(e, urllibcompat.urlerr.urlerror):
            if isinstance(e.reason, socket.error):
                ui.warn(b'socket error: %s\n' % pycompat.bytestr(e.reason))
                handlenetworkfailure()
                return True
            else:
                ui.warn(b'unhandled URLError; reason type: %s; value: %s\n' %
                        (e.reason.__class__.__name__, e.reason))
        else:
            ui.warn(b'unhandled exception during network operation; type: %s; '
                    b'value: %s\n' % (e.__class__.__name__, e))

        return False

    # Perform sanity checking of store. We may or may not know the path to the
    # local store. It depends if we have an existing destvfs pointing to a
    # share. To ensure we always find a local store, perform the same logic
    # that Mercurial's pooled storage does to resolve the local store path.
    cloneurl = upstream or url

    try:
        clonepeer = hg.peer(ui, {}, cloneurl)
        rootnode = peerlookup(clonepeer, b'0')
    except error.RepoLookupError:
        raise error.Abort(b'unable to resolve root revision from clone '
                          b'source')
    except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
        if handlepullerror(e):
            return callself()
        raise

    if rootnode == nullid:
        raise error.Abort(b'source repo appears to be empty')

    storepath = os.path.join(sharebase, hex(rootnode))
    storevfs = vfs.vfs(storepath, audit=False)
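    # The store path is keyed on the hex of revision 0, mirroring the
    # layout Mercurial's pooled share storage uses, so every clone of the
    # same repository resolves to the same local store.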

    if storevfs.isfileorlink(b'.hg/store/lock'):
        ui.warn(b'(shared store has an active lock; assuming it is left '
                b'over from a previous process and that the store is '
                b'corrupt; deleting store and destination just to be '
                b'sure)\n')
        if destvfs.exists():
            with timeit('remove_dest_active_lock', 'remove-wdir'):
                destvfs.rmtree(forcibly=True)

        with timeit('remove_shared_store_active_lock', 'remove-store'):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists() and not storevfs.exists(b'.hg/requires'):
        ui.warn(b'(shared store missing requires file; this is a really '
                b'odd failure; deleting store and destination)\n')
        if destvfs.exists():
            with timeit('remove_dest_no_requires', 'remove-wdir'):
                destvfs.rmtree(forcibly=True)

        with timeit('remove_shared_store_no_requires', 'remove-store'):
            storevfs.rmtree(forcibly=True)

    if storevfs.exists(b'.hg/requires'):
        requires = set(storevfs.read(b'.hg/requires').splitlines())
        # FUTURE when we require generaldelta, this is where we can check
        # for that.
        required = {b'dotencode', b'fncache'}

        missing = required - requires
        if missing:
            ui.warn(b'(shared store missing requirements: %s; deleting '
                    b'store and destination to ensure optimal behavior)\n' %
                    b', '.join(sorted(missing)))
            if destvfs.exists():
                with timeit('remove_dest_missing_requires', 'remove-wdir'):
                    destvfs.rmtree(forcibly=True)

            with timeit('remove_shared_store_missing_requires',
                        'remove-store'):
                storevfs.rmtree(forcibly=True)

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write(b'(cloning from upstream repo %s)\n' % upstream)

        if not storevfs.exists():
            behaviors.add(b'create-store')

        try:
            with timeit('clone', 'clone'):
                shareopts = {b'pool': sharebase, b'mode': b'identity'}
                res = hg.clone(ui, {},
                               clonepeer,
                               dest=dest,
                               update=False,
                               shareopts=shareopts,
                               stream=True)
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b'(repo corruption: %s; deleting shared store)\n' % e)
            with timeit('remove_shared_store_revlogerror', 'remove-store'):
                deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort(b'clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists(b'.hg/sharedpath'):
            raise error.Abort(b'clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False

    if revision:
        try:
            ctx = scmutil.revsingle(repo, revision)
        except error.RepoLookupError:
            ctx = None

        if ctx:
            if not ctx.hex().startswith(revision):
                raise error.Abort(
                    b'--revision argument is ambiguous',
                    hint=b'must be the first 12+ characters of a '
                    b'SHA-1 fragment')

            checkoutrevision = ctx.hex()
            havewantedrev = True

    if not havewantedrev:
        ui.write(b'(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [peerlookup(remote, revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn(b'(remote resolved %s to %s; '
                        b'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn(b'(revision already present locally; not pulling)\n')
            else:
                with timeit('pull', 'pull'):
                    pullop = exchange.pull(repo, remote, heads=pullrevs)
                    if not pullop.rheads:
                        raise error.Abort(b'unable to pull requested revision')
        except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
            if handlepullerror(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn(b'(repo corruption: %s; deleting shared store)\n' % e)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
    if noupdate:
        ui.write(b'(skipping update since `-U` was passed)\n')
        return None

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write(b'(purging working directory)\n')
        purgeext = extensions.find(b'purge')

        # Mercurial 4.3 doesn't purge files outside the sparse checkout.
        # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
        # purging by monkeypatching the sparse matcher.
        try:
            old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
            if old_sparse_fn is not None:
                # TRACKING hg50
                # Arguments passed to `matchmod.always` were unused and have been removed
                if util.versiontuple(n=2) >= (5, 0):
                    repo.dirstate._sparsematchfn = lambda: matchmod.always()
                else:
                    repo.dirstate._sparsematchfn = lambda: matchmod.always(
                        repo.root, '')

            with timeit('purge', 'purge'):
                if purgeext.purge(
                        ui,
                        repo,
                        all=True,
                        abort_on_err=True,
                        # The function expects all arguments to be
                        # defined.
                        **{
                            'print': None,
                            'print0': None,
                            'dirs': None,
                            'files': None
                        }):
                    raise error.Abort(b'error purging')
        finally:
            if old_sparse_fn is not None:
                repo.dirstate._sparsematchfn = old_sparse_fn

    # Update the working directory.

    if repo[b'.'].node() == nullid:
        behaviors.add(b'empty-wdir')
    else:
        behaviors.add(b'populated-wdir')

    if sparse_profile:
        sparsemod = getsparse()

        # By default, Mercurial will ignore unknown sparse profiles. This could
        # lead to a full checkout. Be more strict.
        try:
            repo.filectx(sparse_profile, changeid=checkoutrevision).data()
        except error.ManifestLookupError:
            raise error.Abort(b'sparse profile %s does not exist at revision '
                              b'%s' % (sparse_profile, checkoutrevision))

        # TRACKING hg48 - parseconfig takes `action` param
        if util.versiontuple(n=2) >= (4, 8):
            old_config = sparsemod.parseconfig(repo.ui,
                                               repo.vfs.tryread(b'sparse'),
                                               b'sparse')
        else:
            old_config = sparsemod.parseconfig(repo.ui,
                                               repo.vfs.tryread(b'sparse'))

        old_includes, old_excludes, old_profiles = old_config

        if old_profiles == {sparse_profile} and not old_includes and not \
                old_excludes:
            ui.write(b'(sparse profile %s already set; no need to update '
                     b'sparse config)\n' % sparse_profile)
        else:
            if old_includes or old_excludes or old_profiles:
                ui.write(b'(replacing existing sparse config with profile '
                         b'%s)\n' % sparse_profile)
            else:
                ui.write(b'(setting sparse config to profile %s)\n' %
                         sparse_profile)

            # If doing an incremental update, this will perform two updates:
            # one to change the sparse profile and another to update to the new
            # revision. This is not desired. But there's not a good API in
            # Mercurial to do this as one operation.
            with repo.wlock(), timeit('sparse_update_config',
                                      'sparse-update-config'):
                fcounts = map(
                    len,
                    sparsemod._updateconfigandrefreshwdir(repo, [], [],
                                                          [sparse_profile],
                                                          force=True))

                repo.ui.status(b'%d files added, %d files dropped, '
                               b'%d files conflicting\n' % tuple(fcounts))

            ui.write(b'(sparse refresh complete)\n')

    op = 'update_sparse' if sparse_profile else 'update'
    behavior = 'update-sparse' if sparse_profile else 'update'

    with timeit(op, behavior):
        if commands.update(ui, repo, rev=checkoutrevision, clean=True):
            raise error.Abort(b'error updating')

    ui.write(b'updated to %s\n' % checkoutrevision)

    return None
コード例 #40
0
        firstpush = int(firstpush)

        for pushid, who, when, nodes in repo.pushlog.pushes(
                start_id=firstpush):
            lines.append('%d %s %d %s' % (pushid, who, when, ' '.join(nodes)))

        return '\n'.join(lines)
    except Exception as e:
        return '\n'.join(['0', str(e)])


# TRACKING hg46
# 4.5.3 added wireproto.permissions.
# 4.6 removed it and factored permissions into @wireprotocommand. We bypass
# @wireprotocommand for now and set the permission on the command entry.
if util.safehasattr(wireproto, 'permissions'):
    wireproto.permissions['pushlog'] = 'pull'
else:
    try:
        wireproto.commands['pushlog'].permission = 'pull'
    except AttributeError:
        pass


def exchangepullpushlog(orig, pullop):
    """This is called during pull to fetch pushlog data.

    The goal of this function is to replicate the entire pushlog. This is
    in contrast to replicating only the pushlog data for changesets the
    client has pulled. Put another way, this attempts complete replication
    as opposed to partial, hole-y replication.
コード例 #41
0
 def _repo(self):
     if util.safehasattr(self._agent, 'rawRepo'):
         return self._agent.rawRepo()
コード例 #42
0
def stripcmd(ui, repo, *revs, **opts):
    """strip changesets and all their descendants from the repository

    The strip command removes the specified changesets and all their
    descendants. If the working directory has uncommitted changes, the
    operation is aborted unless the --force flag is supplied, in which
    case changes will be discarded.

    If a parent of the working directory is stripped, then the working
    directory will automatically be updated to the most recent
    available ancestor of the stripped parent after the operation
    completes.

    Any stripped changesets are stored in ``.hg/strip-backup`` as a
    bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
    be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
    where BUNDLE is the bundle file created by the strip. Note that
    the local revision numbers will in general be different after the
    restore.

    Use the --no-backup option to discard the backup bundle once the
    operation completes.

    Strip is not a history-rewriting operation and can be used on
    changesets in the public phase. But if the stripped changesets have
    been pushed to a remote repository you will likely pull them again.

    Return 0 on success.
    """
    backup = True
    if opts.get('no_backup') or opts.get('nobackup'):
        backup = False

    cl = repo.changelog
    revs = list(revs) + opts.get('rev')
    revs = set(scmutil.revrange(repo, revs))

    wlock = repo.wlock()
    try:
        if opts.get('bookmark'):
            mark = opts.get('bookmark')
            marks = repo._bookmarks
            if mark not in marks:
                raise error.Abort(_("bookmark '%s' not found") % mark)

            # If the requested bookmark is not the only one pointing to a
            # revision, we have to only delete the bookmark and not strip
            # anything. revsets cannot detect that case.
            uniquebm = True
            for m, n in marks.iteritems():
                if m != mark and n == repo[mark].node():
                    uniquebm = False
                    break
            if uniquebm:
                rsrevs = repair.stripbmrevset(repo, mark)
                revs.update(set(rsrevs))
            if not revs:
                del marks[mark]
                marks.write()
                ui.write(_("bookmark '%s' deleted\n") % mark)

        if not revs:
            raise error.Abort(_('empty revision set'))

        descendants = set(cl.descendants(revs))
        strippedrevs = revs.union(descendants)
        roots = revs.difference(descendants)

        update = False
        # if one of the wdir parent is stripped we'll need
        # to update away to an earlier revision
        for p in repo.dirstate.parents():
            if p != nullid and cl.rev(p) in strippedrevs:
                update = True
                break

        rootnodes = set(cl.node(r) for r in roots)

        q = getattr(repo, 'mq', None)
        if q is not None and q.applied:
            # refresh queue state if we're about to strip
            # applied patches
            if cl.rev(repo.lookup('qtip')) in strippedrevs:
                q.applieddirty = True
                start = 0
                end = len(q.applied)
                for i, statusentry in enumerate(q.applied):
                    if statusentry.node in rootnodes:
                        # if one of the stripped roots is an applied
                        # patch, only part of the queue is stripped
                        start = i
                        break
                del q.applied[start:end]
                q.savedirty()

        revs = sorted(rootnodes)
        if update and opts.get('keep'):
            urev, p2 = repo.changelog.parents(revs[0])
            if (util.safehasattr(repo, 'mq') and p2 != nullid
                    and p2 in [x.node for x in repo.mq.applied]):
                urev = p2
            uctx = repo[urev]

            # only reset the dirstate for files that would actually change
            # between the working context and uctx
            descendantrevs = repo.revs("%s::." % uctx.rev())
            changedfiles = []
            for rev in descendantrevs:
                # blindly reset the files, regardless of what actually changed
                changedfiles.extend(repo[rev].files())

            # reset files that only changed in the dirstate too
            dirstate = repo.dirstate
            dirchanges = [f for f in dirstate if dirstate[f] != 'n']
            changedfiles.extend(dirchanges)

            repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
            repo.dirstate.write(repo.currenttransaction())

            # clear resolve state
            ms = merge.mergestate(repo)
            ms.reset(repo['.'].node())

            update = False

        strip(ui,
              repo,
              revs,
              backup=backup,
              update=update,
              force=opts.get('force'),
              bookmark=opts.get('bookmark'))
    finally:
        wlock.release()

    return 0
コード例 #43
0
ファイル: cvsps.py プロジェクト: sdr01810/intellij-idea-ce
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog'''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}

    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)
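    # e.g. the first occurrence of a given log message is stored in
    # _scache and every later identical message reuses that same object,
    # so both memory and the pickled cache only pay for one copy.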

    ui.status(_('collecting CVS rlog\n'))

    log = []  # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|"
                       "(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('======================================='
                       '======================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                       r'(\s+commitid:\s+([^;]+);)?'
                       r'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = ''  # leading path to strip off what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = open(os.path.join('CVS', 'Repository')).read().strip()
            directory = prefix
            if prefix == ".":
                prefix = ""
        except IOError:
            raise logerror(_('not a CVS sandbox'))

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = open(os.path.join('CVS', 'Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumeric characters, concatenated in a way that does not
        # mix up the various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))
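        # e.g. root ':pserver:user@server:/path' with directory 'proj'
        # becomes '~/.hg.cvsps/pserver.user-server.path.proj.cache'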

    if cache == 'update':
        try:
            ui.note(_('reading cvs log cache %s\n') % cachefile)
            oldlog = pickle.load(open(cachefile))
            for e in oldlog:
                if not (util.safehasattr(e, 'branchpoints')
                        and util.safehasattr(e, 'commitid')
                        and util.safehasattr(e, 'mergepoint')):
                    ui.status(_('ignoring old cache\n'))
                    oldlog = []
                    break

            ui.note(_('cache has %d log entries\n') % len(oldlog))
        except Exception as e:
            ui.note(_('error reading cache: %r\n') % e)

        if oldlog:
            date = oldlog[-1].date  # last commit date as a (time,tz) tuple
            date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
コード例 #44
0
    resolve_uri_to_tree,
    TRY_TREES,
)

testedwith = '4.1 4.2 4.3 4.4'
minimumhgversion = '4.1'
buglink = 'https://bugzilla.mozilla.org/enter_bug.cgi?product=Developer%20Services&component=Mercurial%3A%20firefoxtree'
# The root revisions in mozilla-central and comm-central, respectively.
MOZ_ROOT_REV = '8ba995b74e18334ab3707f27e9eb8f4e37ba3d29'
COMM_ROOT_REV = 'e4f4569d451a5e0d12a6aa33ebd916f979dd8faa'

cmdtable = {}

# TRACKING hg43 Mercurial 4.3 introduced registrar.command as a replacement for
# cmdutil.command.
if util.safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
else:
    command = cmdutil.command(cmdtable)

# TRACKING hg43 Mercurial 4.3 introduced the config registrar. 4.4 requires
# config items to be registered to avoid a devel warning.
if util.safehasattr(registrar, 'configitem'):
    configtable = {}
    configitem = registrar.configitem(configtable)

    configitem('firefoxtree', 'servetags', default=configitems.dynamicdefault)
    configitem('firefoxtree',
               'servetagsfrombookmarks',
               default=configitems.dynamicdefault)
コード例 #45
0
ファイル: robustcheckout.py プロジェクト: AlexxNica/gecko
def _docheckout(ui,
                url,
                dest,
                upstream,
                revision,
                branch,
                purge,
                sharebase,
                networkattemptlimit,
                networkattempts=None):
    if not networkattempts:
        networkattempts = [1]

    def callself():
        return _docheckout(ui, url, dest, upstream, revision, branch, purge,
                           sharebase, networkattemptlimit, networkattempts)

    ui.write('ensuring %s@%s is available at %s\n' %
             (url, revision or branch, dest))

    destvfs = scmutil.vfs(dest, audit=False, realpath=True)

    if destvfs.exists() and not destvfs.exists('.hg'):
        raise error.Abort('destination exists but no .hg directory')

    # Require checkouts to be tied to shared storage because efficiency.
    if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
        ui.warn('(destination is not shared; deleting)\n')
        destvfs.rmtree(forcibly=True)

    # Verify the shared path exists and is using modern pooled storage.
    if destvfs.exists('.hg/sharedpath'):
        storepath = destvfs.read('.hg/sharedpath').strip()

        ui.write('(existing repository shared store: %s)\n' % storepath)

        if not os.path.exists(storepath):
            ui.warn('(shared store does not exist; deleting)\n')
            destvfs.rmtree(forcibly=True)
        elif not re.search(r'[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
            ui.warn('(shared store does not belong to pooled storage; '
                    'deleting to improve efficiency)\n')
            destvfs.rmtree(forcibly=True)

        # FUTURE when we require generaldelta, this is where we can check
        # for that.

    def deletesharedstore():
        storepath = destvfs.read('.hg/sharedpath').strip()
        if storepath.endswith('.hg'):
            storepath = os.path.dirname(storepath)

        storevfs = scmutil.vfs(storepath, audit=False)
        storevfs.rmtree(forcibly=True)

    def handlerepoerror(e):
        if e.message == _('abandoned transaction found'):
            ui.warn('(abandoned transaction found; trying to recover)\n')
            repo = hg.repository(ui, dest)
            if not repo.recover():
                ui.warn('(could not recover repo state; '
                        'deleting shared store)\n')
                deletesharedstore()

            ui.warn('(attempting checkout from beginning)\n')
            return callself()

        raise

    # At this point we either have an existing working directory using
    # shared, pooled storage or we have nothing.

    def handlepullabort(e):
        """Handle an error.Abort raised during a pull.

        Returns True if caller should call ``callself()`` to retry.
        """
        if e.args[0] == _('repository is unrelated'):
            ui.warn('(repository is unrelated; deleting)\n')
            destvfs.rmtree(forcibly=True)
            return True
        elif e.args[0].startswith(_('stream ended unexpectedly')):
            ui.warn('%s\n' % e.args[0])
            if networkattempts[0] < networkattemptlimit:
                ui.warn(
                    '(retrying after network failure on attempt %d of %d)\n' %
                    (networkattempts[0], networkattemptlimit))

                # Do a backoff on retries to mitigate the thundering herd
                # problem. This is an exponential backoff with a multiplier
                # plus random jitter thrown in for good measure.
                # With the default settings, backoffs will be:
                # 1) 2.5 - 6.5
                # 2) 5.5 - 9.5
                # 3) 11.5 - 15.5
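                # e.g. attempt 1: (2**1 - 1) * 1.5 = 1.5s base plus
                # 1.0-5.0s jitter, giving the 2.5-6.5s range above.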
                backoff = (2**networkattempts[0] - 1) * 1.5
                jittermin = ui.configint('robustcheckout', 'retryjittermin',
                                         1000)
                jittermax = ui.configint('robustcheckout', 'retryjittermax',
                                         5000)
                backoff += float(random.randint(jittermin, jittermax)) / 1000.0
                ui.warn('(waiting %.2fs before retry)\n' % backoff)
                time.sleep(backoff)

                networkattempts[0] += 1

                return True
            else:
                raise error.Abort(
                    'reached maximum number of network attempts; '
                    'giving up\n')

        return False

    created = False

    if not destvfs.exists():
        # Ensure parent directories of destination exist.
        # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
        if util.safehasattr(util, 'ensuredirs'):
            makedirs = util.ensuredirs
        else:
            makedirs = util.makedirs

        makedirs(os.path.dirname(destvfs.base), notindexed=True)
        makedirs(sharebase, notindexed=True)

        if upstream:
            ui.write('(cloning from upstream repo %s)\n' % upstream)
        cloneurl = upstream or url

        try:
            res = hg.clone(ui, {},
                           cloneurl,
                           dest=dest,
                           update=False,
                           shareopts={
                               'pool': sharebase,
                               'mode': 'identity'
                           })
        except error.Abort as e:
            if handlepullabort(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()

        # TODO retry here.
        if res is None:
            raise error.Abort('clone failed')

        # Verify it is using shared pool storage.
        if not destvfs.exists('.hg/sharedpath'):
            raise error.Abort('clone did not create a shared repo')

        created = True

    # The destination .hg directory should exist. Now make sure we have the
    # wanted revision.

    repo = hg.repository(ui, dest)

    # We only pull if we are using symbolic names or the requested revision
    # doesn't exist.
    havewantedrev = False
    if revision and revision in repo:
        ctx = repo[revision]

        if not ctx.hex().startswith(revision):
            raise error.Abort('--revision argument is ambiguous',
                              hint='must be the first 12+ characters of a '
                              'SHA-1 fragment')

        checkoutrevision = ctx.hex()
        havewantedrev = True

    if not havewantedrev:
        ui.write('(pulling to obtain %s)\n' % (revision or branch, ))

        remote = None
        try:
            remote = hg.peer(repo, {}, url)
            pullrevs = [remote.lookup(revision or branch)]
            checkoutrevision = hex(pullrevs[0])
            if branch:
                ui.warn('(remote resolved %s to %s; '
                        'result is not deterministic)\n' %
                        (branch, checkoutrevision))

            if checkoutrevision in repo:
                ui.warn('(revision already present locally; not pulling)\n')
            else:
                pullop = exchange.pull(repo, remote, heads=pullrevs)
                if not pullop.rheads:
                    raise error.Abort('unable to pull requested revision')
        except error.Abort as e:
            if handlepullabort(e):
                return callself()
            raise
        except error.RepoError as e:
            return handlerepoerror(e)
        except error.RevlogError as e:
            ui.warn('(repo corruption: %s; deleting shared store)\n' %
                    e.message)
            deletesharedstore()
            return callself()
        finally:
            if remote:
                remote.close()

    # Now we should have the wanted revision in the store. Perform
    # working directory manipulation.

    # Purge if requested. We purge before update because this way we're
    # guaranteed to not have conflicts on `hg update`.
    if purge and not created:
        ui.write('(purging working directory)\n')
        purgeext = extensions.find('purge')

        if purgeext.purge(
                ui,
                repo,
                all=True,
                abort_on_err=True,
                # The function expects all arguments to be
                # defined.
                **{
                    'print': None,
                    'print0': None,
                    'dirs': None,
                    'files': None
                }):
            raise error.Abort('error purging')

    # Update the working directory.
    if commands.update(ui, repo, rev=checkoutrevision, clean=True):
        raise error.Abort('error updating')

    ui.write('updated to %s\n' % checkoutrevision)
    return None
コード例 #46
0
def has_absimport():
    import __future__
    from mercurial import util
    return util.safehasattr(__future__, "absolute_import")
コード例 #47
0
    def annotate(self, rev, master=None, showpath=False, showlines=False):
        """incrementally update the cache so it includes revisions in the main
        branch till 'master'. and run annotate on 'rev', which may or may not be
        included in the main branch.

        if master is None, do not update linelog.

        the first value returned is the annotate result, it is [(node, linenum)]
        by default. [(node, linenum, path)] if showpath is True.

        if showlines is True, a second value will be returned, it is a list of
        corresponding line contents.
        """

        # the fast path test requires commit hash, convert rev number to hash,
        # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
        # command could give us a revision number even if the user passes a
        # commit hash.
        if isinstance(rev, int):
            rev = node.hex(self.repo.changelog.node(rev))

        # fast path: if rev is in the main branch already
        directly, revfctx = self.canannotatedirectly(rev)
        if directly:
            if self.ui.debugflag:
                self.ui.debug(
                    'fastannotate: %s: using fast path '
                    '(resolved fctx: %s)\n' %
                    (self.path,
                     stringutil.pprint(util.safehasattr(revfctx, 'node'))))
            return self.annotatedirectly(revfctx, showpath, showlines)

        # resolve master
        masterfctx = None
        if master:
            try:
                masterfctx = self._resolvefctx(master,
                                               resolverev=True,
                                               adjustctx=True)
            except LookupError:  # master does not have the file
                pass
            else:
                if masterfctx in self.revmap:  # no need to update linelog
                    masterfctx = None

        #                  ... - @ <- rev (can be an arbitrary changeset,
        #                 /                not necessarily a descendant
        #      master -> o                 of master)
        #                |
        #     a merge -> o         'o': new changesets in the main branch
        #                |\        '#': revisions in the main branch that
        #                o *            exist in linelog / revmap
        #                | .       '*': changesets in side branches, or
        # last master -> # .            descendants of master
        #                | .
        #                # *       joint: '#', and is a parent of a '*'
        #                |/
        #     a joint -> # ^^^^ --- side branches
        #                |
        #                ^ --- main branch (in linelog)

        # these DFSes are similar to the traditional annotate algorithm.
        # we cannot really reuse the code for perf reasons.

        # 1st DFS calculates merges, joint points, and needed.
        # "needed" is a simple reference counting dict to free items in
        # "hist", reducing its memory usage otherwise could be huge.
        initvisit = [revfctx]
        if masterfctx:
            if masterfctx.rev() is None:
                raise error.Abort(_('cannot update linelog to wdir()'),
                                  hint=_('set fastannotate.mainbranch'))
            initvisit.append(masterfctx)
        visit = initvisit[:]
        pcache = {}
        needed = {revfctx: 1}
        hist = {}  # {fctx: ([(llrev or fctx, linenum)], text)}
        while visit:
            f = visit.pop()
            if f in pcache or f in hist:
                continue
            if f in self.revmap:  # in the old main branch, it's a joint
                llrev = self.revmap.hsh2rev(f.node())
                self.linelog.annotate(llrev)
                result = self.linelog.annotateresult
                hist[f] = (result, f.data())
                continue
            pl = self._parentfunc(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd (simple) DFS calculates new changesets in the main branch
        # ('o' nodes in the above graph), so we know when to update linelog.
        newmainbranch = set()
        f = masterfctx
        while f and f not in self.revmap:
            newmainbranch.add(f)
            pl = pcache[f]
            if pl:
                f = pl[0]
            else:
                f = None
                break

        # f, if present, is the position where the last build stopped at, and
        # should be the "master" last time. check to see if we can continue
        # building the linelog incrementally. (we cannot if diverged)
        if masterfctx is not None:
            self._checklastmasterhead(f)

        if self.ui.debugflag:
            if newmainbranch:
                self.ui.debug('fastannotate: %s: %d new changesets in the main'
                              ' branch\n' % (self.path, len(newmainbranch)))
            elif not hist:  # no joints, no updates
                self.ui.debug('fastannotate: %s: linelog cannot help in '
                              'annotating this revision\n' % self.path)

        # prepare annotateresult so we can update linelog incrementally
        self.linelog.annotate(self.linelog.maxrev)

        # 3rd DFS does the actual annotate
        visit = initvisit[:]
        progress = 0
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if not ready:
                continue

            visit.pop()
            blocks = None  # mdiff blocks, used for appending linelog
            ismainbranch = (f in newmainbranch)
            # curr is the same as the traditional annotate algorithm,
            # if we only care about linear history (do not follow merge),
            # then curr is not actually used.
            assert f not in hist
            curr = _decorate(f)
            for i, p in enumerate(pl):
                bs = list(self._diffblocks(hist[p][1], curr[1]))
                if i == 0 and ismainbranch:
                    blocks = bs
                curr = _pair(hist[p], curr, bs)
                if needed[p] == 1:
                    del hist[p]
                    del needed[p]
                else:
                    needed[p] -= 1

            hist[f] = curr
            del pcache[f]

            if ismainbranch:  # need to write to linelog
                if not self.ui.quiet:
                    progress += 1
                    self.ui.progress(_('building cache'),
                                     progress,
                                     total=len(newmainbranch))
                bannotated = None
                if len(pl) == 2 and self.opts.followmerge:  # merge
                    bannotated = curr[0]
                if blocks is None:  # no parents, add an empty one
                    blocks = list(self._diffblocks('', curr[1]))
                self._appendrev(f, blocks, bannotated)
            elif showpath:  # not append linelog, but we need to record path
                self._node2path[f.node()] = f.path()

        if progress:  # clean progress bar
            self.ui.write()

        result = [
            ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()),
             l) for fr, l in hist[revfctx][0]
        ]  # [(node, linenumber)]
        return self._refineannotateresult(result, revfctx, showpath, showlines)
コード例 #48
0
ファイル: __init__.py プロジェクト: Kryndex/eden-hg
def merge_update(orig,
                 repo,
                 node,
                 branchmerge,
                 force,
                 ancestor=None,
                 mergeancestor=False,
                 labels=None,
                 matcher=None,
                 mergeforce=False,
                 updatecheck=None):
    assert node is not None

    if not util.safehasattr(repo.dirstate, 'eden_client'):
        # This is not an eden repository
        useeden = False
    elif matcher is not None and not matcher.always():
        # We don't support doing a partial update through eden yet.
        useeden = False
    elif branchmerge or ancestor is not None:
        useeden = False
    else:
        # TODO: We probably also need to set useeden = False if there are
        # subrepositories.  (Personally I might vote for just not supporting
        # subrepos in eden.)
        useeden = True

    if not useeden:
        repo.ui.debug("falling back to non-eden update code path")
        return orig(repo,
                    node,
                    branchmerge,
                    force,
                    ancestor=ancestor,
                    mergeancestor=mergeancestor,
                    labels=labels,
                    matcher=matcher,
                    mergeforce=mergeforce)

    with repo.wlock():
        wctx = repo[None]
        parents = wctx.parents()

        p1ctx = parents[0]
        destctx = repo[node]
        deststr = str(destctx)

        if not force:
            # Make sure there isn't an outstanding merge or unresolved files.
            if len(parents) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergemod.mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))

            # The vanilla merge code disallows updating between two unrelated
            # branches if the working directory is dirty.  I don't really see a
            # good reason to disallow this; it should be treated the same as if
            # we committed the changes, checked out the other branch then tried
            # to graft the changes here.

        # Invoke the preupdate hook
        repo.hook('preupdate', throw=True, parent1=deststr, parent2='')
        # note that we're in the middle of an update
        repo.vfs.write('updatestate', destctx.hex())

        # Ask eden to perform the checkout
        if force or p1ctx != destctx:
            conflicts = repo.dirstate.eden_client.checkout(destctx.node(),
                                                           force=force)
        else:
            conflicts = None

        # Handle any conflicts
        # The stats returned are numbers of files affected:
        #   (updated, merged, removed, unresolved)
        # The updated and removed file counts will always be 0 in our case.
        if conflicts and not force:
            stats = _handleupdateconflicts(repo, wctx, p1ctx, destctx, labels,
                                           conflicts)
        else:
            stats = 0, 0, 0, 0

        # Clear the update state
        util.unlink(repo.vfs.join('updatestate'))

    # Invoke the update hook
    repo.hook('update', parent1=deststr, parent2='', error=stats[3])

    return stats
コード例 #49
0
ファイル: server.py プロジェクト: CSCI-362-02-2015/RedTeam
                                            server_side=True,
                                            certfile=ssl_cert,
                                            ssl_version=ssl.PROTOCOL_TLSv1)

    def setup(self):
        self.connection = self.request
        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
        self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)


try:
    from threading import activeCount
    activeCount()  # silence pyflakes
    _mixin = SocketServer.ThreadingMixIn
except ImportError:
    if util.safehasattr(os, "fork"):
        _mixin = SocketServer.ForkingMixIn
    else:

        class _mixin(object):
            pass


def openlog(opt, default):
    if opt and opt != '-':
        return open(opt, 'a')
    return default


class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
コード例 #50
0
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    if util.safehasattr(repo, 'journal'):
        _setupdirstate(repo, dirstate)
    return dirstate
コード例 #51
0
    config,
    cmdutil,
    error,
    exchange,
    extensions,
    hg,
    registrar,
    util,
)


cmdtable = {}

# Mercurial 4.3 introduced registrar.command as a replacement for
# cmdutil.command.
if util.safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
else:
    command = cmdutil.command(cmdtable)


class unifyconfig(object):
    def __init__(self, path):
        self._c = config.config()
        with open(path, 'rb') as fh:
            self._c.read(path, fh)

        if 'GLOBAL' not in self._c:
            raise error.Abort('config file missing GLOBAL section')

        self.stagepath = self._c.get('GLOBAL', 'stagepath')
コード例 #52
0
def wrappedpullobsolete(orig, pullop):
    res = orig(pullop)

    repo = pullop.repo
    remote = pullop.remote

    if not isfirefoxrepo(repo):
        return res

    if remote.capable('firefoxtrees'):
        bmstore = bookmarks.bmstore(repo)
        # remote.local() returns a localrepository or None. If local,
        # just pass it into the wire protocol command/function to simulate
        # the remote command call.
        if remote.local():
            lines = firefoxtrees(remote.local(), None).splitlines()
        else:
            lines = remote._call('firefoxtrees').splitlines()
        oldtags = {}
        for tag, node, tree, uri in get_firefoxtrees(repo):
            oldtags[tag] = node
        newtags = {}
        changes = []
        for line in lines:
            tag, node = line.split()
            newtags[tag] = node

            node = bin(node)

            # A local bookmark of the incoming tag name is already set.
            # Wipe it out - the server takes precedence.
            if tag in bmstore:
                oldtags[tag] = bmstore[tag]
                repo.ui.status(
                    '(removing bookmark on %s matching firefoxtree %s)\n' %
                    (short(bmstore[tag]), tag))

                # TRACKING hg43 applychanges() introduced in Mercurial 4.3.
                if util.safehasattr(bmstore, 'applychanges'):
                    changes.append((tag, None))
                else:
                    del bmstore[tag]
                    bmstore.recordchange(pullop.trmanager.transaction())

                if bmstore.active == tag:
                    repo.ui.status('(deactivating bookmark %s)\n' % tag)
                    bookmarks.deactivate(repo)

            if oldtags.get(tag, None) == node:
                continue

            repo.firefoxtrees[tag] = node

            between = None
            if tag in oldtags:
                between = len(repo.revs('%n::%n', oldtags[tag], node)) - 1

                if not between:
                    continue

            msg = _('updated firefox tree tag %s') % tag
            if between:
                msg += _(' (+%d commits)') % between
            msg += '\n'
            repo.ui.status(msg)

        # TRACKING hg43
        if changes and util.safehasattr(bmstore, 'applychanges'):
            bmstore.applychanges(repo, pullop.gettransaction(), changes)

        writefirefoxtrees(repo)

    tree = resolve_uri_to_tree(remote.url())
    if tree:
        tree = tree.encode('utf-8')
        updateremoterefs(repo, remote, tree)

    return res
コード例 #53
0
def reposetup(ui, repo):
    if not util.safehasattr(repo, 'mq'):
        return

    ui.setconfig('hooks', 'prechangegroup.mqpreventpull', prechangegroup_hook,
                 'mqext')
コード例 #54
0
try:
    from mercurial import configitems
except ImportError:
    configitems = None


def _vcthome():  # Returns the directory where the vct clone is located
    here = os.path.dirname(os.path.abspath(__file__))
    ext_dir = os.path.normpath(os.path.join(here, '..'))
    vct_dir = os.path.normpath(os.path.join(ext_dir, '..'))
    vcthome_dir = os.path.normpath(os.path.join(vct_dir, '..'))

    return vcthome_dir


if registrar and util.safehasattr(registrar, 'configitem'):
    configtable = {}
    configitem = registrar.configitem(configtable)

    # TODO some of these are registered elsewhere. This can produce a warning
    # for duplicate registration. We should ideally call a shared function
    # that only registers once.
    configitem('configwizard', 'steps',
               default=[])
    configitem('bugzilla', 'username',
               default=None)
    configitem('bugzilla', 'apikey',
               default=None)
    configitem('mozilla', 'ircnick',
               default=None)
    configitem('mozilla', 'mozbuild_state_path',
コード例 #55
0
ファイル: __init__.py プロジェクト: termim/hg-git
def safebranchrevs(orig, lrepo, repo, branches, revs):
    revs, co = orig(lrepo, repo, branches, revs)
    if hgutil.safehasattr(lrepo, 'changelog') and co not in lrepo.changelog:
        co = None
    return revs, co
コード例 #56
0
ファイル: git.py プロジェクト: CSCI-362-02-2015/RedTeam
class convert_git(converter_source):
    # Windows does not support the GIT_DIR= construct, while other
    # systems cannot remove an environment variable. Just assume no
    # system has both issues.
    if util.safehasattr(os, 'unsetenv'):

        def gitopen(self, s, err=None):
            prevgitdir = os.environ.get('GIT_DIR')
            os.environ['GIT_DIR'] = self.path
            try:
                if err == subprocess.PIPE:
                    (stdin, stdout, stderr) = util.popen3(s)
                    return stdout
                elif err == subprocess.STDOUT:
                    return self.popen_with_stderr(s)
                else:
                    return util.popen(s, 'rb')
            finally:
                if prevgitdir is None:
                    del os.environ['GIT_DIR']
                else:
                    os.environ['GIT_DIR'] = prevgitdir

        def gitpipe(self, s):
            prevgitdir = os.environ.get('GIT_DIR')
            os.environ['GIT_DIR'] = self.path
            try:
                return util.popen3(s)
            finally:
                if prevgitdir is None:
                    del os.environ['GIT_DIR']
                else:
                    os.environ['GIT_DIR'] = prevgitdir

    else:

        def gitopen(self, s, err=None):
            if err == subprocess.PIPE:
                (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s))
                return so
            elif err == subprocess.STDOUT:
                return self.popen_with_stderr(s)
            else:
                return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')

        def gitpipe(self, s):
            return util.popen3('GIT_DIR=%s %s' % (self.path, s))

    def popen_with_stderr(self, s):
        p = subprocess.Popen(s,
                             shell=True,
                             bufsize=-1,
                             close_fds=util.closefds,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             universal_newlines=False,
                             env=None)
        return p.stdout

    def gitread(self, s):
        fh = self.gitopen(s)
        data = fh.read()
        return data, fh.close()

    def __init__(self, ui, path, revs=None):
        super(convert_git, self).__init__(ui, path, revs=revs)

        if os.path.isdir(path + "/.git"):
            path += "/.git"
        if not os.path.exists(path + "/objects"):
            raise NoRepo(_("%s does not look like a Git repository") % path)

        # The default value (50) is based on the default for 'git diff'.
        similarity = ui.configint('convert', 'git.similarity', default=50)
        if similarity < 0 or similarity > 100:
            raise error.Abort(_('similarity must be between 0 and 100'))
        if similarity > 0:
            self.simopt = '-C%d%%' % similarity
            findcopiesharder = ui.configbool('convert', 'git.findcopiesharder',
                                             False)
            if findcopiesharder:
                self.simopt += ' --find-copies-harder'
        else:
            self.simopt = ''

        checktool('git', 'git')

        self.path = path
        self.submodules = []

        self.catfilepipe = self.gitpipe('git cat-file --batch')

    def after(self):
        for f in self.catfilepipe:
            f.close()

    def getheads(self):
        if not self.revs:
            heads, ret = self.gitread('git rev-parse --branches --remotes')
            heads = heads.splitlines()
            if ret:
                raise error.Abort(_('cannot retrieve git heads'))
        else:
            heads = []
            for rev in self.revs:
                rawhead, ret = self.gitread("git rev-parse --verify %s" % rev)
                heads.append(rawhead[:-1])
                if ret:
                    raise error.Abort(_('cannot retrieve git head "%s"') % rev)
        return heads

    def catfile(self, rev, type):
        if rev == hex(nullid):
            raise IOError
        self.catfilepipe[0].write(rev + '\n')
        self.catfilepipe[0].flush()
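        # 'git cat-file --batch' answers '<sha> <type> <size>\n' followed
        # by the raw object content and a trailing newline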
        info = self.catfilepipe[1].readline().split()
        if info[1] != type:
            raise error.Abort(_('cannot read %r object at %s') % (type, rev))
        size = int(info[2])
        data = self.catfilepipe[1].read(size)
        if len(data) < size:
            raise error.Abort(
                _('cannot read %r object at %s: unexpected size') %
                (type, rev))
        # read the trailing newline
        self.catfilepipe[1].read(1)
        return data

    def getfile(self, name, rev):
        if rev == hex(nullid):
            return None, None
        if name == '.hgsub':
            data = '\n'.join([m.hgsub() for m in self.submoditer()])
            mode = ''
        elif name == '.hgsubstate':
            data = '\n'.join([m.hgsubstate() for m in self.submoditer()])
            mode = ''
        else:
            data = self.catfile(rev, "blob")
            mode = self.modecache[(name, rev)]
        return data, mode

    def submoditer(self):
        null = hex(nullid)
        for m in sorted(self.submodules, key=lambda p: p.path):
            if m.node != null:
                yield m

    def parsegitmodules(self, content):
        """Parse the formatted .gitmodules file, example file format:
        [submodule "sub"]\n
        \tpath = sub\n
        \turl = git://giturl\n
        """
        self.submodules = []
        c = config.config()
        # Each item in .gitmodules starts with whitespace that can't be parsed
        c.parse('.gitmodules',
                '\n'.join(line.strip() for line in content.split('\n')))
        for sec in c.sections():
            s = c[sec]
            if 'url' in s and 'path' in s:
                self.submodules.append(submodule(s['path'], '', s['url']))
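        # e.g. the sample content in the docstring above yields one
        # submodule entry with path 'sub' and url 'git://giturl'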

    def retrievegitmodules(self, version):
        modules, ret = self.gitread("git show %s:%s" %
                                    (version, '.gitmodules'))
        if ret:
            # This can happen if a file is in the repo that has permissions
            # 160000, but there is no .gitmodules file.
            self.ui.warn(
                _("warning: cannot read submodules config file in "
                  "%s\n") % version)
            return

        try:
            self.parsegitmodules(modules)
        except error.ParseError:
            self.ui.warn(
                _("warning: unable to parse .gitmodules in %s\n") % version)
            return

        for m in self.submodules:
            node, ret = self.gitread("git rev-parse %s:%s" % (version, m.path))
            if ret:
                continue
            m.node = node.strip()

    def getchanges(self, version, full):
        if full:
            raise error.Abort(_("convert from git does not support --full"))
        self.modecache = {}
        fh = self.gitopen("git diff-tree -z --root -m -r %s %s" %
                          (self.simopt, version))
        changes = []
        copies = {}
        seen = set()
        entry = None
        subexists = [False]
        subdeleted = [False]
        difftree = fh.read().split('\x00')
        lcount = len(difftree)
        i = 0

        skipsubmodules = self.ui.configbool('convert', 'git.skipsubmodules',
                                            False)

        def add(entry, f, isdest):
            seen.add(f)
            h = entry[3]
            p = (entry[1] == "100755")
            s = (entry[1] == "120000")
            renamesource = (not isdest and entry[4][0] == 'R')

            if f == '.gitmodules':
                if skipsubmodules:
                    return

                subexists[0] = True
                if entry[4] == 'D' or renamesource:
                    subdeleted[0] = True
                    changes.append(('.hgsub', hex(nullid)))
                else:
                    changes.append(('.hgsub', ''))
            elif entry[1] == '160000' or entry[0] == ':160000':
                if not skipsubmodules:
                    subexists[0] = True
            else:
                if renamesource:
                    h = hex(nullid)
                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
                changes.append((f, h))

        while i < lcount:
            l = difftree[i]
            i += 1
            if not entry:
                if not l.startswith(':'):
                    continue
                entry = l.split()
                continue
            f = l
            if entry[4][0] == 'C':
                copysrc = f
                copydest = difftree[i]
                i += 1
                f = copydest
                copies[copydest] = copysrc
            if f not in seen:
                add(entry, f, False)
            # A file can be copied multiple times, or modified and copied
            # simultaneously. So f can be repeated even if fdest isn't.
            if entry[4][0] == 'R':
                # rename: next line is the destination
                fdest = difftree[i]
                i += 1
                if fdest not in seen:
                    add(entry, fdest, True)
                    # .gitmodules isn't imported at all, so it being copied to
                    # and fro doesn't really make sense
                    if f != '.gitmodules' and fdest != '.gitmodules':
                        copies[fdest] = f
            entry = None
        if fh.close():
            raise error.Abort(_('cannot read changes in %s') % version)

        if subexists[0]:
            if subdeleted[0]:
                changes.append(('.hgsubstate', hex(nullid)))
            else:
                self.retrievegitmodules(version)
                changes.append(('.hgsubstate', ''))
        return (changes, copies, set())
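
For reference, getchanges() walks the NUL-separated output of `git diff-tree -z`: metadata records start with ':' and carry old/new mode, old/new sha1 and a status letter, and each is followed by one path (two for renames and copies). A hypothetical pre-split stream with one modification and one rename:

sample = (':100644 100644 aaaa1111 bbbb2222 M\x00README\x00'
          ':100644 100644 cccc3333 cccc3333 R100\x00old.py\x00new.py\x00')
difftree = sample.split('\x00')
# -> [':100644 100644 aaaa1111 bbbb2222 M', 'README',
#     ':100644 100644 cccc3333 cccc3333 R100', 'old.py', 'new.py', '']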

    def getcommit(self, version):
        c = self.catfile(version, "commit")  # read the commit object
        end = c.find("\n\n")
        message = c[end + 2:]
        message = self.recode(message)
        l = c[:end].splitlines()
        parents = []
        author = committer = None
        for e in l[1:]:
            n, v = e.split(" ", 1)
            if n == "author":
                p = v.split()
                tm, tz = p[-2:]
                author = " ".join(p[:-2])
                if author[0] == "<": author = author[1:-1]
                author = self.recode(author)
            if n == "committer":
                p = v.split()
                tm, tz = p[-2:]
                committer = " ".join(p[:-2])
                if committer[0] == "<":
                    committer = committer[1:-1]
                committer = self.recode(committer)
            if n == "parent":
                parents.append(v)

        if committer and committer != author:
            message += "\ncommitter: %s\n" % committer
        # git encodes the offset as an east-positive "±HHMM" string, while
        # Mercurial dates store seconds west of UTC, so flip the sign and
        # scale hours and minutes to seconds
        tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
        tz = -int(tzs) * (int(tzh) * 3600 + int(tzm) * 60)
        date = tm + " " + str(tz)

        c = commit(parents=parents,
                   date=date,
                   author=author,
                   desc=message,
                   rev=version)
        return c
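
A quick sanity check of the offset conversion at the end of getcommit(): git writes an east-positive "±HHMM" string, Mercurial dates carry seconds west of UTC, so the sign flips and the minutes are scaled to seconds:

for gitoffset, hgoffset in [('-0700', 25200), ('+0530', -19800)]:
    tzs, tzh, tzm = gitoffset[-5:-4] + '1', gitoffset[-4:-2], gitoffset[-2:]
    assert -int(tzs) * (int(tzh) * 3600 + int(tzm) * 60) == hgoffset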

    def numcommits(self):
        return len([None for _ in self.gitopen('git rev-list --all')])

    def gettags(self):
        tags = {}
        alltags = {}
        fh = self.gitopen('git ls-remote --tags "%s"' % self.path,
                          err=subprocess.STDOUT)
        prefix = 'refs/tags/'

        # Build complete list of tags, both annotated and bare ones
        for line in fh:
            line = line.strip()
            if line.startswith("error:") or line.startswith("fatal:"):
                raise error.Abort(_('cannot read tags from %s') % self.path)
            node, tag = line.split(None, 1)
            if not tag.startswith(prefix):
                continue
            alltags[tag[len(prefix):]] = node
        if fh.close():
            raise error.Abort(_('cannot read tags from %s') % self.path)

        # Filter out tag objects for annotated tag refs
        for tag in alltags:
            if tag.endswith('^{}'):
                tags[tag[:-3]] = alltags[tag]
            else:
                if tag + '^{}' in alltags:
                    continue
                else:
                    tags[tag] = alltags[tag]

        return tags
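
The `^{}` handling above peels annotated tags: `git ls-remote --tags` lists both the tag object and, with a `^{}` suffix, the commit it points at, and the commit id is the one we want. A standalone run of the same filter over hypothetical data:

alltags = {'v1.0': 'aaaa',      # annotated tag object id
           'v1.0^{}': 'bbbb',   # the commit it points at (preferred)
           'v1.1': 'cccc'}      # lightweight tag, no peeled entry
tags = {}
for tag in alltags:
    if tag.endswith('^{}'):
        tags[tag[:-3]] = alltags[tag]
    elif tag + '^{}' not in alltags:
        tags[tag] = alltags[tag]
# tags == {'v1.0': 'bbbb', 'v1.1': 'cccc'}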

    def getchangedfiles(self, version, i):
        changes = []
        if i is None:
            fh = self.gitopen("git diff-tree --root -m -r %s" % version)
            for l in fh:
                if "\t" not in l:
                    continue
                m, f = l[:-1].split("\t")
                changes.append(f)
        else:
            fh = self.gitopen('git diff-tree --name-only --root -r %s '
                              '"%s^%s" --' % (version, version, i + 1))
            changes = [f.rstrip('\n') for f in fh]
        if fh.close():
            raise error.Abort(_('cannot read changes in %s') % version)

        return changes

    def getbookmarks(self):
        bookmarks = {}

        # Handle local and remote branches
        remoteprefix = self.ui.config('convert', 'git.remoteprefix', 'remote')
        reftypes = [
            # (git prefix, hg prefix)
            ('refs/remotes/origin/', remoteprefix + '/'),
            ('refs/heads/', '')
        ]

        exclude = set([
            'refs/remotes/origin/HEAD',
        ])

        try:
            fh = self.gitopen('git show-ref', err=subprocess.PIPE)
            for line in fh:
                line = line.strip()
                rev, name = line.split(None, 1)
                # Process each type of branch
                for gitprefix, hgprefix in reftypes:
                    if not name.startswith(gitprefix) or name in exclude:
                        continue
                    name = '%s%s' % (hgprefix, name[len(gitprefix):])
                    bookmarks[name] = rev
        except Exception:
            pass

        return bookmarks
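
A hypothetical run of the ref-to-bookmark mapping above, with git.remoteprefix left at its 'remote' default:

reftypes = [('refs/remotes/origin/', 'remote/'), ('refs/heads/', '')]
exclude = set(['refs/remotes/origin/HEAD'])
sample = {'refs/heads/stable': '1111',
          'refs/remotes/origin/topic': '2222',
          'refs/remotes/origin/HEAD': '3333'}
bookmarks = {}
for name, rev in sample.items():
    for gitprefix, hgprefix in reftypes:
        if not name.startswith(gitprefix) or name in exclude:
            continue
        bookmarks[hgprefix + name[len(gitprefix):]] = rev
# bookmarks == {'stable': '1111', 'remote/topic': '2222'}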

    def checkrevformat(self, revstr, mapname='splicemap'):
        """ git revision string is a 40 byte hex """
        self.checkhexformat(revstr, mapname)
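
checkhexformat() is inherited from the converter base class and is not shown here; a rough standalone sketch of the validation it presumably performs:

import re

def checkhexformat_sketch(revstr):
    # the real helper raises error.Abort naming the offending map file
    if not re.match(r'[0-9a-fA-F]{40}$', revstr):
        raise ValueError('%r is not a 40-character hex hash' % revstr)
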
Code example #57
    def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        # Note: other than delegating to orig, the only deviation in
        # logic from normal hg's generate is marked with BEGIN/END
        # NARROW HACK.
        if not util.safehasattr(self, 'full_nodes'):
            # not sending a narrow bundle
            for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
                yield x
            return

        repo = self._repo
        cl = repo.changelog
        mfl = repo.manifestlog
        mfrevlog = mfl._revlog

        clrevorder = {}
        mfs = {}  # needed manifests
        fnodes = {}  # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            # BEGIN NARROW HACK
            #
            # Only update mfs if x is going to be sent. Otherwise we
            # end up with bogus linkrevs specified for manifests and
            # we skip some manifest nodes that we should otherwise
            # have sent.
            if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
                n = c[0]
                # record the first changeset introducing this manifest version
                mfs.setdefault(n, x)
                # Set this narrow-specific dict so we have the lowest manifest
                # revnum to look up for this cl revnum. (Part of mapping
                # changelog ellipsis parents to manifest ellipsis parents)
                self.next_clrev_to_localrev.setdefault(cl.rev(x),
                                                       mfrevlog.rev(n))
            # Record a complete list of potentially-changed files in
            # this manifest. We can't trust the changed files list in
            # the changeset if the client requested a shallow clone.
            if self.is_shallow:
                changedfiles.update(mfl[c[0]].read().keys())
            else:
                changedfiles.update(c[3])
            # END NARROW HACK
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and ('treemanifest'
                                               not in repo.requirements)
        # Shallow clones also don't work correctly with fastpathlinkrev
        # because file nodes may need to be sent for a manifest even if they
        # weren't introduced by that manifest.
        fastpathlinkrev = fastpathlinkrev and not self.is_shallow

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes,
                                            source):
            yield chunk
        # BEGIN NARROW HACK
        mfdicts = None
        if self.is_shallow:
            mfdicts = [(self._repo.manifestlog[n].read(), lr)
                       for (n, lr) in mfs.iteritems()]
        # END NARROW HACK
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:

            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node

            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict(
                    (fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        # BEGIN NARROW HACK
        #
        # We need to pass the mfdicts variable down into
        # generatefiles(), but more than one command might have
        # wrapped generatefiles so we can't modify the function
        # signature. Instead, we pass the data to ourselves using an
        # instance attribute. I'm sorry.
        self._mfdicts = mfdicts
        # END NARROW HACK
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
Code example #58
    def revchunk(orig, self, revlog, rev, prev, linknode):
        if not util.safehasattr(self, 'full_nodes'):
            # not sending a narrow changegroup
            for x in orig(self, revlog, rev, prev, linknode):
                yield x
            return
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if not self.changelog_done:
            self.clnode_to_rev[linknode] = rev
            linkrev = rev
            self.clrev_to_localrev[linkrev] = rev
        else:
            linkrev = self.clnode_to_rev[linknode]
            self.clrev_to_localrev[linkrev] = rev
        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self.full_nodes:
            for x in orig(self, revlog, rev, prev, linknode):
                yield x
            return
        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self.precomputed_ellipsis:
            return
        linkparents = self.precomputed_ellipsis[linkrev]

        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == node.nullrev:
                return node.nullrev
            if not self.changelog_done:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
                if clrev not in self.clrev_to_localrev:
                    clnode = revlog.node(clrev)
                    self.clnode_to_rev[clnode] = clrev
                return clrev
            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
                if p in self.clrev_to_localrev:
                    return self.clrev_to_localrev[p]
                elif p in self.full_nodes:
                    walk.extend([
                        pp for pp in self._repo.changelog.parentrevs(p)
                        if pp != node.nullrev
                    ])
                elif p in self.precomputed_ellipsis:
                    walk.extend([
                        pp for pp in self.precomputed_ellipsis[p]
                        if pp != node.nullrev
                    ])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in xrange(rev, 0, -1):
                        if revlog.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (revlog.indexfile, rev, clrev))
            return node.nullrev

        if not linkparents or (revlog.parentrevs(rev)
                               == (node.nullrev, node.nullrev)):
            p1, p2 = node.nullrev, node.nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = node.nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)
        n = revlog.node(rev)
        yield ellipsisdata(self, rev, revlog, p1, p2, revlog.revision(n),
                           linknode)
Code example #59
    def wrappedcghandler(op, inpart):
        origcghandler(op, inpart)
        if util.safehasattr(op, '_widen_bundle'):
            handlechangegroup_widen(op, inpart)
Code example #60
    def _sortgroup(orig, self, revlog, nodelist, lookup):
        if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
            return orig(self, revlog, nodelist, lookup)
        key = lambda n: self.clnode_to_rev[lookup(n)]
        return [revlog.rev(n) for n in sorted(nodelist, key=key)]
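
The generate, revchunk, and _sortgroup wrappers in examples #57, #58 and #60 follow the `(orig, self, ...)` calling convention of Mercurial's extensions.wrapfunction. A plausible registration hook, assuming the pre-5.0 cg1packer class these examples appear to target (the narrow extension's real setup code is not shown here):

from mercurial import changegroup, extensions

def extsetup(ui):
    # hypothetical wiring of the wrappers defined above
    extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
    extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
    extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)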