Example #1
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
Example #2
            else:
                ui.debug("copied %d files\n" % num)

            # we need to re-init the repo after manually copying the data
            # into it
            dest_repo = repository(ui, dest)
            src_repo.hook('outgoing',
                          source='clone',
                          node=node.hex(node.nullid))
        else:
            try:
                dest_repo = repository(ui, dest, create=True)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dir_cleanup.close()
                    raise util.Abort(
                        _("destination '%s' already exists") % dest)
                raise

            revs = None
            if rev:
                if 'lookup' not in src_repo.capabilities:
                    raise util.Abort(
                        _("src repository does not support "
                          "revision lookup and so doesn't "
                          "support clone by revision"))
                revs = [src_repo.lookup(r) for r in rev]
                checkout = revs[0]
            if dest_repo.local():
                dest_repo.clone(src_repo, heads=revs, stream=stream)
            elif src_repo.local():
                src_repo.push(dest_repo, revs=revs)
Example #3
                try:
                    ui.warn(_('exception raised - generating '
                              'profile anyway\n'))
                except:
                    pass
                raise
        finally:
            prof.close()
            stats = hotshot.stats.load("hg.prof")
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(40)
    elif options['lsprof']:
        try:
            from mercurial import lsprof
        except ImportError:
            raise util.Abort(_(
                'lsprof not available - install from '
                'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
        p = lsprof.Profiler()
        p.enable(subcalls=True)
        try:
            return checkargs()
        finally:
            p.disable()
            stats = lsprof.Stats(p.getstats())
            stats.sort()
            stats.pprint(top=10, file=sys.stderr, climit=5)
    else:
        return checkargs()
Example #4
def addchangegroup(repo,
                   source,
                   srctype,
                   url,
                   emptyok=False,
                   targetphase=phases.draft):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'.  url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()

    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    changesets = files = revisions = 0
    efiles = set()

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    # The transaction could have been created before and may already carry
    # source information. In that case we use the top-level data and
    # overwrite the arguments, because this function needs the top-level
    # values (if they exist).
    srctype = tr.hookargs.setdefault('source', srctype)
    url = tr.hookargs.setdefault('url', url)

    # write changelog data to temp files so concurrent readers will not see
    # an inconsistent view
    cl = repo.changelog
    cl.delayupdate(tr)
    oldheads = cl.heads()
    try:
        repo.hook('prechangegroup', throw=True, **tr.hookargs)

        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)

        class prog(object):
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None

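            # note: 'repo' below names the prog instance itself (i.e. self),
            # shadowing the outer repo; attribute lookups fall through to the
            # class attributes defined above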
            def __call__(repo):
                repo.ui.progress(repo.step,
                                 repo.count,
                                 unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1

        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets  # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(
            _("added %d changesets"
              " with %d changes to %d files%s\n") %
            (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            p = lambda: tr.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(cl.node(clstart))
                hookargs = dict(tr.hookargs)
            else:
                hookargs = dict(tr.hookargs)
                hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers cannot push the boundary themselves.
            # New servers won't push the boundary if the changeset already
            # exists locally as secret
            #
            # We should not use `added` here but the list of all changes in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, tr, phases.public, srccontent)
            else:
                # Those changesets have been pushed from the outside and their
                # phases are going to be pushed alongside. Therefore
                # `targetphase` is ignored.
                phases.advanceboundary(repo, tr, phases.draft, srccontent)
                phases.retractboundary(repo, tr, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alters behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, tr, targetphase, added)

        if changesets > 0:
            if srctype != 'strip':
                # During strip, the branchcache is invalid, but the upcoming
                # call to `destroyed` will repair it.
                # In other cases we can safely update the cache on disk.
                branchmap.updatecache(repo.filtered('served'))

            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", **hookargs)

                for n in added:
                    args = hookargs.copy()
                    args['node'] = hex(n)
                    repo.hook("incoming", **args)

                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))

            tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                            lambda tr: repo._afterlock(runhooks))

        tr.close()

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
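
The head-delta encoding above means a caller can decode the head count change
directly from the return value. A minimal sketch (all names other than
addchangegroup() are hypothetical):

    ret = addchangegroup(repo, source, 'pull', url)
    if ret == 0:
        pass                      # nothing changed or no source
    elif ret > 1:
        addedheads = ret - 1      # ret = 1 + added heads
    elif ret < 0:
        removedheads = -1 - ret   # ret = -1 - removed heads
    # ret == 1: the number of heads stayed the same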
Example #5
 def replacer(m):
     num = int(m.group(1)) - 1
     nums.append(num)
     if num < len(givenargs):
         return givenargs[num]
     raise util.Abort(_('too few arguments for command alias'))
Example #6
def createservice(ui, repo, opts):
    mode = opts['cmdserver']
    try:
        return _servicemap[mode](ui, repo, opts)
    except KeyError:
        raise util.Abort(_('unknown mode %s') % mode)
Example #7
 def send(sender, recipients, msg):
     try:
         return s.sendmail(sender, recipients, msg)
     except smtplib.SMTPRecipientsRefused, inst:
         recipients = [r[1] for r in inst.recipients.values()]
         raise util.Abort('\n' + '\n'.join(recipients))
Example #8
    rev = {}
    if '.hgsubstate' in ctx:
        try:
            for l in ctx['.hgsubstate'].data().splitlines():
                revision, path = l.split(" ", 1)
                rev[path] = revision
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise

    state = {}
    for path, src in p[''].items():
        kind = 'hg'
        if src.startswith('['):
            if ']' not in src:
                raise util.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
        state[path] = (src.strip(), rev.get(path, ''), kind)

    return state


def writestate(repo, state):
    repo.wwrite('.hgsubstate',
                ''.join(['%s %s\n' % (state[s][1], s) for s in sorted(state)]),
                '')
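
For reference, these two functions round-trip the .hgsubstate format: one
entry per line, the 40-character revision hash first, then the subrepo path.
Hypothetical contents:

    0123456789abcdef0123456789abcdef01234567 lib/vendor
    fedcba9876543210fedcba9876543210fedcba98 docs/theme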


def submerge(repo, wctx, mctx, actx):
    # working context, merging context, ancestor context
Example #9
 def read(f, sections=None, remap=None):
     if f in ctx:
         p.parse(f, ctx[f].data(), sections, remap, read)
     else:
         raise util.Abort(_("subrepo spec file %s not found") % f)
Example #10
File: ui.py  Project: as/9front-work
 def getpass(self, prompt=None, default=None):
     if not self.interactive(): return default
     try:
         return getpass.getpass(prompt or _('password: '))
     except EOFError:
         raise util.Abort(_('response expected'))
Example #11
def checkheads(repo,
               remote,
               outgoing,
               remoteheads,
               newbranch=False,
               inc=False):
    """Check that a push won't add any outgoing head

    Raises an Abort error and displays a ui message as needed.
    """
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    cl = repo.changelog
    if remote.capable('branchmap'):
        # Check for each named branch if we're creating new remote heads.
        # To be a remote head after push, node must be either:
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        # 1. Create set of branches involved in the push.
        branches = set(repo[n].branch() for n in outgoing.missing)

        # 2. Check for new branches on the remote.
        if remote.local():
            remotemap = phases.visiblebranchmap(remote)
        else:
            remotemap = remote.branchmap()
        newbranches = branches - set(remotemap)
        if newbranches and not newbranch:  # new branch requires --new-branch
            branchnames = ', '.join(sorted(newbranches))
            raise util.Abort(_("push creates new remote branches: %s!") %
                             branchnames,
                             hint=_("use 'hg push --new-branch' to create"
                                    " new remote branches"))
        branches.difference_update(newbranches)

        # 3. Construct the initial oldmap and newmap dicts.
        # They contain information about the remote heads before and
        # after the push, respectively.
        # Heads not found locally are not included in either dict,
        # since they won't be affected by the push.
        # unsynced contains all branches with incoming changesets.
        oldmap = {}
        newmap = {}
        unsynced = set()
        for branch in branches:
            remotebrheads = remotemap[branch]
            prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
            oldmap[branch] = prunedbrheads
            newmap[branch] = list(prunedbrheads)
            if len(remotebrheads) > len(prunedbrheads):
                unsynced.add(branch)

        # 4. Update newmap with outgoing changes.
        # This will possibly add new heads and remove existing ones.
        ctxgen = (repo[n] for n in outgoing.missing)
        repo._updatebranchcache(newmap, ctxgen)

    else:
        # 1-4b. old servers: Check for new topological heads.
        # Construct {old,new}map with branch = None (topological branch).
        # (code based on _updatebranchcache)
        oldheads = set(h for h in remoteheads if h in cl.nodemap)
        newheads = oldheads.union(outgoing.missing)
        if len(newheads) > 1:
            for latest in reversed(outgoing.missing):
                if latest not in newheads:
                    continue
                minhrev = min(cl.rev(h) for h in newheads)
                reachable = cl.reachable(latest, cl.node(minhrev))
                reachable.remove(latest)
                newheads.difference_update(reachable)
        branches = set([None])
        newmap = {None: newheads}
        oldmap = {None: oldheads}
        unsynced = inc and branches or set()

    # 5. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    error = None
    for branch in branches:
        newhs = set(newmap[branch])
        oldhs = set(oldmap[branch])
        if len(newhs) > len(oldhs):
            dhs = list(newhs - oldhs)
            if error is None:
                if branch not in ('default', None):
                    error = _("push creates new remote head %s "
                              "on branch '%s'!") % (short(dhs[0]), branch)
                else:
                    error = _("push creates new remote head %s!") % short(
                        dhs[0])
                if branch in unsynced:
                    hint = _("you should pull and merge or "
                             "use push -f to force")
                else:
                    hint = _("did you forget to merge? "
                             "use push -f to force")
            if branch is not None:
                repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
            for h in dhs:
                repo.ui.note(_("new remote head %s\n") % short(h))
    if error:
        raise util.Abort(error, hint=hint)

    # 6. Check for unsynced changes on involved branches.
    if unsynced:
        repo.ui.warn(_("note: unsynced remote changes!\n"))
Example #12
def _smtp(ui):
    '''build an smtp connection and return a function to send mail'''
    local_hostname = ui.config('smtp', 'local_hostname')
    tls = ui.config('smtp', 'tls', 'none')
    # backward compatible: when tls = true, we use starttls.
    starttls = tls == 'starttls' or util.parsebool(tls)
    smtps = tls == 'smtps'
    if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
        raise util.Abort(_("can't use TLS: Python SSL support not installed"))
    mailhost = ui.config('smtp', 'host')
    if not mailhost:
        raise util.Abort(_('smtp.host not configured - cannot send mail'))
    verifycert = ui.config('smtp', 'verifycert', 'strict')
    if verifycert not in ['strict', 'loose']:
        if util.parsebool(verifycert) is not False:
            raise util.Abort(
                _('invalid smtp.verifycert configuration: %s') % (verifycert))
        verifycert = False
    if (starttls or smtps) and verifycert:
        sslkwargs = sslutil.sslkwargs(ui, mailhost)
    else:
        # 'ui' is required by sslutil.wrapsocket() and set by sslkwargs()
        sslkwargs = {'ui': ui}
    if smtps:
        ui.note(_('(using smtps)\n'))
        s = SMTPS(sslkwargs, local_hostname=local_hostname)
    elif starttls:
        s = STARTTLS(sslkwargs, local_hostname=local_hostname)
    else:
        s = smtplib.SMTP(local_hostname=local_hostname)
    if smtps:
        defaultport = 465
    else:
        defaultport = 25
    mailport = util.getport(ui.config('smtp', 'port', defaultport))
    ui.note(_('sending mail: smtp host %s, port %s\n') % (mailhost, mailport))
    s.connect(host=mailhost, port=mailport)
    if starttls:
        ui.note(_('(using starttls)\n'))
        s.ehlo()
        s.starttls()
        s.ehlo()
    if (starttls or smtps) and verifycert:
        ui.note(_('(verifying remote certificate)\n'))
        sslutil.validator(ui, mailhost)(s.sock, verifycert == 'strict')
    username = ui.config('smtp', 'username')
    password = ui.config('smtp', 'password')
    if username and not password:
        password = ui.getpass()
    if username and password:
        ui.note(_('(authenticating to mail server as %s)\n') % (username))
        try:
            s.login(username, password)
        except smtplib.SMTPException as inst:
            raise util.Abort(inst)

    def send(sender, recipients, msg):
        try:
            return s.sendmail(sender, recipients, msg)
        except smtplib.SMTPRecipientsRefused as inst:
            recipients = [r[1] for r in inst.recipients.values()]
            raise util.Abort('\n' + '\n'.join(recipients))
        except smtplib.SMTPException as inst:
            raise util.Abort(inst)

    return send
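
Since _smtp() returns a closure over the authenticated connection, a caller
configures once and reuses the sender. A minimal sketch (the addresses and
msg value are hypothetical):

    send = _smtp(ui)   # connect, negotiate TLS, authenticate
    send('me@example.com', ['you@example.com'], msg.as_string())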
Example #13
 def SMTPS(sslkwargs, keyfile=None, certfile=None, **kwargs):
     raise util.Abort(_('SMTPS requires Python 2.6 or later'))
Example #14
 def __call__(self, path, mode='r', *args, **kw):
     if mode not in ('r', 'rb'):
         raise util.Abort('this vfs is read only')
     return self.vfs(path, mode, *args, **kw)
Example #15
    pats = {}
    for f in files:
        try:
            pats[f] = []
            fp = open(f)
            pats[f], warnings = ignorepats(fp)
            for warning in warnings:
                warn("%s: %s\n" % (f, warning))
        except IOError, inst:
            if f != files[0]:
                warn(
                    _("skipping unreadable ignore file '%s': %s\n") %
                    (f, inst.strerror))

    allpats = []
    [allpats.extend(patlist) for patlist in pats.values()]
    if not allpats:
        return util.never

    try:
        ignorefunc = match.match(root, '', [], allpats)
    except util.Abort:
        # Re-raise an exception where the src is the right file
        for f, patlist in pats.iteritems():
            try:
                match.match(root, '', [], patlist)
            except util.Abort, inst:
                raise util.Abort('%s: %s' % (f, inst[0]))

    return ignorefunc
Example #16
 def get(self, state):
     status = self._svncommand(
         ['checkout', state[0], '--revision', state[1]])
     if not re.search('Checked out revision [\d]+.', status):
         raise util.Abort(status.splitlines()[-1])
     self._ui.status(status)
Example #17
def update(repo,
           node,
           branchmerge,
           force,
           partial,
           ancestor=None,
           mergeancestor=False,
           labels=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c  -C  dirty  rev  |  linear   same  cross
     n   n    n     n   |    ok     (1)     x
     n   n    n     y   |    ok     ok     ok
     n   n    y     n   |   merge   (2)    (2)
     n   n    y     y   |   merge   (3)    (3)
     n   y    *     *   |    ---  discard  ---
     y   n    y     *   |    ---    (4)    ---
     y   n    n     *   |    ---    ok     ---
     y   y    *     *   |    ---    (5)    ---

    x = can't happen
    * = don't-care
    1 = abort: not a linear update (merge or update --check to force update)
    2 = abort: uncommitted changes (commit and merge, or update --clean to
                 discard changes)
    3 = abort: uncommitted changes (commit or update --clean to discard changes)
    4 = abort: uncommitted changes (checked in commands.py)
    5 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        if node is None:
            # Here is where we should consider bookmarks, divergent bookmarks,
            # foreground changesets (successors), and tip of current branch;
            # but currently we are only checking the branch tips.
            try:
                node = repo.branchtip(wc.branch())
            except errormod.RepoLookupError:
                if wc.branch() == 'default':  # no default branch!
                    node = repo.lookup('tip')  # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())

            if p1.obsolete() and not p1.children():
                # allow updating to successors
                successors = obsolete.successorssets(repo, p1.node())

                # behavior of certain cases is as follows,
                #
                # divergent changesets: update to highest rev, similar to what
                #     is currently done when there are more than one head
                #     (i.e. 'tip')
                #
                # replaced changesets: same as divergent except we know there
                # is no conflict
                #
                # pruned changeset: no update is done; though, we could
                #     consider updating to the first non-obsolete parent,
                #     similar to what is currently done for 'hg prune'

                if successors:
                    # flattening the list here handles both the divergent case
                    # (len > 1) and the usual case (len = 1)
                    successors = [n for sub in successors for n in sub]

                    # get the max revision for the given successors set,
                    # i.e. the 'tip' of a set
                    node = repo.revs('max(%ln)', successors).first()
                    pas = [p1]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.config('merge', 'preferancestor', '*') == '*':
                cahs = repo.changelog.commonancestorsheads(
                    p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merge"))
        if branchmerge:
            if pas == [p2]:
                raise util.Abort(
                    _("merging with a working directory ancestor"
                      " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and p1.branch() == p2.branch():
                    raise util.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2:  # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if pas not in ([p1], [p2]):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty or onode is None:
                    # The branching is a bit strange here, to keep the number
                    # of calls to obsolete.foreground to a minimum.
                    foreground = obsolete.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pas = [p1]  # allow updating to successors
                    elif dirty:
                        msg = _("uncommitted changes")
                        if onode is None:
                            hint = _("commit and merge, or update --clean to"
                                     " discard changes")
                        else:
                            hint = _("commit or update --clean to discard"
                                     " changes")
                        raise util.Abort(msg, hint=hint)
                    else:  # node is none
                        msg = _("not a linear update")
                        hint = _("merge or update --check to force update")
                        raise util.Abort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pas = [p1]

        followcopies = False
        if overwrite:
            pas = [wc]
        elif pas == [p2]:  # backwards
            pas = [wc.p1()]
        elif not branchmerge and not wc.dirty(missing=True):
            pass
        elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
            followcopies = True

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
            followcopies)
        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.checkcase(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge
                    and (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # Prompt and create actions. TODO: Move this towards resolve phase.
        for f, args, msg in sorted(actions['cd']):
            if repo.ui.promptchoice(
                    _("local changed %s which remote deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % f, 0):
                actions['r'].append((f, None, "prompt delete"))
            else:
                actions['a'].append((f, None, "prompt keep"))
        del actions['cd'][:]

        for f, args, msg in sorted(actions['dc']):
            flags, = args
            if repo.ui.promptchoice(
                    _("remote changed %s which local deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % f, 0) == 0:
                actions['g'].append((f, (flags, ), "prompt recreating"))
        del actions['dc'][:]

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(
                _("note: possible conflict - %s was renamed "
                  "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(
                _("note: possible conflict - %s was deleted "
                  "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        if not partial:
            repo.dirstate.beginparentchange()
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
            repo.dirstate.endparentchange()
    finally:
        wlock.release()

    if not partial:

        def updatehook(parent1=xp1, parent2=xp2, error=stats[3]):
            repo.hook('update', parent1=parent1, parent2=parent2, error=error)

        repo._afterlock(updatehook)
    return stats
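
Because update() returns the same tuple as applyupdates(), callers typically
unpack it as counts. A sketch, assuming the four-field
(updated, merged, removed, unresolved) layout this era's applyupdates()
returns (the warning text is hypothetical):

    updated, merged, removed, unresolved = update(repo, node, False, False,
                                                  None)
    if unresolved:
        repo.ui.warn(_("unresolved file merges remain; run 'hg resolve'\n"))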
Example #18
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
Example #19
        password = ui.getpass()
    if username and password:
        ui.note(_('(authenticating to mail server as %s)\n') % (username))
        try:
            s.login(username, password)
        except smtplib.SMTPException, inst:
            raise util.Abort(inst)

    def send(sender, recipients, msg):
        try:
            return s.sendmail(sender, recipients, msg)
        except smtplib.SMTPRecipientsRefused, inst:
            recipients = [r[1] for r in inst.recipients.values()]
            raise util.Abort('\n' + '\n'.join(recipients))
        except smtplib.SMTPException, inst:
            raise util.Abort(inst)

    return send


def _sendmail(ui, sender, recipients, msg):
    '''send mail using sendmail.'''
    program = ui.config('email', 'method')
    cmdline = '%s -f %s %s' % (program, util.email(sender), ' '.join(
        map(util.email, recipients)))
    ui.note(_('sending mail: %s\n') % cmdline)
    fp = util.popen(cmdline, 'w')
    fp.write(msg)
    ret = fp.close()
    if ret:
        raise util.Abort('%s %s' % (
            os.path.basename(program.split(None, 1)[0]),
            util.explainexit(ret)[0]))
Example #20
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        self.cache = {}

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on the ui trace level, which
        # causes unexpected behaviour at the templating level and makes
        # it harder to extract into a standalone function. Its
        # behaviour cannot be changed, so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        types = {'header': '', 'footer': '', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                      **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
Example #21
def bisect(changelog, state):
    """find the next node (if any) for testing during a bisect search.
    returns a (nodes, number, good) tuple.

    'nodes' is the final result of the bisect if 'number' is 0.
    Otherwise 'number' indicates the remaining possible candidates for
    the search and 'nodes' contains the next bisect target.
    'good' is True if bisect is searching for a first good changeset, False
    if searching for a first bad one.
    """

    clparents = changelog.parentrevs
    skip = set([changelog.rev(n) for n in state['skip']])

    def buildancestors(bad, good):
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        goodrev = min(goodrevs)
        # build visit array
        ancestors = [None] * (len(changelog) + 1) # an extra for [-1]

        # set nodes descended from goodrevs
        for rev in goodrevs:
            ancestors[rev] = []
        for rev in changelog.revs(goodrev + 1):
            for prev in clparents(rev):
                if ancestors[prev] == []:
                    ancestors[rev] = []

        # clear good revs from array
        for rev in goodrevs:
            ancestors[rev] = None
        for rev in changelog.revs(len(changelog), goodrev):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None

        if ancestors[badrev] is None:
            return badrev, None
        return badrev, ancestors

    good = False
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors: # looking for bad to good transition?
        good = True
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors: # now we're confused
        if (len(state['bad']) == 1 and len(state['good']) == 1 and
            state['bad'] != state['good']):
            raise util.Abort(_("starting revisions are not directly related"))
        raise util.Abort(_("inconsistent state, %s:%s is good and bad")
                         % (badrev, short(bad)))

    # build children dict
    children = {}
    visit = collections.deque([badrev])
    candidates = []
    while visit:
        rev = visit.popleft()
        if ancestors[rev] == []:
            candidates.append(rev)
            for prev in clparents(rev):
                if prev != -1:
                    if prev in children:
                        children[prev].append(rev)
                    else:
                        children[prev] = [rev]
                        visit.append(prev)

    candidates.sort()
    # have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' been skipped?
    tot = len(candidates)
    unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
    if tot == 1 or not unskipped:
        return ([changelog.node(rev) for rev in candidates], 0, good)
    perfect = tot // 2

    # find the best node to test
    best_rev = None
    best_len = -1
    poison = set()
    for rev in candidates:
        if rev in poison:
            # poison children
            poison.update(children.get(rev, []))
            continue

        a = ancestors[rev] or [rev]
        ancestors[rev] = None

        x = len(a) # number of ancestors
        y = tot - x # number of non-ancestors
        value = min(x, y) # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect: # found a perfect candidate? quit early
                break

        if y < perfect and rev not in skip: # all downhill from here?
            # poison children
            poison.update(children.get(rev, []))
            continue

        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = list(set(ancestors[c] + a))
            else:
                ancestors[c] = a + [c]

    assert best_rev is not None
    best_node = changelog.node(best_rev)

    return ([best_node], tot, good)
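
Driving a bisect step with the documented (nodes, number, good) tuple looks
roughly like this (report() and testrev() are hypothetical helpers):

    nodes, number, good = bisect(repo.changelog, state)
    if number == 0:
        report(nodes, good)   # search finished: 'nodes' is the final result
    else:
        testrev(nodes[0])     # 'nodes' holds the next node to test;
                              # 'number' candidates remain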
Example #22
    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on the ui trace level, which
        # causes unexpected behaviour at the templating level and makes
        # it harder to extract into a standalone function. Its
        # behaviour cannot be changed, so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        types = {'header': '', 'footer': '', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                      **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
Example #23
def update(repo, node, branchmerge, force, partial, ancestor=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c  -C  dirty  rev  |  linear   same  cross
     n   n    n     n   |    ok     (1)     x
     n   n    n     y   |    ok     ok     ok
     n   n    y     *   |   merge   (2)    (2)
     n   y    *     *   |    ---  discard  ---
     y   n    y     *   |    ---    (3)    ---
     y   n    n     *   |    ---    ok     ---
     y   y    *     *   |    ---    (4)    ---

    x = can't happen
    * = don't-care
    1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
    2 = abort: crosses branches (use 'hg merge' to merge or
                 use 'hg update -C' to discard changes)
    3 = abort: uncommitted local changes
    4 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        if node is None:
            # tip of current branch
            try:
                node = repo.branchtags()[wc.branch()]
            except KeyError:
                if wc.branch() == "default":  # no default branch!
                    node = repo.lookup("tip")  # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())
        overwrite = force and not branchmerge
        pl = wc.parents()
        p1, p2 = pl[0], repo[node]
        if ancestor:
            pa = repo[ancestor]
        else:
            pa = p1.ancestor(p2)

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merges"))
        if branchmerge:
            if pa == p2:
                raise util.Abort(
                    _("merging with a working directory ancestor"
                      " has no effect"))
            elif pa == p1:
                if p1.branch() == p2.branch():
                    raise util.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("outstanding uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            for s in wc.substate:
                if wc.sub(s).dirty():
                    raise util.Abort(
                        _("outstanding uncommitted changes in "
                          "subrepository '%s'") % s)

        elif not overwrite:
            if pa == p1 or pa == p2:  # linear
                pass  # all good
            elif wc.dirty(missing=True):
                raise util.Abort(
                    _("crosses branches (merge branches or use"
                      " --clean to discard changes)"))
            elif onode is None:
                raise util.Abort(
                    _("crosses branches (merge branches or update"
                      " --check to force update)"))
            else:
                # Allow jumping branches if clean and specific rev given
                pa = p1

        ### calculate phase
        action = []
        folding = not util.checkcase(repo.path)
        if folding:
            # collision check is not needed for clean update
            if (not branchmerge
                    and (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(p2, None)
            else:
                _checkcollision(p2, wc)
        if not force:
            _checkunknown(repo, wc, p2)
        action += _forgetremoved(wc, p2, branchmerge)
        action += manifestmerge(repo, wc, p2, pa, overwrite, partial)

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)

        stats = applyupdates(repo, action, wc, p2, pa, overwrite)

        if not partial:
            repo.setparents(fp1, fp2)
            recordupdates(repo, action, branchmerge)
            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
    finally:
        wlock.release()

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
Example #24
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order.  Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    def increasing_windows(start, end, windowsize=8, sizelimit=512):
        if start < end:
            while start < end:
                yield start, min(windowsize, end - start)
                start += windowsize
                if windowsize < sizelimit:
                    windowsize *= 2
        else:
            while start > end:
                yield start, min(windowsize, start - end - 1)
                start -= windowsize
                if windowsize < sizelimit:
                    windowsize *= 2

    follow = opts.get('follow') or opts.get('follow_first')

    if not len(repo):
        return []

    if follow:
        defrange = '%s:0' % repo['.'].rev()
    else:
        defrange = '-1:0'
    revs = scmutil.revrange(repo, opts['rev'] or [defrange])
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if not slowpath and not match.files():
        # No files, no patterns.  Display all revs.
        wanted = set(revs)
    copies = []

    if not slowpath:
        # We only have to read through the filelog to find wanted revisions

        minrev, maxrev = min(revs), max(revs)
        def filerevgen(filelog, last):
            """
            Only files, no patterns.  Check the history of each file.

            Examines filelog entries within minrev, maxrev linkrev range
            Returns an iterator yielding (linkrev, parentlinkrevs, copied)
            tuples in backwards order
            """
            cl_count = len(repo)
            revs = []
            for j in xrange(0, last + 1):
                linkrev = filelog.linkrev(j)
                if linkrev < minrev:
                    continue
                # only yield revs for which we have the changelog; missing
                # ones can happen while doing "hg log" during a pull or commit
                if linkrev >= cl_count:
                    break

                parentlinkrevs = []
                for p in filelog.parentrevs(j):
                    if p != nullrev:
                        parentlinkrevs.append(filelog.linkrev(p))
                n = filelog.node(j)
                revs.append((linkrev, parentlinkrevs,
                             follow and filelog.renamed(n)))

            return reversed(revs)
        def iterfiles():
            pctx = repo['.']
            for filename in match.files():
                if follow:
                    if filename not in pctx:
                        raise util.Abort(_('cannot follow file not in parent '
                                           'revision: "%s"') % filename)
                    yield filename, pctx[filename].filenode()
                else:
                    yield filename, None
            for filename_node in copies:
                yield filename_node
        for file_, node in iterfiles():
            filelog = repo.file(file_)
            if not len(filelog):
                if node is None:
                    # A zero count may be a directory or deleted file, so
                    # try to find matching entries on the slow path.
                    if follow:
                        raise util.Abort(
                            _('cannot follow nonexistent file: "%s"') % file_)
                    slowpath = True
                    break
                else:
                    continue

            if node is None:
                last = len(filelog) - 1
            else:
                last = filelog.rev(node)


            # keep track of all ancestors of the file
            ancestors = set([filelog.linkrev(last)])

            # iterate from latest to oldest revision
            for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
                if not follow:
                    if rev > maxrev:
                        continue
                else:
                    # Note that last might not be the first interesting
                    # rev to us:
                    # if the file has been changed after maxrev, we'll
                    # have linkrev(last) > maxrev, and we still need
                    # to explore the file graph
                    if rev not in ancestors:
                        continue
                    # XXX insert 1327 fix here
                    if flparentlinkrevs:
                        ancestors.update(flparentlinkrevs)

                fncache.setdefault(rev, []).append(file_)
                wanted.add(rev)
                if copied:
                    copies.append(copied)
    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        for i in sorted(revs):
            ctx = change(i)
            matches = filter(match, ctx.files())
            if matches:
                fncache[i] = matches
                wanted.add(i)

    class followfilter(object):
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = set()
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.add(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.add(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.update(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.update(realparents(rev))
                    return True

            return False

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo.changelog.rev(repo.lookup(rev))
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted.discard(x)

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = followfilter(onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        for i, window in increasing_windows(0, len(revs)):
            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)
    return iterate()
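
A hedged usage sketch for the walker above, assuming it is exposed as
cmdutil.walkchangerevs(repo, match, opts, prepare) as in Mercurial of this
period (an assumption); the repository path, file pattern, and callback
below are illustrative:

from mercurial import ui as uimod, hg, cmdutil, scmutil

def prepare(ctx, fns):
    # called once for each matched revision, just before it is yielded
    print ctx.rev(), ctx.description().split('\n')[0]

u = uimod.ui()
repo = hg.repository(u, '.')              # assumes cwd is a repository
m = scmutil.match(repo['.'], ['README'])  # illustrative file pattern
for ctx in cmdutil.walkchangerevs(repo, m, {'rev': []}, prepare):
    pass                                  # contexts arrive in window order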
示例#25
0
def _dispatch(req):
    args = req.args
    ui = req.ui

    # check for cwd
    cwd = _earlygetopt(['--cwd'], args)
    if cwd:
        os.chdir(cwd[-1])

    rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
    path, lui = _getlocal(ui, rpath)

    # Now that we're operating in the right directory/repository with
    # the right config settings, check for shell aliases
    shellaliasfn = _checkshellalias(lui, ui, args)
    if shellaliasfn:
        return shellaliasfn()

    # Configure extensions in phases: uisetup, extsetup, cmdtable, and
    # reposetup. Programs like TortoiseHg will call _dispatch several
    # times so we keep track of configured extensions in _loaded.
    extensions.loadall(lui)
    exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
    # Propagate any changes to lui.__class__ by extensions
    ui.__class__ = lui.__class__

    # (uisetup and extsetup are handled in extensions.loadall)

    for name, module in exts:
        cmdtable = getattr(module, 'cmdtable', {})
        overrides = [cmd for cmd in cmdtable if cmd in commands.table]
        if overrides:
            ui.warn(_("extension '%s' overrides commands: %s\n")
                    % (name, " ".join(overrides)))
        commands.table.update(cmdtable)
        _loaded.add(name)

    # (reposetup is handled in hg.repository)

    addaliases(lui, commands.table)

    # check for fallback encoding
    fallback = lui.config('ui', 'fallbackencoding')
    if fallback:
        encoding.fallbackencoding = fallback

    fullargs = args
    cmd, func, args, options, cmdoptions = _parse(lui, args)

    if options["config"]:
        raise util.Abort(_("option --config may not be abbreviated!"))
    if options["cwd"]:
        raise util.Abort(_("option --cwd may not be abbreviated!"))
    if options["repository"]:
        raise util.Abort(_(
            "option -R has to be separated from other options (e.g. not -qR) "
            "and --repository may only be abbreviated as --repo!"))

    if options["encoding"]:
        encoding.encoding = options["encoding"]
    if options["encodingmode"]:
        encoding.encodingmode = options["encodingmode"]
    if options["time"]:
        def get_times():
            t = os.times()
            if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
                t = (t[0], t[1], t[2], t[3], time.clock())
            return t
        s = get_times()
        def print_time():
            t = get_times()
            ui.warn(_("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
                (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
        atexit.register(print_time)

    uis = set([ui, lui])

    if req.repo:
        uis.add(req.repo.ui)

    if options['verbose'] or options['debug'] or options['quiet']:
        for opt in ('verbose', 'debug', 'quiet'):
            val = str(bool(options[opt]))
            for ui_ in uis:
                ui_.setconfig('ui', opt, val, '--' + opt)

    if options['traceback']:
        for ui_ in uis:
            ui_.setconfig('ui', 'traceback', 'on', '--traceback')

    if options['noninteractive']:
        for ui_ in uis:
            ui_.setconfig('ui', 'interactive', 'off', '-y')

    if cmdoptions.get('insecure', False):
        for ui_ in uis:
            ui_.setconfig('web', 'cacerts', '', '--insecure')

    if options['version']:
        return commands.version_(ui)
    if options['help']:
        return commands.help_(ui, cmd)
    elif not cmd:
        return commands.help_(ui, 'shortlist')

    repo = None
    cmdpats = args[:]
    if cmd not in commands.norepo.split():
        # use the repo from the request only if we don't have -R
        if not rpath and not cwd:
            repo = req.repo

        if repo:
            # set the descriptors of the repo ui to those of ui
            repo.ui.fin = ui.fin
            repo.ui.fout = ui.fout
            repo.ui.ferr = ui.ferr
        else:
            try:
                repo = hg.repository(ui, path=path)
                if not repo.local():
                    raise util.Abort(_("repository '%s' is not local") % path)
                repo.ui.setconfig("bundle", "mainreporoot", repo.root, 'repo')
            except error.RequirementError:
                raise
            except error.RepoError:
                if cmd not in commands.optionalrepo.split():
                    if (cmd in commands.inferrepo.split() and
                        args and not path): # try to infer -R from command args
                        repos = map(cmdutil.findrepo, args)
                        guess = repos[0]
                        if guess and repos.count(guess) == len(repos):
                            req.args = ['--repository', guess] + fullargs
                            return _dispatch(req)
                    if not path:
                        raise error.RepoError(_("no repository found in '%s'"
                                                " (.hg not found)")
                                              % os.getcwd())
                    raise
        if repo:
            ui = repo.ui
            if options['hidden']:
                repo = repo.unfiltered()
        args.insert(0, repo)
    elif rpath:
        ui.warn(_("warning: --repository ignored\n"))

    msg = ' '.join(' ' in a and repr(a) or a for a in fullargs)
    ui.log("command", '%s\n', msg)
    d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
    try:
        return runcommand(lui, repo, cmd, fullargs, ui, options, d,
                          cmdpats, cmdoptions)
    finally:
        if repo and repo != req.repo:
            repo.close()
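
A sketch of driving this dispatcher directly, assuming the enclosing module
is mercurial.dispatch and that its request class accepts the raw argument
list (both assumptions); the command line is illustrative:

from mercurial import dispatch

req = dispatch.request(['log', '--limit', '3'])
ret = dispatch.dispatch(req)    # returns the command's exit status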
示例#26
0
def _verify(repo):
    repo = repo.unfiltered()
    mflinkrevs = {}
    filelinkrevs = {}
    filenodes = {}
    revisions = 0
    badrevs = set()
    errors = [0]
    warnings = [0]
    ui = repo.ui
    cl = repo.changelog
    mf = repo.manifest
    lrugetctx = util.lrucachefunc(repo.changectx)

    if not repo.url().startswith('file:'):
        raise util.Abort(_("cannot verify bundle or remote repos"))

    def err(linkrev, msg, filename=None):
        if linkrev is not None:
            badrevs.add(linkrev)
        else:
            linkrev = '?'
        msg = "%s: %s" % (linkrev, msg)
        if filename:
            msg = "%s@%s" % (filename, msg)
        ui.warn(" " + msg + "\n")
        errors[0] += 1

    def exc(linkrev, msg, inst, filename=None):
        if isinstance(inst, KeyboardInterrupt):
            ui.warn(_("interrupted"))
            raise
        if not str(inst):
            inst = repr(inst)
        err(linkrev, "%s: %s" % (msg, inst), filename)

    def warn(msg):
        ui.warn(msg + "\n")
        warnings[0] += 1

    def checklog(obj, name, linkrev):
        if not len(obj) and (havecl or havemf):
            err(linkrev, _("empty or missing %s") % name)
            return

        d = obj.checksize()
        if d[0]:
            err(None, _("data length off by %d bytes") % d[0], name)
        if d[1]:
            err(None, _("index contains %d extra bytes") % d[1], name)

        if obj.version != revlog.REVLOGV0:
            if not revlogv1:
                warn(_("warning: `%s' uses revlog format 1") % name)
        elif revlogv1:
            warn(_("warning: `%s' uses revlog format 0") % name)

    def checkentry(obj, i, node, seen, linkrevs, f):
        lr = obj.linkrev(obj.rev(node))
        if lr < 0 or (havecl and lr not in linkrevs):
            if lr < 0 or lr >= len(cl):
                msg = _("rev %d points to nonexistent changeset %d")
            else:
                msg = _("rev %d points to unexpected changeset %d")
            err(None, msg % (i, lr), f)
            if linkrevs:
                if f and len(linkrevs) > 1:
                    try:
                        # attempt to filter down to real linkrevs
                        linkrevs = [
                            l for l in linkrevs
                            if lrugetctx(l)[f].filenode() == node
                        ]
                    except Exception:
                        pass
                warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
            lr = None  # can't be trusted

        try:
            p1, p2 = obj.parents(node)
            if p1 not in seen and p1 != nullid:
                err(lr,
                    _("unknown parent 1 %s of %s") % (short(p1), short(node)),
                    f)
            if p2 not in seen and p2 != nullid:
                err(lr,
                    _("unknown parent 2 %s of %s") % (short(p2), short(node)),
                    f)
        except Exception, inst:
            exc(lr, _("checking parents of %s") % short(node), inst, f)

        if node in seen:
            err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
        seen[node] = i
        return lr
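
A minimal sketch of invoking this verifier, assuming _verify lives in
mercurial.verify and that verify.verify(repo) is the locking entry point
(both assumptions; the repository path is illustrative):

from mercurial import ui as uimod, hg, verify

repo = hg.repository(uimod.ui(), '.')  # assumes cwd is a repository
status = verify.verify(repo)           # prints problems; nonzero on errors
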
def clone(ui,
          source,
          dest=None,
          pull=False,
          rev=None,
          update=True,
          stream=False,
          branch=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository objects, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        src_repo = repository(ui, source)
    else:
        src_repo = source
        branch = (None, branch or [])
        origsource = source = src_repo.url()
    rev, checkout = addbranchrevs(src_repo, src_repo, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = localpath(dest)
    source = localpath(source)

    if os.path.exists(dest):
        if not os.path.isdir(dest):
            raise util.Abort(_("destination '%s' already exists") % dest)
        elif os.listdir(dest):
            raise util.Abort(_("destination '%s' is not empty") % dest)

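    # DirCleanup tears down a partially created destination if the clone
    # fails; close() disarms it once the clone has completed.  rmtree is
    # captured eagerly so cleanup still works during interpreter exit.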
    class DirCleanup(object):
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_

        def close(self):
            self.dir_ = None

        def cleanup(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    src_lock = dest_lock = dir_cleanup = None
    try:
        if islocal(dest):
            dir_cleanup = DirCleanup(dest)

        abspath = origsource
        copy = False
        if src_repo.cancopy() and islocal(dest):
            abspath = os.path.abspath(util.drop_scheme('file', origsource))
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                src_lock = src_repo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            src_repo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                dir_cleanup.dir_ = hgdir
            try:
                dest_path = hgdir
                os.mkdir(dest_path)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dir_cleanup.close()
                    raise util.Abort(
                        _("destination '%s' already exists") % dest)
                raise

            hardlink = None
            num = 0
            for f in src_repo.store.copylist():
                src = os.path.join(src_repo.sharedpath, f)
                dst = os.path.join(dest_path, f)
                dstbase = os.path.dirname(dst)
                if dstbase and not os.path.exists(dstbase):
                    os.mkdir(dstbase)
                if os.path.exists(src):
                    if dst.endswith('data'):
                        # lock to avoid premature writing to the target
                        dest_lock = lock.lock(os.path.join(dstbase, "lock"))
                    hardlink, n = util.copyfiles(src, dst, hardlink)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)

            # we need to re-init the repo after manually copying the data
            # into it
            dest_repo = repository(ui, dest)
            src_repo.hook('outgoing',
                          source='clone',
                          node=node.hex(node.nullid))
        else:
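
A usage sketch for clone() as documented above; the source URL and
destination path are illustrative:

from mercurial import ui as uimod

u = uimod.ui()
# returns the pair (source repo, newly created destination repo)
src_repo, dest_repo = clone(u, 'http://example.com/hg/proj', 'proj-copy',
                            pull=False, rev=None, update=True)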
示例#28
0
def prepush(repo, remote, force, revs, newbranch):
    '''Analyze the local and remote repositories and determine which
    changesets need to be pushed to the remote. Return value depends
    on circumstances:

    If we are not going to push anything, return a tuple (None,
    outgoing) where outgoing is 0 if there are no outgoing
    changesets and 1 if there are, but we refuse to push them
    (e.g. would create new remote heads).

    Otherwise, return a tuple (changegroup, remoteheads), where
    changegroup is a readable file-like object whose read() returns
    successive changegroup chunks ready to be sent over the wire and
    remoteheads is the list of remote heads.'''
    commoninc = findcommonincoming(repo, remote, force=force)
    common, revs = findcommonoutgoing(repo,
                                      remote,
                                      onlyheads=revs,
                                      commoninc=commoninc,
                                      force=force)
    _common, inc, remoteheads = commoninc

    cl = repo.changelog
    outg = cl.findmissing(common, revs)

    if not outg:
        repo.ui.status(_("no changes found\n"))
        return None, 1

    if not force and remoteheads != [nullid]:
        if remote.capable('branchmap'):
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            # 1. Create set of branches involved in the push.
            branches = set(repo[n].branch() for n in outg)

            # 2. Check for new branches on the remote.
            remotemap = remote.branchmap()
            newbranches = branches - set(remotemap)
            if newbranches and not newbranch:  # new branch requires --new-branch
                branchnames = ', '.join(sorted(newbranches))
                raise util.Abort(_("push creates new remote branches: %s!") %
                                 branchnames,
                                 hint=_("use 'hg push --new-branch' to create"
                                        " new remote branches"))
            branches.difference_update(newbranches)

            # 3. Construct the initial oldmap and newmap dicts.
            # They contain information about the remote heads before and
            # after the push, respectively.
            # Heads not found locally are not included in either dict,
            # since they won't be affected by the push.
            # unsynced contains all branches with incoming changesets.
            oldmap = {}
            newmap = {}
            unsynced = set()
            for branch in branches:
                remotebrheads = remotemap[branch]
                prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
                oldmap[branch] = prunedbrheads
                newmap[branch] = list(prunedbrheads)
                if len(remotebrheads) > len(prunedbrheads):
                    unsynced.add(branch)

            # 4. Update newmap with outgoing changes.
            # This will possibly add new heads and remove existing ones.
            ctxgen = (repo[n] for n in outg)
            repo._updatebranchcache(newmap, ctxgen)

        else:
            # 1-4b. old servers: Check for new topological heads.
            # Construct {old,new}map with branch = None (topological branch).
            # (code based on _updatebranchcache)
            oldheads = set(h for h in remoteheads if h in cl.nodemap)
            newheads = oldheads.union(outg)
            if len(newheads) > 1:
                for latest in reversed(outg):
                    if latest not in newheads:
                        continue
                    minhrev = min(cl.rev(h) for h in newheads)
                    reachable = cl.reachable(latest, cl.node(minhrev))
                    reachable.remove(latest)
                    newheads.difference_update(reachable)
            branches = set([None])
            newmap = {None: newheads}
            oldmap = {None: oldheads}
            unsynced = inc and branches or set()

        # 5. Check for new heads.
        # If there are more heads after the push than before, a suitable
        # error message, depending on unsynced status, is displayed.
        error = None
        for branch in branches:
            newhs = set(newmap[branch])
            oldhs = set(oldmap[branch])
            if len(newhs) > len(oldhs):
                dhs = list(newhs - oldhs)
                if error is None:
                    if branch not in ('default', None):
                        error = _("push creates new remote head %s "
                                  "on branch '%s'!") % (short(dhs[0]), branch)
                    else:
                        error = _("push creates new remote head %s!") % short(
                            dhs[0])
                    if branch in unsynced:
                        hint = _("you should pull and merge or "
                                 "use push -f to force")
                    else:
                        hint = _("did you forget to merge? "
                                 "use push -f to force")
                if branch is not None:
                    repo.ui.note("new remote heads on branch '%s'\n" % branch)
                for h in dhs:
                    repo.ui.note("new remote head %s\n" % short(h))
        if error:
            raise util.Abort(error, hint=hint)

        # 6. Check for unsynced changes on involved branches.
        if unsynced:
            repo.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        # use the fast path, no race possible on push
        cg = repo._changegroup(outg, 'push')
    else:
        cg = repo.getbundle('push', heads=revs, common=common)
    return cg, remoteheads
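
A sketch of the surrounding push flow, assuming remotes of this era expose
an unbundle(cg, heads, source) wire call and that hg.repository() can open
the remote (both assumptions; the URL is illustrative):

from mercurial import ui as uimod, hg

u = uimod.ui()
repo = hg.repository(u, '.')                              # local repo
remote = hg.repository(u, 'http://example.com/hg/proj')   # illustrative
cg, remoteheads = prepush(repo, remote, force=False, revs=None,
                          newbranch=False)
if cg is not None:
    # the remote re-checks remoteheads so a racing push is detected
    remote.unbundle(cg, remoteheads, 'push')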
示例#29
0
def _dispatch(ui, args):
    # read --config before doing anything else
    # (e.g. to change trust settings for reading .hg/hgrc)
    config = _earlygetopt(['--config'], args)
    if config:
        ui.updateopts(config=_parseconfig(config))

    # check for cwd
    cwd = _earlygetopt(['--cwd'], args)
    if cwd:
        os.chdir(cwd[-1])

    # read the local repository .hgrc into a local ui object
    path = _findrepo(os.getcwd()) or ""
    if not path:
        lui = ui
    else:
        try:
            lui = _ui.ui(parentui=ui)
            lui.readconfig(os.path.join(path, ".hg", "hgrc"))
        except IOError:
            pass

    # now we can expand paths, even ones in .hg/hgrc
    rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
    if rpath:
        path = lui.expandpath(rpath[-1])
        lui = _ui.ui(parentui=ui)
        lui.readconfig(os.path.join(path, ".hg", "hgrc"))

    extensions.loadall(lui)
    for name, module in extensions.extensions():
        if name in _loaded:
            continue

        # setup extensions
        # TODO this should be generalized to scheme, where extensions can
        #      redepend on other extensions.  then we should toposort them, and
        #      do initialization in correct order
        extsetup = getattr(module, 'extsetup', None)
        if extsetup:
            extsetup()

        cmdtable = getattr(module, 'cmdtable', {})
        overrides = [cmd for cmd in cmdtable if cmd in commands.table]
        if overrides:
            ui.warn(_("extension '%s' overrides commands: %s\n")
                    % (name, " ".join(overrides)))
        commands.table.update(cmdtable)
        _loaded[name] = 1

    # check for fallback encoding
    fallback = lui.config('ui', 'fallbackencoding')
    if fallback:
        util._fallbackencoding = fallback

    fullargs = args
    cmd, func, args, options, cmdoptions = _parse(lui, args)

    if options["config"]:
        raise util.Abort(_("Option --config may not be abbreviated!"))
    if options["cwd"]:
        raise util.Abort(_("Option --cwd may not be abbreviated!"))
    if options["repository"]:
        raise util.Abort(_(
            "Option -R has to be separated from other options (i.e. not -qR) "
            "and --repository may only be abbreviated as --repo!"))

    if options["encoding"]:
        util._encoding = options["encoding"]
    if options["encodingmode"]:
        util._encodingmode = options["encodingmode"]
    if options["time"]:
        def get_times():
            t = os.times()
            if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
                t = (t[0], t[1], t[2], t[3], time.clock())
            return t
        s = get_times()
        def print_time():
            t = get_times()
            ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
                (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
        atexit.register(print_time)

    ui.updateopts(options["verbose"], options["debug"], options["quiet"],
                 not options["noninteractive"], options["traceback"])

    if options['help']:
        return commands.help_(ui, cmd, options['version'])
    elif options['version']:
        return commands.version_(ui)
    elif not cmd:
        return commands.help_(ui, 'shortlist')

    repo = None
    if cmd not in commands.norepo.split():
        try:
            repo = hg.repository(ui, path=path)
            ui = repo.ui
            if not repo.local():
                raise util.Abort(_("repository '%s' is not local") % path)
            ui.setconfig("bundle", "mainreporoot", repo.root)
        except RepoError:
            if cmd not in commands.optionalrepo.split():
                if args and not path: # try to infer -R from command args
                    repos = map(_findrepo, args)
                    guess = repos[0]
                    if guess and repos.count(guess) == len(repos):
                        return _dispatch(ui, ['--repository', guess] + fullargs)
                if not path:
                    raise RepoError(_("There is no Mercurial repository here"
                                      " (.hg not found)"))
                raise
        d = lambda: func(ui, repo, *args, **cmdoptions)
    else:
        d = lambda: func(ui, *args, **cmdoptions)

    # run pre-hook, and abort if it fails
    ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs))
    if ret:
        return ret
    ret = _runcommand(ui, options, cmd, d)
    # run post-hook, passing command result
    hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
              result = ret)
    return ret
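
The pre-/post-command hooks run above follow the 'pre-<command>' /
'post-<command>' naming convention.  As a sketch, an in-process Python hook
can be installed through the hooks config section; the hook name and
function are illustrative, and passing a callable to setconfig is assumed
to be supported here, as in Mercurial of this period:

from mercurial import ui as uimod

def refuselog(ui, repo, hooktype, **kwargs):
    ui.warn("log is disabled here\n")
    return True      # a truthy result from a pre-<command> hook aborts it

u = uimod.ui()
u.setconfig('hooks', 'pre-log', refuselog)   # callable as in-process hook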
示例#30
0
    def __init__(self, ui, path, bundlename):
        self._tempparent = None
        try:
            localrepo.localrepository.__init__(self, ui, path)
        except error.RepoError:
            self._tempparent = tempfile.mkdtemp()
            localrepo.instance(ui, self._tempparent, 1)
            localrepo.localrepository.__init__(self, ui, self._tempparent)
        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')

        if path:
            self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
        else:
            self._url = 'bundle:' + bundlename

        self.tempfile = None
        f = util.posixfile(bundlename, "rb")
        self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
        if self.bundle.compressed():
            fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
                                            suffix=".hg10un")
            self.tempfile = temp
            fptemp = os.fdopen(fdtemp, 'wb')

            try:
                fptemp.write("HG10UN")
                while True:
                    chunk = self.bundle.read(2**18)
                    if not chunk:
                        break
                    fptemp.write(chunk)
            finally:
                fptemp.close()

            f = self.vfs.open(self.tempfile, mode="rb")
            self.bundlefile = self.bundle = exchange.readbundle(
                ui, f, bundlename, self.vfs)

        if isinstance(self.bundle, bundle2.unbundle20):
            cgparts = [
                part for part in self.bundle.iterparts()
                if (part.type == 'changegroup') and (
                    part.params.get('version', '01') in changegroup.packermap)
            ]

            if not cgparts:
                raise util.Abort('No changegroups found')
            version = cgparts[0].params.get('version', '01')
            cgparts = [
                p for p in cgparts if p.params.get('version', '01') == version
            ]
            if len(cgparts) > 1:
                raise NotImplementedError(
                    "Can't process multiple changegroups")
            part = cgparts[0]

            part.seek(0)
            self.bundle = changegroup.packermap[version][1](part, 'UN')

        # dict with the mapping 'filename' -> position in the bundle
        self.bundlefilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(self, None, phases.draft,
                               [ctx.node() for ctx in self[self.firstnewrev:]])
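
A sketch of opening a bundle file as a read-only repository through this
class, assuming it is registered for the 'bundle:' URL scheme so that
hg.repository() can reach it (an assumption; the file name is
illustrative):

from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), 'bundle:changes.hg')
# revisions contributed by the bundle itself start at firstnewrev
for rev in xrange(repo.firstnewrev, len(repo)):
    print repo[rev].hex()[:12]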